Anbernic RG-DS AW87391 Speaker Amps

Merge series from Chris Morgan <macroalpha82@gmail.com>:

Add support for the Anbernic RG-DS Speaker Amplifiers. The Anbernic
RG-DS uses two AW87391 ICs at 0x58 and 0x5B on i2c2. However, the
manufacturer did not provide a firmware file, only a sequence of
register writes to each device to enable and disable them.

Add support for this *specific* configuration in the AW87390 driver.
Since we are relying on a device-specific sequence I am using a
device-specific compatible string. This driver does not currently
support the aw87391 for any other device as I have none to test
with valid firmware. Attempts to create firmware with the AwinicSCPv4
have not been successful.
This commit is contained in:
Mark Brown
2026-02-02 23:31:39 +00:00
199 changed files with 1554 additions and 828 deletions

View File

@@ -851,6 +851,7 @@ Valentin Schneider <vschneid@redhat.com> <valentin.schneider@arm.com>
Veera Sundaram Sankaran <quic_veeras@quicinc.com> <veeras@codeaurora.org>
Veerabhadrarao Badiganti <quic_vbadigan@quicinc.com> <vbadigan@codeaurora.org>
Venkateswara Naralasetty <quic_vnaralas@quicinc.com> <vnaralas@codeaurora.org>
Viacheslav Bocharov <v@baodeep.com> <adeep@lexina.in>
Vikash Garodia <vikash.garodia@oss.qualcomm.com> <vgarodia@codeaurora.org>
Vikash Garodia <vikash.garodia@oss.qualcomm.com> <quic_vgarodia@quicinc.com>
Vincent Mailhol <mailhol@kernel.org> <mailhol.vincent@wanadoo.fr>

View File

@@ -231,6 +231,8 @@ eventually gets pushed out to disk. This tunable is used to define when dirty
inode is old enough to be eligible for writeback by the kernel flusher threads.
And, it is also used as the interval to wakeup dirtytime_writeback thread.
Setting this to zero disables periodic dirtytime writeback.
dirty_writeback_centisecs
=========================

View File

@@ -88,7 +88,7 @@ patternProperties:
pcie1_clkreq, pcie1_wakeup, pmic0, pmic1, ptp, ptp_clk,
ptp_trig, pwm0, pwm1, pwm2, pwm3, rgmii, sdio0, sdio_sb, smi,
spi_cs1, spi_cs2, spi_cs3, spi_quad, uart1, uart2,
usb2_drvvbus1, usb32_drvvbus ]
usb2_drvvbus1, usb32_drvvbus0 ]
function:
enum: [ drvbus, emmc, gpio, i2c, jtag, led, mii, mii_err, onewire,

View File

@@ -15,12 +15,15 @@ description:
sound quality, which is a new high efficiency, low
noise, constant large volume, 6th Smart K audio amplifier.
allOf:
- $ref: dai-common.yaml#
properties:
compatible:
const: awinic,aw87390
oneOf:
- enum:
- awinic,aw87390
- items:
- enum:
- anbernic,rgds-amp
- const: awinic,aw87391
reg:
maxItems: 1
@@ -40,10 +43,31 @@ required:
- compatible
- reg
- "#sound-dai-cells"
- awinic,audio-channel
unevaluatedProperties: false
allOf:
- $ref: dai-common.yaml#
- if:
properties:
compatible:
contains:
enum:
- awinic,aw87390
then:
required:
- awinic,audio-channel
- if:
properties:
compatible:
contains:
enum:
- anbernic,rgds-amp
then:
properties:
vdd-supply: true
examples:
- |
i2c {

View File

@@ -44,6 +44,7 @@ properties:
- items:
- enum:
- fsl,imx94-sai
- fsl,imx952-sai
- const: fsl,imx95-sai
reg:

View File

@@ -9260,7 +9260,6 @@ F: drivers/scsi/be2iscsi/
EMULEX 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER (be2net)
M: Ajit Khaparde <ajit.khaparde@broadcom.com>
M: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
M: Somnath Kotur <somnath.kotur@broadcom.com>
L: netdev@vger.kernel.org
S: Maintained
W: http://www.emulex.com

View File

@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 19
SUBLEVEL = 0
EXTRAVERSION = -rc7
EXTRAVERSION = -rc8
NAME = Baby Opossum Posse
# *DOCUMENTATION*
@@ -1624,7 +1624,8 @@ MRPROPER_FILES += include/config include/generated \
certs/x509.genkey \
vmlinux-gdb.py \
rpmbuild \
rust/libmacros.so rust/libmacros.dylib
rust/libmacros.so rust/libmacros.dylib \
rust/libpin_init_internal.so rust/libpin_init_internal.dylib
# clean - Delete most, but leave enough to build external modules
#

View File

@@ -670,7 +670,6 @@ CONFIG_PINCTRL_LPASS_LPI=m
CONFIG_PINCTRL_SC7280_LPASS_LPI=m
CONFIG_PINCTRL_SM6115_LPASS_LPI=m
CONFIG_PINCTRL_SM8250_LPASS_LPI=m
CONFIG_PINCTRL_SM8350_LPASS_LPI=m
CONFIG_PINCTRL_SM8450_LPASS_LPI=m
CONFIG_PINCTRL_SC8280XP_LPASS_LPI=m
CONFIG_PINCTRL_SM8550_LPASS_LPI=m

View File

@@ -723,7 +723,7 @@ static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)
dpage = pfn_to_page(uvmem_pfn);
dpage->zone_device_data = pvt;
zone_device_page_init(dpage, 0);
zone_device_page_init(dpage, &kvmppc_uvmem_pgmap, 0);
return dpage;
out_clear:
spin_lock(&kvmppc_uvmem_bitmap_lock);

View File

@@ -75,26 +75,12 @@ static u32 __init_or_module sifive_errata_probe(unsigned long archid,
return cpu_req_errata;
}
static void __init_or_module warn_miss_errata(u32 miss_errata)
{
int i;
pr_warn("----------------------------------------------------------------\n");
pr_warn("WARNING: Missing the following errata may cause potential issues\n");
for (i = 0; i < ERRATA_SIFIVE_NUMBER; i++)
if (miss_errata & 0x1 << i)
pr_warn("\tSiFive Errata[%d]:%s\n", i, errata_list[i].name);
pr_warn("Please enable the corresponding Kconfig to apply them\n");
pr_warn("----------------------------------------------------------------\n");
}
void sifive_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
unsigned long archid, unsigned long impid,
unsigned int stage)
{
struct alt_entry *alt;
u32 cpu_req_errata;
u32 cpu_apply_errata = 0;
u32 tmp;
BUILD_BUG_ON(ERRATA_SIFIVE_NUMBER >= RISCV_VENDOR_EXT_ALTERNATIVES_BASE);
@@ -118,10 +104,6 @@ void sifive_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
patch_text_nosync(ALT_OLD_PTR(alt), ALT_ALT_PTR(alt),
alt->alt_len);
mutex_unlock(&text_mutex);
cpu_apply_errata |= tmp;
}
}
if (stage != RISCV_ALTERNATIVES_MODULE &&
cpu_apply_errata != cpu_req_errata)
warn_miss_errata(cpu_req_errata - cpu_apply_errata);
}

View File

@@ -2,7 +2,7 @@
#ifndef __ASM_COMPAT_H
#define __ASM_COMPAT_H
#define COMPAT_UTS_MACHINE "riscv\0\0"
#define COMPAT_UTS_MACHINE "riscv32\0\0"
/*
* Architecture specific compatibility types

View File

@@ -20,7 +20,7 @@ extern void * const sys_call_table[];
extern void * const compat_sys_call_table[];
/*
* Only the low 32 bits of orig_r0 are meaningful, so we return int.
* Only the low 32 bits of orig_a0 are meaningful, so we return int.
* This importantly ignores the high bits on 64-bit, so comparisons
* sign-extend the low 32 bits.
*/

View File

@@ -145,14 +145,14 @@ struct arch_ext_priv {
long (*save)(struct pt_regs *regs, void __user *sc_vec);
};
struct arch_ext_priv arch_ext_list[] = {
static struct arch_ext_priv arch_ext_list[] = {
{
.magic = RISCV_V_MAGIC,
.save = &save_v_state,
},
};
const size_t nr_arch_exts = ARRAY_SIZE(arch_ext_list);
static const size_t nr_arch_exts = ARRAY_SIZE(arch_ext_list);
static long restore_sigcontext(struct pt_regs *regs,
struct sigcontext __user *sc)
@@ -297,7 +297,7 @@ static long setup_sigcontext(struct rt_sigframe __user *frame,
} else {
err |= __put_user(arch_ext->magic, &sc_ext_ptr->magic);
err |= __put_user(ext_size, &sc_ext_ptr->size);
sc_ext_ptr = (void *)sc_ext_ptr + ext_size;
sc_ext_ptr = (void __user *)sc_ext_ptr + ext_size;
}
}
/* Write zero to fp-reserved space and check it on restore_sigcontext */

View File

@@ -1662,6 +1662,7 @@ static void destroy_sysfs(struct rnbd_clt_dev *dev,
/* To avoid deadlock firstly remove itself */
sysfs_remove_file_self(&dev->kobj, sysfs_self);
kobject_del(&dev->kobj);
kobject_put(&dev->kobj);
}
}

View File

@@ -685,6 +685,8 @@ static int hci_uart_register_dev(struct hci_uart *hu)
return err;
}
set_bit(HCI_UART_PROTO_INIT, &hu->flags);
if (test_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags))
return 0;
@@ -712,8 +714,6 @@ static int hci_uart_set_proto(struct hci_uart *hu, int id)
hu->proto = p;
set_bit(HCI_UART_PROTO_INIT, &hu->flags);
err = hci_uart_register_dev(hu);
if (err) {
return err;

View File

@@ -142,6 +142,12 @@ static const struct of_device_id simple_pm_bus_of_match[] = {
{ .compatible = "simple-mfd", .data = ONLY_BUS },
{ .compatible = "isa", .data = ONLY_BUS },
{ .compatible = "arm,amba-bus", .data = ONLY_BUS },
{ .compatible = "fsl,ls1021a-scfg", },
{ .compatible = "fsl,ls1043a-scfg", },
{ .compatible = "fsl,ls1046a-scfg", },
{ .compatible = "fsl,ls1088a-isc", },
{ .compatible = "fsl,ls2080a-isc", },
{ .compatible = "fsl,lx2160a-isc", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, simple_pm_bus_of_match);

View File

@@ -263,6 +263,7 @@ static const struct of_device_id qcom_cpufreq_ipq806x_match_list[] __maybe_unuse
{ .compatible = "qcom,ipq8066", .data = (const void *)QCOM_ID_IPQ8066 },
{ .compatible = "qcom,ipq8068", .data = (const void *)QCOM_ID_IPQ8068 },
{ .compatible = "qcom,ipq8069", .data = (const void *)QCOM_ID_IPQ8069 },
{ /* sentinel */ }
};
static int qcom_cpufreq_ipq8064_name_version(struct device *cpu_dev,

View File

@@ -173,20 +173,14 @@ static void split_transaction_timeout_callback(struct timer_list *timer)
}
}
static void start_split_transaction_timeout(struct fw_transaction *t,
struct fw_card *card)
// card->transactions.lock should be acquired in advance for the linked list.
static void start_split_transaction_timeout(struct fw_transaction *t, unsigned int delta)
{
unsigned long delta;
if (list_empty(&t->link) || WARN_ON(t->is_split_transaction))
return;
t->is_split_transaction = true;
// NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
// local destination never runs in any type of IRQ context.
scoped_guard(spinlock_irqsave, &card->split_timeout.lock)
delta = card->split_timeout.jiffies;
mod_timer(&t->split_timeout_timer, jiffies + delta);
}
@@ -207,13 +201,20 @@ static void transmit_complete_callback(struct fw_packet *packet,
break;
case ACK_PENDING:
{
unsigned int delta;
// NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
// local destination never runs in any type of IRQ context.
scoped_guard(spinlock_irqsave, &card->split_timeout.lock) {
t->split_timeout_cycle =
compute_split_timeout_timestamp(card, packet->timestamp) & 0xffff;
delta = card->split_timeout.jiffies;
}
start_split_transaction_timeout(t, card);
// NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
// local destination never runs in any type of IRQ context.
scoped_guard(spinlock_irqsave, &card->transactions.lock)
start_split_transaction_timeout(t, delta);
break;
}
case ACK_BUSY_X:

View File

@@ -301,12 +301,10 @@ static struct brcmstb_gpio_bank *brcmstb_gpio_hwirq_to_bank(
struct brcmstb_gpio_priv *priv, irq_hw_number_t hwirq)
{
struct brcmstb_gpio_bank *bank;
int i = 0;
/* banks are in descending order */
list_for_each_entry_reverse(bank, &priv->bank_list, node) {
i += bank->chip.gc.ngpio;
if (hwirq < i)
list_for_each_entry(bank, &priv->bank_list, node) {
if (hwirq >= bank->chip.gc.offset &&
hwirq < (bank->chip.gc.offset + bank->chip.gc.ngpio))
return bank;
}
return NULL;

View File

@@ -799,10 +799,13 @@ static struct platform_device omap_mpuio_device = {
static inline void omap_mpuio_init(struct gpio_bank *bank)
{
platform_set_drvdata(&omap_mpuio_device, bank);
static bool registered;
if (platform_driver_register(&omap_mpuio_driver) == 0)
(void) platform_device_register(&omap_mpuio_device);
platform_set_drvdata(&omap_mpuio_device, bank);
if (!registered) {
(void)platform_device_register(&omap_mpuio_device);
registered = true;
}
}
/*---------------------------------------------------------------------*/
@@ -1575,13 +1578,24 @@ static struct platform_driver omap_gpio_driver = {
*/
static int __init omap_gpio_drv_reg(void)
{
return platform_driver_register(&omap_gpio_driver);
int ret;
ret = platform_driver_register(&omap_mpuio_driver);
if (ret)
return ret;
ret = platform_driver_register(&omap_gpio_driver);
if (ret)
platform_driver_unregister(&omap_mpuio_driver);
return ret;
}
postcore_initcall(omap_gpio_drv_reg);
static void __exit omap_gpio_exit(void)
{
platform_driver_unregister(&omap_gpio_driver);
platform_driver_unregister(&omap_mpuio_driver);
}
module_exit(omap_gpio_exit);

View File

@@ -914,6 +914,8 @@ static void pca953x_irq_shutdown(struct irq_data *d)
clear_bit(hwirq, chip->irq_trig_fall);
clear_bit(hwirq, chip->irq_trig_level_low);
clear_bit(hwirq, chip->irq_trig_level_high);
pca953x_irq_mask(d);
}
static void pca953x_irq_print_chip(struct irq_data *data, struct seq_file *p)

View File

@@ -18,7 +18,6 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -164,12 +163,6 @@ static int rockchip_gpio_set_direction(struct gpio_chip *chip,
unsigned long flags;
u32 data = input ? 0 : 1;
if (input)
pinctrl_gpio_direction_input(chip, offset);
else
pinctrl_gpio_direction_output(chip, offset);
raw_spin_lock_irqsave(&bank->slock, flags);
rockchip_gpio_writel_bit(bank, offset, data, bank->gpio_regs->port_ddr);
raw_spin_unlock_irqrestore(&bank->slock, flags);
@@ -593,7 +586,6 @@ static int rockchip_gpiolib_register(struct rockchip_pin_bank *bank)
gc->ngpio = bank->nr_pins;
gc->label = bank->name;
gc->parent = bank->dev;
gc->can_sleep = true;
ret = gpiochip_add_data(gc, bank);
if (ret) {

View File

@@ -35,7 +35,7 @@
struct sprd_gpio {
struct gpio_chip chip;
void __iomem *base;
spinlock_t lock;
raw_spinlock_t lock;
int irq;
};
@@ -54,7 +54,7 @@ static void sprd_gpio_update(struct gpio_chip *chip, unsigned int offset,
unsigned long flags;
u32 tmp;
spin_lock_irqsave(&sprd_gpio->lock, flags);
raw_spin_lock_irqsave(&sprd_gpio->lock, flags);
tmp = readl_relaxed(base + reg);
if (val)
@@ -63,7 +63,7 @@ static void sprd_gpio_update(struct gpio_chip *chip, unsigned int offset,
tmp &= ~BIT(SPRD_GPIO_BIT(offset));
writel_relaxed(tmp, base + reg);
spin_unlock_irqrestore(&sprd_gpio->lock, flags);
raw_spin_unlock_irqrestore(&sprd_gpio->lock, flags);
}
static int sprd_gpio_read(struct gpio_chip *chip, unsigned int offset, u16 reg)
@@ -236,7 +236,7 @@ static int sprd_gpio_probe(struct platform_device *pdev)
if (IS_ERR(sprd_gpio->base))
return PTR_ERR(sprd_gpio->base);
spin_lock_init(&sprd_gpio->lock);
raw_spin_lock_init(&sprd_gpio->lock);
sprd_gpio->chip.label = dev_name(&pdev->dev);
sprd_gpio->chip.ngpio = SPRD_GPIO_NR;

View File

@@ -1682,10 +1682,10 @@ static void gpio_virtuser_device_config_group_release(struct config_item *item)
{
struct gpio_virtuser_device *dev = to_gpio_virtuser_device(item);
guard(mutex)(&dev->lock);
if (gpio_virtuser_device_is_live(dev))
gpio_virtuser_device_deactivate(dev);
scoped_guard(mutex, &dev->lock) {
if (gpio_virtuser_device_is_live(dev))
gpio_virtuser_device_deactivate(dev);
}
mutex_destroy(&dev->lock);
ida_free(&gpio_virtuser_ida, dev->id);

View File

@@ -1104,6 +1104,7 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
unsigned int pin = agpio->pin_table[i];
struct acpi_gpio_connection *conn;
struct gpio_desc *desc;
u16 word, shift;
bool found;
mutex_lock(&achip->conn_lock);
@@ -1158,10 +1159,22 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
mutex_unlock(&achip->conn_lock);
if (function == ACPI_WRITE)
gpiod_set_raw_value_cansleep(desc, !!(*value & BIT(i)));
else
*value |= (u64)gpiod_get_raw_value_cansleep(desc) << i;
/*
* For the cases when OperationRegion() consists of more than
* 64 bits calculate the word and bit shift to use that one to
* access the value.
*/
word = i / 64;
shift = i % 64;
if (function == ACPI_WRITE) {
gpiod_set_raw_value_cansleep(desc, value[word] & BIT_ULL(shift));
} else {
if (gpiod_get_raw_value_cansleep(desc))
value[word] |= BIT_ULL(shift);
else
value[word] &= ~BIT_ULL(shift);
}
}
out:

View File

@@ -498,8 +498,13 @@ void amdgpu_gmc_filter_faults_remove(struct amdgpu_device *adev, uint64_t addr,
if (adev->irq.retry_cam_enabled)
return;
else if (adev->irq.ih1.ring_size)
ih = &adev->irq.ih1;
else if (adev->irq.ih_soft.enabled)
ih = &adev->irq.ih_soft;
else
return;
ih = &adev->irq.ih1;
/* Get the WPTR of the last entry in IH ring */
last_wptr = amdgpu_ih_get_wptr(adev, ih);
/* Order wptr with ring data. */

View File

@@ -235,7 +235,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
amdgpu_ring_ib_begin(ring);
if (ring->funcs->emit_gfx_shadow)
if (ring->funcs->emit_gfx_shadow && adev->gfx.cp_gfx_shadow)
amdgpu_ring_emit_gfx_shadow(ring, shadow_va, csa_va, gds_va,
init_shadow, vmid);
@@ -291,7 +291,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
fence_flags | AMDGPU_FENCE_FLAG_64BIT);
}
if (ring->funcs->emit_gfx_shadow && ring->funcs->init_cond_exec) {
if (ring->funcs->emit_gfx_shadow && ring->funcs->init_cond_exec &&
adev->gfx.cp_gfx_shadow) {
amdgpu_ring_emit_gfx_shadow(ring, 0, 0, 0, false, 0);
amdgpu_ring_init_cond_exec(ring, ring->cond_exe_gpu_addr);
}

View File

@@ -6879,7 +6879,7 @@ static int gfx_v10_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset)
memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
/* reset the ring */
ring->wptr = 0;
*ring->wptr_cpu_addr = 0;
atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
amdgpu_ring_clear_ring(ring);
}

View File

@@ -4201,7 +4201,7 @@ static int gfx_v11_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset)
memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
/* reset the ring */
ring->wptr = 0;
*ring->wptr_cpu_addr = 0;
atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
amdgpu_ring_clear_ring(ring);
}
@@ -6823,11 +6823,12 @@ static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring,
struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
bool use_mmio = false;
int r;
amdgpu_ring_reset_helper_begin(ring, timedout_fence);
r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false);
r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, use_mmio);
if (r) {
dev_warn(adev->dev, "reset via MES failed and try pipe reset %d\n", r);
@@ -6836,16 +6837,18 @@ static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring,
return r;
}
r = gfx_v11_0_kgq_init_queue(ring, true);
if (r) {
dev_err(adev->dev, "failed to init kgq\n");
return r;
}
if (use_mmio) {
r = gfx_v11_0_kgq_init_queue(ring, true);
if (r) {
dev_err(adev->dev, "failed to init kgq\n");
return r;
}
r = amdgpu_mes_map_legacy_queue(adev, ring);
if (r) {
dev_err(adev->dev, "failed to remap kgq\n");
return r;
r = amdgpu_mes_map_legacy_queue(adev, ring);
if (r) {
dev_err(adev->dev, "failed to remap kgq\n");
return r;
}
}
return amdgpu_ring_reset_helper_end(ring, timedout_fence);

View File

@@ -3079,7 +3079,7 @@ static int gfx_v12_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset)
memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
/* reset the ring */
ring->wptr = 0;
*ring->wptr_cpu_addr = 0;
atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
amdgpu_ring_clear_ring(ring);
}
@@ -5297,11 +5297,12 @@ static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring,
struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
bool use_mmio = false;
int r;
amdgpu_ring_reset_helper_begin(ring, timedout_fence);
r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false);
r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, use_mmio);
if (r) {
dev_warn(adev->dev, "reset via MES failed and try pipe reset %d\n", r);
r = gfx_v12_reset_gfx_pipe(ring);
@@ -5309,16 +5310,18 @@ static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring,
return r;
}
r = gfx_v12_0_kgq_init_queue(ring, true);
if (r) {
dev_err(adev->dev, "failed to init kgq\n");
return r;
}
if (use_mmio) {
r = gfx_v12_0_kgq_init_queue(ring, true);
if (r) {
dev_err(adev->dev, "failed to init kgq\n");
return r;
}
r = amdgpu_mes_map_legacy_queue(adev, ring);
if (r) {
dev_err(adev->dev, "failed to remap kgq\n");
return r;
r = amdgpu_mes_map_legacy_queue(adev, ring);
if (r) {
dev_err(adev->dev, "failed to remap kgq\n");
return r;
}
}
return amdgpu_ring_reset_helper_end(ring, timedout_fence);

View File

@@ -225,7 +225,13 @@ static u32 soc21_get_config_memsize(struct amdgpu_device *adev)
static u32 soc21_get_xclk(struct amdgpu_device *adev)
{
return adev->clock.spll.reference_freq;
u32 reference_clock = adev->clock.spll.reference_freq;
/* reference clock is actually 99.81 Mhz rather than 100 Mhz */
if ((adev->flags & AMD_IS_APU) && reference_clock == 10000)
return 9981;
return reference_clock;
}

View File

@@ -217,7 +217,7 @@ svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn)
page = pfn_to_page(pfn);
svm_range_bo_ref(prange->svm_bo);
page->zone_device_data = prange->svm_bo;
zone_device_page_init(page, 0);
zone_device_page_init(page, page_pgmap(page), 0);
}
static void

View File

@@ -7754,10 +7754,12 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
/* Cancel and flush any pending HDMI HPD debounce work */
cancel_delayed_work_sync(&aconnector->hdmi_hpd_debounce_work);
if (aconnector->hdmi_prev_sink) {
dc_sink_release(aconnector->hdmi_prev_sink);
aconnector->hdmi_prev_sink = NULL;
if (aconnector->hdmi_hpd_debounce_delay_ms) {
cancel_delayed_work_sync(&aconnector->hdmi_hpd_debounce_work);
if (aconnector->hdmi_prev_sink) {
dc_sink_release(aconnector->hdmi_prev_sink);
aconnector->hdmi_prev_sink = NULL;
}
}
if (aconnector->bl_idx != -1) {

View File

@@ -80,15 +80,15 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;
bool is_vcn = block_type == AMD_IP_BLOCK_TYPE_VCN;
mutex_lock(&adev->pm.mutex);
if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state &&
(!is_vcn || adev->vcn.num_vcn_inst == 1)) {
dev_dbg(adev->dev, "IP block%d already in the target %s state!",
block_type, gate ? "gate" : "ungate");
return 0;
goto out_unlock;
}
mutex_lock(&adev->pm.mutex);
switch (block_type) {
case AMD_IP_BLOCK_TYPE_UVD:
case AMD_IP_BLOCK_TYPE_VCE:
@@ -115,6 +115,7 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
if (!ret)
atomic_set(&adev->pm.pwr_state[block_type], pwr_state);
out_unlock:
mutex_unlock(&adev->pm.mutex);
return ret;

View File

@@ -56,6 +56,7 @@
#define SMUQ10_TO_UINT(x) ((x) >> 10)
#define SMUQ10_FRAC(x) ((x) & 0x3ff)
#define SMUQ10_ROUND(x) ((SMUQ10_TO_UINT(x)) + ((SMUQ10_FRAC(x)) >= 0x200))
#define SMU_V13_SOFT_FREQ_ROUND(x) ((x) + 1)
extern const int pmfw_decoded_link_speed[5];
extern const int pmfw_decoded_link_width[7];

View File

@@ -57,6 +57,7 @@ extern const int decoded_link_width[8];
#define DECODE_GEN_SPEED(gen_speed_idx) (decoded_link_speed[gen_speed_idx])
#define DECODE_LANE_WIDTH(lane_width_idx) (decoded_link_width[lane_width_idx])
#define SMU_V14_SOFT_FREQ_ROUND(x) ((x) + 1)
struct smu_14_0_max_sustainable_clocks {
uint32_t display_clock;

View File

@@ -1555,6 +1555,7 @@ int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu,
return clk_id;
if (max > 0) {
max = SMU_V13_SOFT_FREQ_ROUND(max);
if (automatic)
param = (uint32_t)((clk_id << 16) | 0xffff);
else

View File

@@ -1178,6 +1178,7 @@ int smu_v14_0_set_soft_freq_limited_range(struct smu_context *smu,
return clk_id;
if (max > 0) {
max = SMU_V14_SOFT_FREQ_ROUND(max);
if (automatic)
param = (uint32_t)((clk_id << 16) | 0xffff);
else

View File

@@ -960,16 +960,21 @@ int drm_gem_change_handle_ioctl(struct drm_device *dev, void *data,
{
struct drm_gem_change_handle *args = data;
struct drm_gem_object *obj;
int ret;
int handle, ret;
if (!drm_core_check_feature(dev, DRIVER_GEM))
return -EOPNOTSUPP;
/* idr_alloc() limitation. */
if (args->new_handle > INT_MAX)
return -EINVAL;
handle = args->new_handle;
obj = drm_gem_object_lookup(file_priv, args->handle);
if (!obj)
return -ENOENT;
if (args->handle == args->new_handle) {
if (args->handle == handle) {
ret = 0;
goto out;
}
@@ -977,18 +982,19 @@ int drm_gem_change_handle_ioctl(struct drm_device *dev, void *data,
mutex_lock(&file_priv->prime.lock);
spin_lock(&file_priv->table_lock);
ret = idr_alloc(&file_priv->object_idr, obj,
args->new_handle, args->new_handle + 1, GFP_NOWAIT);
ret = idr_alloc(&file_priv->object_idr, obj, handle, handle + 1,
GFP_NOWAIT);
spin_unlock(&file_priv->table_lock);
if (ret < 0)
goto out_unlock;
if (obj->dma_buf) {
ret = drm_prime_add_buf_handle(&file_priv->prime, obj->dma_buf, args->new_handle);
ret = drm_prime_add_buf_handle(&file_priv->prime, obj->dma_buf,
handle);
if (ret < 0) {
spin_lock(&file_priv->table_lock);
idr_remove(&file_priv->object_idr, args->new_handle);
idr_remove(&file_priv->object_idr, handle);
spin_unlock(&file_priv->table_lock);
goto out_unlock;
}

View File

@@ -197,7 +197,7 @@ static void drm_pagemap_get_devmem_page(struct page *page,
struct drm_pagemap_zdd *zdd)
{
page->zone_device_data = drm_pagemap_zdd_get(zdd);
zone_device_page_init(page, 0);
zone_device_page_init(page, page_pgmap(page), 0);
}
/**

View File

@@ -528,6 +528,13 @@ static const struct component_ops imx_tve_ops = {
.bind = imx_tve_bind,
};
static void imx_tve_put_device(void *_dev)
{
struct device *dev = _dev;
put_device(dev);
}
static int imx_tve_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -549,6 +556,12 @@ static int imx_tve_probe(struct platform_device *pdev)
if (ddc_node) {
tve->ddc = of_find_i2c_adapter_by_node(ddc_node);
of_node_put(ddc_node);
if (tve->ddc) {
ret = devm_add_action_or_reset(dev, imx_tve_put_device,
&tve->ddc->dev);
if (ret)
return ret;
}
}
tve->mode = of_get_tve_mode(np);

View File

@@ -501,8 +501,6 @@ static const struct adreno_reglist a690_hwcg[] = {
{REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
{REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
{REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
{REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL, 0x10111},
{REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL, 0x5555},
{}
};

View File

@@ -425,7 +425,7 @@ nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm, bool is_large)
order = ilog2(DMEM_CHUNK_NPAGES);
}
zone_device_folio_init(folio, order);
zone_device_folio_init(folio, page_pgmap(folio_page(folio, 0)), order);
return page;
}

View File

@@ -6,6 +6,7 @@ config DRM_TYR
depends on RUST
depends on ARM || ARM64 || COMPILE_TEST
depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE
depends on COMMON_CLK
default n
help
Rust DRM driver for ARM Mali CSF-based GPUs.

View File

@@ -347,11 +347,10 @@ static bool is_bound(struct xe_config_group_device *dev)
return false;
ret = pci_get_drvdata(pdev);
pci_dev_put(pdev);
if (ret)
pci_dbg(pdev, "Already bound to driver\n");
pci_dev_put(pdev);
return ret;
}

View File

@@ -984,8 +984,6 @@ void xe_device_remove(struct xe_device *xe)
{
xe_display_unregister(xe);
xe_nvm_fini(xe);
drm_dev_unplug(&xe->drm);
xe_bo_pci_dev_remove_all(xe);

View File

@@ -190,9 +190,9 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
goto err_syncs;
}
if (xe_exec_queue_is_parallel(q)) {
err = copy_from_user(addresses, addresses_user, sizeof(u64) *
q->width);
if (args->num_batch_buffer && xe_exec_queue_is_parallel(q)) {
err = copy_from_user(addresses, addresses_user,
sizeof(u64) * q->width);
if (err) {
err = -EFAULT;
goto err_syncs;

View File

@@ -1185,7 +1185,7 @@ static ssize_t setup_invalidate_state_cache_wa(struct xe_lrc *lrc,
return -ENOSPC;
*cmd++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(1);
*cmd++ = CS_DEBUG_MODE1(0).addr;
*cmd++ = CS_DEBUG_MODE2(0).addr;
*cmd++ = _MASKED_BIT_ENABLE(INSTRUCTION_STATE_CACHE_INVALIDATE);
return cmd - batch;

View File

@@ -83,6 +83,27 @@ static bool xe_nvm_writable_override(struct xe_device *xe)
return writable_override;
}
static void xe_nvm_fini(void *arg)
{
struct xe_device *xe = arg;
struct intel_dg_nvm_dev *nvm = xe->nvm;
if (!xe->info.has_gsc_nvm)
return;
/* No access to internal NVM from VFs */
if (IS_SRIOV_VF(xe))
return;
/* Nvm pointer should not be NULL here */
if (WARN_ON(!nvm))
return;
auxiliary_device_delete(&nvm->aux_dev);
auxiliary_device_uninit(&nvm->aux_dev);
xe->nvm = NULL;
}
int xe_nvm_init(struct xe_device *xe)
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
@@ -132,39 +153,17 @@ int xe_nvm_init(struct xe_device *xe)
ret = auxiliary_device_init(aux_dev);
if (ret) {
drm_err(&xe->drm, "xe-nvm aux init failed %d\n", ret);
goto err;
kfree(nvm);
xe->nvm = NULL;
return ret;
}
ret = auxiliary_device_add(aux_dev);
if (ret) {
drm_err(&xe->drm, "xe-nvm aux add failed %d\n", ret);
auxiliary_device_uninit(aux_dev);
goto err;
xe->nvm = NULL;
return ret;
}
return 0;
err:
kfree(nvm);
xe->nvm = NULL;
return ret;
}
void xe_nvm_fini(struct xe_device *xe)
{
struct intel_dg_nvm_dev *nvm = xe->nvm;
if (!xe->info.has_gsc_nvm)
return;
/* No access to internal NVM from VFs */
if (IS_SRIOV_VF(xe))
return;
/* Nvm pointer should not be NULL here */
if (WARN_ON(!nvm))
return;
auxiliary_device_delete(&nvm->aux_dev);
auxiliary_device_uninit(&nvm->aux_dev);
xe->nvm = NULL;
return devm_add_action_or_reset(xe->drm.dev, xe_nvm_fini, xe);
}

View File

@@ -10,6 +10,4 @@ struct xe_device;
int xe_nvm_init(struct xe_device *xe);
void xe_nvm_fini(struct xe_device *xe);
#endif

View File

@@ -342,7 +342,6 @@ static const struct xe_device_desc lnl_desc = {
.has_display = true,
.has_flat_ccs = 1,
.has_pxp = true,
.has_mem_copy_instr = true,
.max_gt_per_tile = 2,
.needs_scratch = true,
.va_bits = 48,
@@ -363,7 +362,6 @@ static const struct xe_device_desc bmg_desc = {
.has_heci_cscfi = 1,
.has_late_bind = true,
.has_sriov = true,
.has_mem_copy_instr = true,
.max_gt_per_tile = 2,
.needs_scratch = true,
.subplatforms = (const struct xe_subplatform_desc[]) {
@@ -380,7 +378,6 @@ static const struct xe_device_desc ptl_desc = {
.has_display = true,
.has_flat_ccs = 1,
.has_sriov = true,
.has_mem_copy_instr = true,
.max_gt_per_tile = 2,
.needs_scratch = true,
.needs_shared_vf_gt_wq = true,
@@ -393,7 +390,6 @@ static const struct xe_device_desc nvls_desc = {
.dma_mask_size = 46,
.has_display = true,
.has_flat_ccs = 1,
.has_mem_copy_instr = true,
.max_gt_per_tile = 2,
.require_force_probe = true,
.va_bits = 48,
@@ -675,7 +671,6 @@ static int xe_info_init_early(struct xe_device *xe,
xe->info.has_pxp = desc->has_pxp;
xe->info.has_sriov = xe_configfs_primary_gt_allowed(to_pci_dev(xe->drm.dev)) &&
desc->has_sriov;
xe->info.has_mem_copy_instr = desc->has_mem_copy_instr;
xe->info.skip_guc_pc = desc->skip_guc_pc;
xe->info.skip_mtcfg = desc->skip_mtcfg;
xe->info.skip_pcode = desc->skip_pcode;
@@ -864,6 +859,7 @@ static int xe_info_init(struct xe_device *xe,
xe->info.has_range_tlb_inval = graphics_desc->has_range_tlb_inval;
xe->info.has_usm = graphics_desc->has_usm;
xe->info.has_64bit_timestamp = graphics_desc->has_64bit_timestamp;
xe->info.has_mem_copy_instr = GRAPHICS_VER(xe) >= 20;
xe_info_probe_tile_count(xe);

View File

@@ -46,7 +46,6 @@ struct xe_device_desc {
u8 has_late_bind:1;
u8 has_llc:1;
u8 has_mbx_power_limits:1;
u8 has_mem_copy_instr:1;
u8 has_pxp:1;
u8 has_sriov:1;
u8 needs_scratch:1;

View File

@@ -1078,6 +1078,9 @@ static int tegra241_vcmdq_hw_init_user(struct tegra241_vcmdq *vcmdq)
{
char header[64];
/* Reset VCMDQ */
tegra241_vcmdq_hw_deinit(vcmdq);
/* Configure the vcmdq only; User space does the enabling */
writeq_relaxed(vcmdq->cmdq.q.q_base, REG_VCMDQ_PAGE1(vcmdq, BASE));

View File

@@ -931,6 +931,8 @@ static __maybe_unused int __unmap_range(struct pt_range *range, void *arg,
struct pt_table_p *table)
{
struct pt_state pts = pt_init(range, level, table);
unsigned int flush_start_index = UINT_MAX;
unsigned int flush_end_index = UINT_MAX;
struct pt_unmap_args *unmap = arg;
unsigned int num_oas = 0;
unsigned int start_index;
@@ -986,6 +988,9 @@ static __maybe_unused int __unmap_range(struct pt_range *range, void *arg,
iommu_pages_list_add(&unmap->free_list,
pts.table_lower);
pt_clear_entries(&pts, ilog2(1));
if (pts.index < flush_start_index)
flush_start_index = pts.index;
flush_end_index = pts.index + 1;
}
pts.index++;
} else {
@@ -999,7 +1004,10 @@ start_oa:
num_contig_lg2 = pt_entry_num_contig_lg2(&pts);
pt_clear_entries(&pts, num_contig_lg2);
num_oas += log2_to_int(num_contig_lg2);
if (pts.index < flush_start_index)
flush_start_index = pts.index;
pts.index += log2_to_int(num_contig_lg2);
flush_end_index = pts.index;
}
if (pts.index >= pts.end_index)
break;
@@ -1007,7 +1015,8 @@ start_oa:
} while (true);
unmap->unmapped += log2_mul(num_oas, pt_table_item_lg2sz(&pts));
flush_writes_range(&pts, start_index, pts.index);
if (flush_start_index != flush_end_index)
flush_writes_range(&pts, flush_start_index, flush_end_index);
return ret;
}

View File

@@ -289,6 +289,7 @@ static void batch_clear(struct pfn_batch *batch)
batch->end = 0;
batch->pfns[0] = 0;
batch->npfns[0] = 0;
batch->kind = 0;
}
/*

View File

@@ -168,40 +168,34 @@ ls_extirq_parse_map(struct ls_extirq_data *priv, struct device_node *node)
return 0;
}
static int __init
ls_extirq_of_init(struct device_node *node, struct device_node *parent)
static int ls_extirq_probe(struct platform_device *pdev)
{
struct irq_domain *domain, *parent_domain;
struct device_node *node, *parent;
struct device *dev = &pdev->dev;
struct ls_extirq_data *priv;
int ret;
node = dev->of_node;
parent = of_irq_find_parent(node);
if (!parent)
return dev_err_probe(dev, -ENODEV, "Failed to get IRQ parent node\n");
parent_domain = irq_find_host(parent);
if (!parent_domain) {
pr_err("Cannot find parent domain\n");
ret = -ENODEV;
goto err_irq_find_host;
}
if (!parent_domain)
return dev_err_probe(dev, -EPROBE_DEFER, "Cannot find parent domain\n");
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
ret = -ENOMEM;
goto err_alloc_priv;
}
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return dev_err_probe(dev, -ENOMEM, "Failed to allocate memory\n");
/*
* All extirq OF nodes are under a scfg/syscon node with
* the 'ranges' property
*/
priv->intpcr = of_iomap(node, 0);
if (!priv->intpcr) {
pr_err("Cannot ioremap OF node %pOF\n", node);
ret = -ENOMEM;
goto err_iomap;
}
priv->intpcr = devm_of_iomap(dev, node, 0, NULL);
if (!priv->intpcr)
return dev_err_probe(dev, -ENOMEM, "Cannot ioremap OF node %pOF\n", node);
ret = ls_extirq_parse_map(priv, node);
if (ret)
goto err_parse_map;
return dev_err_probe(dev, ret, "Failed to parse IRQ map\n");
priv->big_endian = of_device_is_big_endian(node->parent);
priv->is_ls1021a_or_ls1043a = of_device_is_compatible(node, "fsl,ls1021a-extirq") ||
@@ -210,23 +204,26 @@ ls_extirq_of_init(struct device_node *node, struct device_node *parent)
domain = irq_domain_create_hierarchy(parent_domain, 0, priv->nirq, of_fwnode_handle(node),
&extirq_domain_ops, priv);
if (!domain) {
ret = -ENOMEM;
goto err_add_hierarchy;
}
if (!domain)
return dev_err_probe(dev, -ENOMEM, "Failed to add IRQ domain\n");
return 0;
err_add_hierarchy:
err_parse_map:
iounmap(priv->intpcr);
err_iomap:
kfree(priv);
err_alloc_priv:
err_irq_find_host:
return ret;
}
IRQCHIP_DECLARE(ls1021a_extirq, "fsl,ls1021a-extirq", ls_extirq_of_init);
IRQCHIP_DECLARE(ls1043a_extirq, "fsl,ls1043a-extirq", ls_extirq_of_init);
IRQCHIP_DECLARE(ls1088a_extirq, "fsl,ls1088a-extirq", ls_extirq_of_init);
static const struct of_device_id ls_extirq_dt_ids[] = {
{ .compatible = "fsl,ls1021a-extirq" },
{ .compatible = "fsl,ls1043a-extirq" },
{ .compatible = "fsl,ls1088a-extirq" },
{}
};
MODULE_DEVICE_TABLE(of, ls_extirq_dt_ids);
static struct platform_driver ls_extirq_driver = {
.probe = ls_extirq_probe,
.driver = {
.name = "ls-extirq",
.of_match_table = ls_extirq_dt_ids,
}
};
builtin_platform_driver(ls_extirq_driver);

View File

@@ -1107,17 +1107,13 @@ static void detached_dev_do_request(struct bcache_device *d,
if (bio_op(orig_bio) == REQ_OP_DISCARD &&
!bdev_max_discard_sectors(dc->bdev)) {
bio_end_io_acct(orig_bio, start_time);
bio_endio(orig_bio);
return;
}
clone_bio = bio_alloc_clone(dc->bdev, orig_bio, GFP_NOIO,
&d->bio_detached);
if (!clone_bio) {
orig_bio->bi_status = BLK_STS_RESOURCE;
bio_endio(orig_bio);
return;
}
ddip = container_of(clone_bio, struct detached_dev_io_private, bio);
/* Count on the bcache device */

View File

@@ -215,7 +215,7 @@ static const struct spinand_info esmt_c8_spinand_table[] = {
SPINAND_FACT_OTP_INFO(2, 0, &f50l1g41lb_fact_otp_ops)),
SPINAND_INFO("F50D1G41LB",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x11, 0x7f,
0x7f),
0x7f, 0x7f),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(1, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,

View File

@@ -2202,11 +2202,6 @@ skip_mac_set:
unblock_netpoll_tx();
}
/* broadcast mode uses the all_slaves to loop through slaves. */
if (bond_mode_can_use_xmit_hash(bond) ||
BOND_MODE(bond) == BOND_MODE_BROADCAST)
bond_update_slave_arr(bond, NULL);
if (!slave_dev->netdev_ops->ndo_bpf ||
!slave_dev->netdev_ops->ndo_xdp_xmit) {
if (bond->xdp_prog) {
@@ -2240,6 +2235,11 @@ skip_mac_set:
bpf_prog_inc(bond->xdp_prog);
}
/* broadcast mode uses the all_slaves to loop through slaves. */
if (bond_mode_can_use_xmit_hash(bond) ||
BOND_MODE(bond) == BOND_MODE_BROADCAST)
bond_update_slave_arr(bond, NULL);
bond_xdp_set_features(bond_dev);
slave_info(bond_dev, slave_dev, "Enslaving as %s interface with %s link\n",
@@ -3047,8 +3047,8 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32
__func__, &sip);
return;
}
slave->last_rx = jiffies;
slave->target_last_arp_rx[i] = jiffies;
WRITE_ONCE(slave->last_rx, jiffies);
WRITE_ONCE(slave->target_last_arp_rx[i], jiffies);
}
static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
@@ -3267,8 +3267,8 @@ static void bond_validate_na(struct bonding *bond, struct slave *slave,
__func__, saddr);
return;
}
slave->last_rx = jiffies;
slave->target_last_arp_rx[i] = jiffies;
WRITE_ONCE(slave->last_rx, jiffies);
WRITE_ONCE(slave->target_last_arp_rx[i], jiffies);
}
static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond,
@@ -3338,7 +3338,7 @@ int bond_rcv_validate(const struct sk_buff *skb, struct bonding *bond,
(slave_do_arp_validate_only(bond) && is_ipv6) ||
#endif
!slave_do_arp_validate_only(bond))
slave->last_rx = jiffies;
WRITE_ONCE(slave->last_rx, jiffies);
return RX_HANDLER_ANOTHER;
} else if (is_arp) {
return bond_arp_rcv(skb, bond, slave);
@@ -3406,7 +3406,7 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
if (slave->link != BOND_LINK_UP) {
if (bond_time_in_interval(bond, last_tx, 1) &&
bond_time_in_interval(bond, slave->last_rx, 1)) {
bond_time_in_interval(bond, READ_ONCE(slave->last_rx), 1)) {
bond_propose_link_state(slave, BOND_LINK_UP);
slave_state_changed = 1;
@@ -3430,8 +3430,10 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
* when the source ip is 0, so don't take the link down
* if we don't know our ip yet
*/
if (!bond_time_in_interval(bond, last_tx, bond->params.missed_max) ||
!bond_time_in_interval(bond, slave->last_rx, bond->params.missed_max)) {
if (!bond_time_in_interval(bond, last_tx,
bond->params.missed_max) ||
!bond_time_in_interval(bond, READ_ONCE(slave->last_rx),
bond->params.missed_max)) {
bond_propose_link_state(slave, BOND_LINK_DOWN);
slave_state_changed = 1;

View File

@@ -1152,7 +1152,7 @@ static void _bond_options_arp_ip_target_set(struct bonding *bond, int slot,
if (slot >= 0 && slot < BOND_MAX_ARP_TARGETS) {
bond_for_each_slave(bond, slave, iter)
slave->target_last_arp_rx[slot] = last_rx;
WRITE_ONCE(slave->target_last_arp_rx[slot], last_rx);
targets[slot] = target;
}
}
@@ -1221,8 +1221,8 @@ static int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target)
bond_for_each_slave(bond, slave, iter) {
targets_rx = slave->target_last_arp_rx;
for (i = ind; (i < BOND_MAX_ARP_TARGETS-1) && targets[i+1]; i++)
targets_rx[i] = targets_rx[i+1];
targets_rx[i] = 0;
WRITE_ONCE(targets_rx[i], READ_ONCE(targets_rx[i+1]));
WRITE_ONCE(targets_rx[i], 0);
}
for (i = ind; (i < BOND_MAX_ARP_TARGETS-1) && targets[i+1]; i++)
targets[i] = targets[i+1];
@@ -1377,7 +1377,7 @@ static void _bond_options_ns_ip6_target_set(struct bonding *bond, int slot,
if (slot >= 0 && slot < BOND_MAX_NS_TARGETS) {
bond_for_each_slave(bond, slave, iter) {
slave->target_last_arp_rx[slot] = last_rx;
WRITE_ONCE(slave->target_last_arp_rx[slot], last_rx);
slave_set_ns_maddr(bond, slave, target, &targets[slot]);
}
targets[slot] = *target;

View File

@@ -1099,7 +1099,7 @@ static int at91_can_probe(struct platform_device *pdev)
if (IS_ERR(transceiver)) {
err = PTR_ERR(transceiver);
dev_err_probe(&pdev->dev, err, "failed to get phy\n");
goto exit_iounmap;
goto exit_free;
}
dev->netdev_ops = &at91_netdev_ops;

View File

@@ -610,7 +610,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
{
struct gs_usb *parent = urb->context;
struct gs_can *dev;
struct net_device *netdev;
struct net_device *netdev = NULL;
int rc;
struct net_device_stats *stats;
struct gs_host_frame *hf = urb->transfer_buffer;
@@ -768,7 +768,7 @@ device_detach:
}
} else if (rc != -ESHUTDOWN && net_ratelimit()) {
netdev_info(netdev, "failed to re-submit IN URB: %pe\n",
ERR_PTR(urb->status));
ERR_PTR(rc));
}
}

View File

@@ -682,21 +682,22 @@ static int yt921x_read_mib(struct yt921x_priv *priv, int port)
const struct yt921x_mib_desc *desc = &yt921x_mib_descs[i];
u32 reg = YT921X_MIBn_DATA0(port) + desc->offset;
u64 *valp = &((u64 *)mib)[i];
u64 val = *valp;
u32 val0;
u32 val1;
u64 val;
res = yt921x_reg_read(priv, reg, &val0);
if (res)
break;
if (desc->size <= 1) {
if (val < (u32)val)
/* overflow */
val += (u64)U32_MAX + 1;
val &= ~U32_MAX;
val |= val0;
u64 old_val = *valp;
val = (old_val & ~(u64)U32_MAX) | val0;
if (val < old_val)
val += 1ull << 32;
} else {
u32 val1;
res = yt921x_reg_read(priv, reg + 4, &val1);
if (res)
break;

View File

@@ -1261,7 +1261,7 @@ struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv,
netdev_err(intf->ndev, "invalid PHY mode: %s for port %d\n",
phy_modes(intf->phy_interface), intf->port);
ret = -EINVAL;
goto err_free_netdev;
goto err_deregister_fixed_link;
}
ret = of_get_ethdev_address(ndev_dn, ndev);
@@ -1286,6 +1286,9 @@ struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv,
return intf;
err_deregister_fixed_link:
if (of_phy_is_fixed_link(ndev_dn))
of_phy_deregister_fixed_link(ndev_dn);
err_free_netdev:
free_netdev(ndev);
err:

View File

@@ -1206,6 +1206,11 @@ static inline bool gve_supports_xdp_xmit(struct gve_priv *priv)
}
}
static inline bool gve_is_clock_enabled(struct gve_priv *priv)
{
return priv->nic_ts_report;
}
/* gqi napi handler defined in gve_main.c */
int gve_napi_poll(struct napi_struct *napi, int budget);

View File

@@ -938,7 +938,7 @@ static int gve_get_ts_info(struct net_device *netdev,
ethtool_op_get_ts_info(netdev, info);
if (priv->nic_timestamp_supported) {
if (gve_is_clock_enabled(priv)) {
info->so_timestamping |= SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;

View File

@@ -680,10 +680,12 @@ static int gve_setup_device_resources(struct gve_priv *priv)
}
}
err = gve_init_clock(priv);
if (err) {
dev_err(&priv->pdev->dev, "Failed to init clock");
goto abort_with_ptype_lut;
if (priv->nic_timestamp_supported) {
err = gve_init_clock(priv);
if (err) {
dev_warn(&priv->pdev->dev, "Failed to init clock, continuing without PTP support");
err = 0;
}
}
err = gve_init_rss_config(priv, priv->rx_cfg.num_queues);
@@ -2183,7 +2185,7 @@ static int gve_set_ts_config(struct net_device *dev,
}
if (kernel_config->rx_filter != HWTSTAMP_FILTER_NONE) {
if (!priv->nic_ts_report) {
if (!gve_is_clock_enabled(priv)) {
NL_SET_ERR_MSG_MOD(extack,
"RX timestamping is not supported");
kernel_config->rx_filter = HWTSTAMP_FILTER_NONE;

View File

@@ -70,11 +70,6 @@ static int gve_ptp_init(struct gve_priv *priv)
struct gve_ptp *ptp;
int err;
if (!priv->nic_timestamp_supported) {
dev_dbg(&priv->pdev->dev, "Device does not support PTP\n");
return -EOPNOTSUPP;
}
priv->ptp = kzalloc(sizeof(*priv->ptp), GFP_KERNEL);
if (!priv->ptp)
return -ENOMEM;
@@ -116,9 +111,6 @@ int gve_init_clock(struct gve_priv *priv)
{
int err;
if (!priv->nic_timestamp_supported)
return 0;
err = gve_ptp_init(priv);
if (err)
return err;

View File

@@ -484,7 +484,7 @@ int gve_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
{
const struct gve_xdp_buff *ctx = (void *)_ctx;
if (!ctx->gve->nic_ts_report)
if (!gve_is_clock_enabled(ctx->gve))
return -ENODATA;
if (!(ctx->compl_desc->ts_sub_nsecs_low & GVE_DQO_RX_HWTSTAMP_VALID))

View File

@@ -2783,12 +2783,14 @@ void ice_vsi_set_napi_queues(struct ice_vsi *vsi)
ASSERT_RTNL();
ice_for_each_rxq(vsi, q_idx)
netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_RX,
&vsi->rx_rings[q_idx]->q_vector->napi);
if (vsi->rx_rings[q_idx] && vsi->rx_rings[q_idx]->q_vector)
netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_RX,
&vsi->rx_rings[q_idx]->q_vector->napi);
ice_for_each_txq(vsi, q_idx)
netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_TX,
&vsi->tx_rings[q_idx]->q_vector->napi);
if (vsi->tx_rings[q_idx] && vsi->tx_rings[q_idx]->q_vector)
netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_TX,
&vsi->tx_rings[q_idx]->q_vector->napi);
/* Also set the interrupt number for the NAPI */
ice_for_each_q_vector(vsi, v_idx) {
struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];

View File

@@ -6982,7 +6982,6 @@ void ice_update_vsi_stats(struct ice_vsi *vsi)
cur_ns->rx_errors = pf->stats.crc_errors +
pf->stats.illegal_bytes +
pf->stats.rx_undersize +
pf->hw_csum_rx_error +
pf->stats.rx_jabber +
pf->stats.rx_fragments +
pf->stats.rx_oversize;

View File

@@ -11468,20 +11468,17 @@ static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter)
*/
static int ixgbe_recovery_probe(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
struct ixgbe_hw *hw = &adapter->hw;
bool disable_dev;
int err = -EIO;
if (hw->mac.type != ixgbe_mac_e610)
goto clean_up_probe;
return err;
ixgbe_get_hw_control(adapter);
mutex_init(&hw->aci.lock);
err = ixgbe_get_flash_data(&adapter->hw);
if (err)
goto shutdown_aci;
goto err_release_hw_control;
timer_setup(&adapter->service_timer, ixgbe_service_timer, 0);
INIT_WORK(&adapter->service_task, ixgbe_recovery_service_task);
@@ -11504,16 +11501,8 @@ static int ixgbe_recovery_probe(struct ixgbe_adapter *adapter)
devl_unlock(adapter->devlink);
return 0;
shutdown_aci:
mutex_destroy(&adapter->hw.aci.lock);
err_release_hw_control:
ixgbe_release_hw_control(adapter);
clean_up_probe:
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
devlink_free(adapter->devlink);
pci_release_mem_regions(pdev);
if (disable_dev)
pci_disable_device(pdev);
return err;
}
@@ -11655,8 +11644,13 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_sw_init;
if (ixgbe_check_fw_error(adapter))
return ixgbe_recovery_probe(adapter);
if (ixgbe_check_fw_error(adapter)) {
err = ixgbe_recovery_probe(adapter);
if (err)
goto err_sw_init;
return 0;
}
if (adapter->hw.mac.type == ixgbe_mac_e610) {
err = ixgbe_get_caps(&adapter->hw);

View File

@@ -1389,7 +1389,7 @@ int mvpp2_ethtool_cls_rule_ins(struct mvpp2_port *port,
efs->rule.flow_type = mvpp2_cls_ethtool_flow_to_type(info->fs.flow_type);
if (efs->rule.flow_type < 0) {
ret = efs->rule.flow_type;
goto clean_rule;
goto clean_eth_rule;
}
ret = mvpp2_cls_rfs_parse_rule(&efs->rule);

View File

@@ -1338,7 +1338,7 @@ int octep_device_setup(struct octep_device *oct)
ret = octep_ctrl_net_init(oct);
if (ret)
return ret;
goto unsupported_dev;
INIT_WORK(&oct->tx_timeout_task, octep_tx_timeout_task);
INIT_WORK(&oct->ctrl_mbox_task, octep_ctrl_mbox_task);

View File

@@ -613,3 +613,19 @@ void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
cq->dbg = NULL;
}
}
static int vhca_id_show(struct seq_file *file, void *priv)
{
struct mlx5_core_dev *dev = file->private;
seq_printf(file, "0x%x\n", MLX5_CAP_GEN(dev, vhca_id));
return 0;
}
DEFINE_SHOW_ATTRIBUTE(vhca_id);
void mlx5_vhca_debugfs_init(struct mlx5_core_dev *dev)
{
debugfs_create_file("vhca_id", 0400, dev->priv.dbg.dbg_root, dev,
&vhca_id_fops);
}

View File

@@ -575,3 +575,17 @@ bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev
return plen && flen && flen == plen &&
!memcmp(fsystem_guid, psystem_guid, flen);
}
void mlx5_core_reps_aux_devs_remove(struct mlx5_core_dev *dev)
{
struct mlx5_priv *priv = &dev->priv;
if (priv->adev[MLX5_INTERFACE_PROTOCOL_ETH])
device_lock_assert(&priv->adev[MLX5_INTERFACE_PROTOCOL_ETH]->adev.dev);
else
mlx5_core_err(dev, "ETH driver already removed\n");
if (priv->adev[MLX5_INTERFACE_PROTOCOL_IB_REP])
del_adev(&priv->adev[MLX5_INTERFACE_PROTOCOL_IB_REP]->adev);
if (priv->adev[MLX5_INTERFACE_PROTOCOL_ETH_REP])
del_adev(&priv->adev[MLX5_INTERFACE_PROTOCOL_ETH_REP]->adev);
}

View File

@@ -430,7 +430,8 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
attrs->replay_esn.esn = sa_entry->esn_state.esn;
attrs->replay_esn.esn_msb = sa_entry->esn_state.esn_msb;
attrs->replay_esn.overlap = sa_entry->esn_state.overlap;
if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
if (attrs->dir == XFRM_DEV_OFFLOAD_OUT ||
x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
goto skip_replay_window;
switch (x->replay_esn->replay_window) {

View File

@@ -177,8 +177,6 @@ bool mlx5e_psp_handle_tx_skb(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct net *net = sock_net(skb->sk);
const struct ipv6hdr *ip6;
struct tcphdr *th;
if (!mlx5e_psp_set_state(priv, skb, psp_st))
return true;
@@ -190,11 +188,18 @@ bool mlx5e_psp_handle_tx_skb(struct net_device *netdev,
return false;
}
if (skb_is_gso(skb)) {
ip6 = ipv6_hdr(skb);
th = inner_tcp_hdr(skb);
int len = skb_shinfo(skb)->gso_size + inner_tcp_hdrlen(skb);
struct tcphdr *th = inner_tcp_hdr(skb);
th->check = ~tcp_v6_check(skb_shinfo(skb)->gso_size + inner_tcp_hdrlen(skb), &ip6->saddr,
&ip6->daddr, 0);
if (skb->protocol == htons(ETH_P_IP)) {
const struct iphdr *ip = ip_hdr(skb);
th->check = ~tcp_v4_check(len, ip->saddr, ip->daddr, 0);
} else {
const struct ipv6hdr *ip6 = ipv6_hdr(skb);
th->check = ~tcp_v6_check(len, &ip6->saddr, &ip6->daddr, 0);
}
}
return true;

View File

@@ -4052,6 +4052,8 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
mlx5e_queue_update_stats(priv);
}
netdev_stats_to_stats64(stats, &dev->stats);
if (mlx5e_is_uplink_rep(priv)) {
struct mlx5e_vport_stats *vstats = &priv->stats.vport;
@@ -4068,21 +4070,21 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
mlx5e_fold_sw_stats64(priv, stats);
}
stats->rx_missed_errors = priv->stats.qcnt.rx_out_of_buffer;
stats->rx_dropped = PPORT_2863_GET(pstats, if_in_discards);
stats->rx_missed_errors += priv->stats.qcnt.rx_out_of_buffer;
stats->rx_dropped += PPORT_2863_GET(pstats, if_in_discards);
stats->rx_length_errors =
stats->rx_length_errors +=
PPORT_802_3_GET(pstats, a_in_range_length_errors) +
PPORT_802_3_GET(pstats, a_out_of_range_length_field) +
PPORT_802_3_GET(pstats, a_frame_too_long_errors) +
VNIC_ENV_GET(&priv->stats.vnic, eth_wqe_too_small);
stats->rx_crc_errors =
stats->rx_crc_errors +=
PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
stats->rx_frame_errors;
stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;
stats->rx_frame_errors += PPORT_802_3_GET(pstats, a_alignment_errors);
stats->tx_aborted_errors += PPORT_2863_GET(pstats, if_out_discards);
stats->rx_errors += stats->rx_length_errors + stats->rx_crc_errors +
stats->rx_frame_errors;
stats->tx_errors += stats->tx_aborted_errors + stats->tx_carrier_errors;
}
static void mlx5e_nic_set_rx_mode(struct mlx5e_priv *priv)
@@ -6842,6 +6844,7 @@ static void _mlx5e_remove(struct auxiliary_device *adev)
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = edev->mdev;
mlx5_eswitch_safe_aux_devs_remove(mdev);
mlx5_core_uplink_netdev_set(mdev, NULL);
if (priv->profile)

View File

@@ -2147,11 +2147,14 @@ static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow,
static void mlx5e_tc_del_fdb_peers_flow(struct mlx5e_tc_flow *flow)
{
struct mlx5_devcom_comp_dev *devcom;
struct mlx5_devcom_comp_dev *pos;
struct mlx5_eswitch *peer_esw;
int i;
for (i = 0; i < MLX5_MAX_PORTS; i++) {
if (i == mlx5_get_dev_index(flow->priv->mdev))
continue;
devcom = flow->priv->mdev->priv.eswitch->devcom;
mlx5_devcom_for_each_peer_entry(devcom, peer_esw, pos) {
i = mlx5_get_dev_index(peer_esw->dev);
mlx5e_tc_del_fdb_peer_flow(flow, i);
}
}
@@ -5513,12 +5516,16 @@ int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags)
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw)
{
struct mlx5_devcom_comp_dev *devcom;
struct mlx5_devcom_comp_dev *pos;
struct mlx5e_tc_flow *flow, *tmp;
struct mlx5_eswitch *peer_esw;
int i;
for (i = 0; i < MLX5_MAX_PORTS; i++) {
if (i == mlx5_get_dev_index(esw->dev))
continue;
devcom = esw->devcom;
mlx5_devcom_for_each_peer_entry(devcom, peer_esw, pos) {
i = mlx5_get_dev_index(peer_esw->dev);
list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows[i], peer[i])
mlx5e_tc_del_fdb_peers_flow(flow);
}

View File

@@ -188,7 +188,7 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
if (IS_ERR(vport->ingress.acl)) {
err = PTR_ERR(vport->ingress.acl);
vport->ingress.acl = NULL;
return err;
goto out;
}
err = esw_acl_ingress_lgcy_groups_create(esw, vport);

View File

@@ -929,6 +929,7 @@ int mlx5_esw_ipsec_vf_packet_offload_set(struct mlx5_eswitch *esw, struct mlx5_v
int mlx5_esw_ipsec_vf_packet_offload_supported(struct mlx5_core_dev *dev,
u16 vport_num);
bool mlx5_esw_host_functions_enabled(const struct mlx5_core_dev *dev);
void mlx5_eswitch_safe_aux_devs_remove(struct mlx5_core_dev *dev);
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
@@ -1009,9 +1010,12 @@ mlx5_esw_host_functions_enabled(const struct mlx5_core_dev *dev)
static inline bool
mlx5_esw_vport_vhca_id(struct mlx5_eswitch *esw, u16 vportn, u16 *vhca_id)
{
return -EOPNOTSUPP;
return false;
}
static inline void
mlx5_eswitch_safe_aux_devs_remove(struct mlx5_core_dev *dev) {}
#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_ESWITCH_H__ */

View File

@@ -3981,6 +3981,32 @@ static bool mlx5_devlink_switchdev_active_mode_change(struct mlx5_eswitch *esw,
return true;
}
#define MLX5_ESW_HOLD_TIMEOUT_MS 7000
#define MLX5_ESW_HOLD_RETRY_DELAY_MS 500
void mlx5_eswitch_safe_aux_devs_remove(struct mlx5_core_dev *dev)
{
unsigned long timeout;
bool hold_esw = true;
/* Wait for any concurrent eswitch mode transition to complete. */
if (!mlx5_esw_hold(dev)) {
timeout = jiffies + msecs_to_jiffies(MLX5_ESW_HOLD_TIMEOUT_MS);
while (!mlx5_esw_hold(dev)) {
if (!time_before(jiffies, timeout)) {
hold_esw = false;
break;
}
msleep(MLX5_ESW_HOLD_RETRY_DELAY_MS);
}
}
if (hold_esw) {
if (mlx5_eswitch_mode(dev) == MLX5_ESWITCH_OFFLOADS)
mlx5_core_reps_aux_devs_remove(dev);
mlx5_esw_release(dev);
}
}
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack)
{

View File

@@ -1198,7 +1198,8 @@ int mlx5_fs_cmd_set_tx_flow_table_root(struct mlx5_core_dev *dev, u32 ft_id, boo
u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {};
u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
if (disconnect && MLX5_CAP_FLOWTABLE_NIC_TX(dev, reset_root_to_default))
if (disconnect &&
!MLX5_CAP_FLOWTABLE_NIC_TX(dev, reset_root_to_default))
return -EOPNOTSUPP;
MLX5_SET(set_flow_table_root_in, in, opcode,

View File

@@ -1806,16 +1806,6 @@ err:
return -ENOMEM;
}
static int vhca_id_show(struct seq_file *file, void *priv)
{
struct mlx5_core_dev *dev = file->private;
seq_printf(file, "0x%x\n", MLX5_CAP_GEN(dev, vhca_id));
return 0;
}
DEFINE_SHOW_ATTRIBUTE(vhca_id);
static int mlx5_notifiers_init(struct mlx5_core_dev *dev)
{
int err;
@@ -1884,7 +1874,7 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
priv->numa_node = dev_to_node(mlx5_core_dma_dev(dev));
priv->dbg.dbg_root = debugfs_create_dir(dev_name(dev->device),
mlx5_debugfs_root);
debugfs_create_file("vhca_id", 0400, priv->dbg.dbg_root, dev, &vhca_id_fops);
INIT_LIST_HEAD(&priv->traps);
err = mlx5_cmd_init(dev);
@@ -2022,6 +2012,8 @@ static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_init_one;
}
mlx5_vhca_debugfs_init(dev);
pci_save_state(pdev);
return 0;

View File

@@ -258,6 +258,7 @@ int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages);
void mlx5_cmd_flush(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
void mlx5_vhca_debugfs_init(struct mlx5_core_dev *dev);
int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group,
u8 access_reg_group);
@@ -290,6 +291,7 @@ int mlx5_register_device(struct mlx5_core_dev *dev);
void mlx5_unregister_device(struct mlx5_core_dev *dev);
void mlx5_dev_set_lightweight(struct mlx5_core_dev *dev);
bool mlx5_dev_is_lightweight(struct mlx5_core_dev *dev);
void mlx5_core_reps_aux_devs_remove(struct mlx5_core_dev *dev);
void mlx5_fw_reporters_create(struct mlx5_core_dev *dev);
int mlx5_query_mtpps(struct mlx5_core_dev *dev, u32 *mtpps, u32 mtpps_size);

View File

@@ -76,6 +76,7 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia
goto init_one_err;
}
mlx5_vhca_debugfs_init(mdev);
return 0;
init_one_err:

View File

@@ -1524,9 +1524,8 @@ static void rocker_world_port_post_fini(struct rocker_port *rocker_port)
{
struct rocker_world_ops *wops = rocker_port->rocker->wops;
if (!wops->port_post_fini)
return;
wops->port_post_fini(rocker_port);
if (wops->port_post_fini)
wops->port_post_fini(rocker_port);
kfree(rocker_port->wpriv);
}

View File

@@ -2182,12 +2182,7 @@ int efx_mcdi_rx_pull_rss_context_config(struct efx_nic *efx,
int efx_mcdi_rx_pull_rss_config(struct efx_nic *efx)
{
int rc;
mutex_lock(&efx->net_dev->ethtool->rss_lock);
rc = efx_mcdi_rx_pull_rss_context_config(efx, &efx->rss_context);
mutex_unlock(&efx->net_dev->ethtool->rss_lock);
return rc;
return efx_mcdi_rx_pull_rss_context_config(efx, &efx->rss_context);
}
void efx_mcdi_rx_restore_rss_contexts(struct efx_nic *efx)

View File

@@ -1099,7 +1099,13 @@ static int emac_read_stat_cnt(struct emac_priv *priv, u8 cnt, u32 *res,
100, 10000);
if (ret) {
netdev_err(priv->ndev, "Read stat timeout\n");
/*
* This could be caused by the PHY stopping its refclk even when
* the link is up, for power saving. See also comments in
* emac_stats_update().
*/
dev_err_ratelimited(&priv->ndev->dev,
"Read stat timeout. PHY clock stopped?\n");
return ret;
}
@@ -1147,17 +1153,25 @@ static void emac_stats_update(struct emac_priv *priv)
assert_spin_locked(&priv->stats_lock);
if (!netif_running(priv->ndev) || !netif_device_present(priv->ndev)) {
/* Not up, don't try to update */
/*
* We can't read statistics if the interface is not up. Also, some PHYs
* stop their reference clocks for link down power saving, which also
* causes reading statistics to time out. Don't update and don't
* reschedule in these cases.
*/
if (!netif_running(priv->ndev) ||
!netif_carrier_ok(priv->ndev) ||
!netif_device_present(priv->ndev)) {
return;
}
for (i = 0; i < sizeof(priv->tx_stats) / sizeof(*tx_stats); i++) {
/*
* If reading stats times out, everything is broken and there's
* nothing we can do. Reading statistics also can't return an
* error, so just return without updating and without
* rescheduling.
* If reading stats times out anyway, the stat registers will be
* stuck, and we can't really recover from that.
*
* Reading statistics also can't return an error, so just return
* without updating and without rescheduling.
*/
if (emac_tx_read_stat_cnt(priv, i, &res))
return;
@@ -1636,6 +1650,12 @@ static void emac_adjust_link(struct net_device *dev)
emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);
emac_set_fc_autoneg(priv);
/*
* Reschedule stats updates now that link is up. See comments in
* emac_stats_update().
*/
mod_timer(&priv->stats_timer, jiffies);
}
phy_print_status(phydev);

View File

@@ -2643,11 +2643,21 @@ static int kszphy_probe(struct phy_device *phydev)
kszphy_parse_led_mode(phydev);
clk = devm_clk_get_optional_enabled(&phydev->mdio.dev, "rmii-ref");
clk = devm_clk_get_optional(&phydev->mdio.dev, "rmii-ref");
/* NOTE: clk may be NULL if building without CONFIG_HAVE_CLK */
if (!IS_ERR_OR_NULL(clk)) {
unsigned long rate = clk_get_rate(clk);
bool rmii_ref_clk_sel_25_mhz;
unsigned long rate;
int err;
err = clk_prepare_enable(clk);
if (err) {
phydev_err(phydev, "Failed to enable rmii-ref clock\n");
return err;
}
rate = clk_get_rate(clk);
clk_disable_unprepare(clk);
if (type)
priv->rmii_ref_clk_sel = type->has_rmii_ref_clk_sel;
@@ -2665,13 +2675,12 @@ static int kszphy_probe(struct phy_device *phydev)
}
} else if (!clk) {
/* unnamed clock from the generic ethernet-phy binding */
clk = devm_clk_get_optional_enabled(&phydev->mdio.dev, NULL);
clk = devm_clk_get_optional(&phydev->mdio.dev, NULL);
}
if (IS_ERR(clk))
return PTR_ERR(clk);
clk_disable_unprepare(clk);
priv->clk = clk;
if (ksz8041_fiber_mode(phydev))

View File

@@ -395,6 +395,7 @@ static int t7xx_dpmaif_set_frag_to_skb(const struct dpmaif_rx_queue *rxq,
struct sk_buff *skb)
{
unsigned long long data_bus_addr, data_base_addr;
struct skb_shared_info *shinfo = skb_shinfo(skb);
struct device *dev = rxq->dpmaif_ctrl->dev;
struct dpmaif_bat_page *page_info;
unsigned int data_len;
@@ -402,18 +403,22 @@ static int t7xx_dpmaif_set_frag_to_skb(const struct dpmaif_rx_queue *rxq,
page_info = rxq->bat_frag->bat_skb;
page_info += t7xx_normal_pit_bid(pkt_info);
dma_unmap_page(dev, page_info->data_bus_addr, page_info->data_len, DMA_FROM_DEVICE);
if (!page_info->page)
return -EINVAL;
if (shinfo->nr_frags >= MAX_SKB_FRAGS)
return -EINVAL;
dma_unmap_page(dev, page_info->data_bus_addr, page_info->data_len, DMA_FROM_DEVICE);
data_bus_addr = le32_to_cpu(pkt_info->pd.data_addr_h);
data_bus_addr = (data_bus_addr << 32) + le32_to_cpu(pkt_info->pd.data_addr_l);
data_base_addr = page_info->data_bus_addr;
data_offset = data_bus_addr - data_base_addr;
data_offset += page_info->offset;
data_len = FIELD_GET(PD_PIT_DATA_LEN, le32_to_cpu(pkt_info->header));
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page_info->page,
skb_add_rx_frag(skb, shinfo->nr_frags, page_info->page,
data_offset, data_len, page_info->data_len);
page_info->page = NULL;

View File

@@ -806,8 +806,8 @@ static void nvme_unmap_data(struct request *req)
if (!blk_rq_dma_unmap(req, dma_dev, &iod->dma_state, iod->total_len,
map)) {
if (nvme_pci_cmd_use_sgl(&iod->cmd))
nvme_free_sgls(req, iod->descriptors[0],
&iod->cmd.common.dptr.sgl, attrs);
nvme_free_sgls(req, &iod->cmd.common.dptr.sgl,
iod->descriptors[0], attrs);
else
nvme_free_prps(req, attrs);
}

View File

@@ -180,9 +180,10 @@ u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
static void nvmet_bio_done(struct bio *bio)
{
struct nvmet_req *req = bio->bi_private;
blk_status_t blk_status = bio->bi_status;
nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
nvmet_req_bio_put(req, bio);
nvmet_req_complete(req, blk_to_nvme_status(req, blk_status));
}
#ifdef CONFIG_BLK_DEV_INTEGRITY

View File

@@ -157,13 +157,19 @@ static int __init __reserved_mem_reserve_reg(unsigned long node,
phys_addr_t base, size;
int i, len;
const __be32 *prop;
bool nomap;
bool nomap, default_cma;
prop = of_flat_dt_get_addr_size_prop(node, "reg", &len);
if (!prop)
return -ENOENT;
nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL);
if (default_cma && cma_skip_dt_default_reserved_mem()) {
pr_err("Skipping dt linux,cma-default for \"cma=\" kernel param.\n");
return -EINVAL;
}
for (i = 0; i < len; i++) {
u64 b, s;
@@ -248,10 +254,13 @@ void __init fdt_scan_reserved_mem_reg_nodes(void)
fdt_for_each_subnode(child, fdt, node) {
const char *uname;
bool default_cma = of_get_flat_dt_prop(child, "linux,cma-default", NULL);
u64 b, s;
if (!of_fdt_device_is_available(fdt, child))
continue;
if (default_cma && cma_skip_dt_default_reserved_mem())
continue;
if (!of_flat_dt_get_addr_size(child, "reg", &b, &s))
continue;
@@ -389,7 +398,7 @@ static int __init __reserved_mem_alloc_size(unsigned long node, const char *unam
phys_addr_t base = 0, align = 0, size;
int i, len;
const __be32 *prop;
bool nomap;
bool nomap, default_cma;
int ret;
prop = of_get_flat_dt_prop(node, "size", &len);
@@ -413,6 +422,12 @@ static int __init __reserved_mem_alloc_size(unsigned long node, const char *unam
}
nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL);
if (default_cma && cma_skip_dt_default_reserved_mem()) {
pr_err("Skipping dt linux,cma-default for \"cma=\" kernel param.\n");
return -EINVAL;
}
/* Need adjust the alignment to satisfy the CMA requirement */
if (IS_ENABLED(CONFIG_CMA)

View File

@@ -619,7 +619,7 @@ static int meson_gpiolib_register(struct meson_pinctrl *pc)
pc->chip.set = meson_gpio_set;
pc->chip.base = -1;
pc->chip.ngpio = pc->data->num_pins;
pc->chip.can_sleep = false;
pc->chip.can_sleep = true;
ret = gpiochip_add_data(&pc->chip, pc);
if (ret) {

View File

@@ -3545,10 +3545,9 @@ static int rockchip_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
return 0;
}
static int rockchip_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
unsigned offset,
bool input)
static int rockchip_pmx_gpio_request_enable(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
unsigned int offset)
{
struct rockchip_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
struct rockchip_pin_bank *bank;
@@ -3562,7 +3561,7 @@ static const struct pinmux_ops rockchip_pmx_ops = {
.get_function_name = rockchip_pmx_get_func_name,
.get_function_groups = rockchip_pmx_get_groups,
.set_mux = rockchip_pmx_set,
.gpio_set_direction = rockchip_pmx_gpio_set_direction,
.gpio_request_enable = rockchip_pmx_gpio_request_enable,
};
/*

View File

@@ -287,7 +287,7 @@ static const struct pinctrl_pin_desc th1520_group3_pins[] = {
TH1520_PAD(5, QSPI0_D0_MOSI, QSPI, PWM, I2S, GPIO, ____, ____, 0),
TH1520_PAD(6, QSPI0_D1_MISO, QSPI, PWM, I2S, GPIO, ____, ____, 0),
TH1520_PAD(7, QSPI0_D2_WP, QSPI, PWM, I2S, GPIO, ____, ____, 0),
TH1520_PAD(8, QSPI1_D3_HOLD, QSPI, ____, I2S, GPIO, ____, ____, 0),
TH1520_PAD(8, QSPI0_D3_HOLD, QSPI, ____, I2S, GPIO, ____, ____, 0),
TH1520_PAD(9, I2C2_SCL, I2C, UART, ____, GPIO, ____, ____, 0),
TH1520_PAD(10, I2C2_SDA, I2C, UART, ____, GPIO, ____, ____, 0),
TH1520_PAD(11, I2C3_SCL, I2C, ____, ____, GPIO, ____, ____, 0),

View File

@@ -61,13 +61,14 @@ config PINCTRL_LPASS_LPI
(Low Power Island) found on the Qualcomm Technologies Inc SoCs.
config PINCTRL_SC7280_LPASS_LPI
tristate "Qualcomm Technologies Inc SC7280 LPASS LPI pin controller driver"
tristate "Qualcomm Technologies Inc SC7280 and SM8350 LPASS LPI pin controller driver"
depends on ARM64 || COMPILE_TEST
depends on PINCTRL_LPASS_LPI
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm Technologies Inc LPASS (Low Power Audio SubSystem) LPI
(Low Power Island) found on the Qualcomm Technologies Inc SC7280 platform.
(Low Power Island) found on the Qualcomm Technologies Inc SC7280
and SM8350 platforms.
config PINCTRL_SDM660_LPASS_LPI
tristate "Qualcomm Technologies Inc SDM660 LPASS LPI pin controller driver"
@@ -106,16 +107,6 @@ config PINCTRL_SM8250_LPASS_LPI
Qualcomm Technologies Inc LPASS (Low Power Audio SubSystem) LPI
(Low Power Island) found on the Qualcomm Technologies Inc SM8250 platform.
config PINCTRL_SM8350_LPASS_LPI
tristate "Qualcomm Technologies Inc SM8350 LPASS LPI pin controller driver"
depends on ARM64 || COMPILE_TEST
depends on PINCTRL_LPASS_LPI
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm Technologies Inc LPASS (Low Power Audio SubSystem) LPI
(Low Power Island) found on the Qualcomm Technologies Inc SM8350
platform.
config PINCTRL_SM8450_LPASS_LPI
tristate "Qualcomm Technologies Inc SM8450 LPASS LPI pin controller driver"
depends on ARM64 || COMPILE_TEST

View File

@@ -64,7 +64,6 @@ obj-$(CONFIG_PINCTRL_SM8150) += pinctrl-sm8150.o
obj-$(CONFIG_PINCTRL_SM8250) += pinctrl-sm8250.o
obj-$(CONFIG_PINCTRL_SM8250_LPASS_LPI) += pinctrl-sm8250-lpass-lpi.o
obj-$(CONFIG_PINCTRL_SM8350) += pinctrl-sm8350.o
obj-$(CONFIG_PINCTRL_SM8350_LPASS_LPI) += pinctrl-sm8350-lpass-lpi.o
obj-$(CONFIG_PINCTRL_SM8450) += pinctrl-sm8450.o
obj-$(CONFIG_PINCTRL_SM8450_LPASS_LPI) += pinctrl-sm8450-lpass-lpi.o
obj-$(CONFIG_PINCTRL_SM8550) += pinctrl-sm8550.o

Some files were not shown because too many files have changed in this diff Show More