
ASoC: Fixes for v6.17

A few fixes that came in during the past week; there are some updates for
 the CS35L56 which adjust the driver for production silicon and a fix for
 buggy resume of the ES9389.
 -----BEGIN PGP SIGNATURE-----
 
 iQEzBAABCgAdFiEEreZoqmdXGLWf4p/qJNaLcl1Uh9AFAmimWXAACgkQJNaLcl1U
 h9BGKAf+MczirBmF8BaLvCYqWRWmqhSMuNsKibzl8E3Acf2/NHhX1sdKsqOPss4L
 Lje68EfywFP8U8/iARv6Aiijm1oIpF6C6U5GAd5ArInfmkIgITGF+OYObAmZdVbm
 7NX8xSk4KgwCzZOv+3JG34wECmVtXrBnNpd7/Bo+RM+xxcxyUVUFcdMPuoUQvef9
 Jc2cRDn9N9Lo7qi9DgXaBBH/cW4cgic1+CWEmoxSMtQJ1hXaxgaRYskfb5j4Sb1f
 w1Lhw46eMMuTOkKQ1G8sJ0zqM/bS8ZkT0IFSSNDSRXXpLThPj/fNBjxYYBM01brG
 r0shD5ju/2OgS13wWMPkfijzaU5dWw==
 =nURg
 -----END PGP SIGNATURE-----

Merge tag 'asoc-fix-v6.17-rc2' of https://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into for-linus

ASoC: Fixes for v6.17

A few fixes that came in during the past week; there are some updates for
the CS35L56 which adjust the driver for production silicon and a fix for
buggy resume of the ES9389.
Takashi Iwai 2025-08-21 09:02:28 +02:00
commit 279eb50aa8
191 changed files with 1638 additions and 1145 deletions


@@ -731,7 +731,7 @@ Contact: linux-block@vger.kernel.org
Description:
[RW] If the device is registered for writeback throttling, then
this file shows the target minimum read latency. If this latency
is exceeded in a given window of time (see wb_window_usec), then
is exceeded in a given window of time (see curr_win_nsec), then
the writeback throttling will start scaling back writes. Writing
a value of '0' to this file disables the feature. Writing a
value of '-1' to this file resets the value to the default


@@ -79,7 +79,7 @@ zone_capacity_mb Device zone capacity (must always be equal to or lower than
the zone size. Default: zone size.
conv_zones Total number of conventional zones starting from sector 0.
Default: 8.
base_dir Path to the base directoy where to create the directory
base_dir Path to the base directory where to create the directory
containing the zone files of the device.
Default=/var/local/zloop.
The device directory containing the zone files is always


@@ -214,7 +214,7 @@ Spectre_v1 X
Spectre_v2 X X
Spectre_v2_user X X * (Note 1)
SRBDS X X X X
SRSO X X
SRSO X X X X
SSB (Note 4)
TAA X X X X * (Note 2)
TSA X X X X


@@ -62,11 +62,13 @@ properties:
items:
- description: GMAC main clock
- description: Peripheral registers interface clock
- description: APB glue registers interface clock
clock-names:
items:
- const: stmmaceth
- const: pclk
- const: apb
interrupts:
items:
@@ -88,8 +90,8 @@ examples:
compatible = "thead,th1520-gmac", "snps,dwmac-3.70a";
reg = <0xe7070000 0x2000>, <0xec003000 0x1000>;
reg-names = "dwmac", "apb";
clocks = <&clk 1>, <&clk 2>;
clock-names = "stmmaceth", "pclk";
clocks = <&clk 1>, <&clk 2>, <&clk 3>;
clock-names = "stmmaceth", "pclk", "apb";
interrupts = <66>;
interrupt-names = "macirq";
phy-mode = "rgmii-id";


@@ -1420,7 +1420,7 @@ udp_hash_entries - INTEGER
A negative value means the networking namespace does not own its
hash buckets and shares the initial networking namespace's one.
udp_child_ehash_entries - INTEGER
udp_child_hash_entries - INTEGER
Control the number of hash buckets for UDP sockets in the child
networking namespace, which must be set before clone() or unshare().


@@ -11438,6 +11438,7 @@ F: drivers/tty/hvc/
HUNG TASK DETECTOR
M: Andrew Morton <akpm@linux-foundation.org>
R: Lance Yang <lance.yang@linux.dev>
R: Masami Hiramatsu <mhiramat@kernel.org>
L: linux-kernel@vger.kernel.org
S: Maintained
F: include/linux/hung_task.h
@@ -12583,10 +12584,9 @@ S: Supported
F: drivers/cpufreq/intel_pstate.c
INTEL PTP DFL ToD DRIVER
M: Tianfei Zhang <tianfei.zhang@intel.com>
L: linux-fpga@vger.kernel.org
L: netdev@vger.kernel.org
S: Maintained
S: Orphan
F: drivers/ptp/ptp_dfl_tod.c
INTEL QUADRATURE ENCODER PERIPHERAL DRIVER
@@ -12724,9 +12724,8 @@ S: Maintained
F: drivers/platform/x86/intel/wmi/thunderbolt.c
INTEL WWAN IOSM DRIVER
M: M Chetan Kumar <m.chetan.kumar@intel.com>
L: netdev@vger.kernel.org
S: Maintained
S: Orphan
F: drivers/net/wwan/iosm/
INTEL(R) FLEXIBLE RETURN AND EVENT DELIVERY
@@ -13686,7 +13685,6 @@ F: scripts/Makefile.kmsan
KPROBES
M: Naveen N Rao <naveen@kernel.org>
M: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
M: "David S. Miller" <davem@davemloft.net>
M: Masami Hiramatsu <mhiramat@kernel.org>
L: linux-kernel@vger.kernel.org
@@ -15674,7 +15672,6 @@ MEDIATEK T7XX 5G WWAN MODEM DRIVER
M: Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
R: Chiranjeevi Rapolu <chiranjeevi.rapolu@linux.intel.com>
R: Liu Haijun <haijun.liu@mediatek.com>
R: M Chetan Kumar <m.chetan.kumar@linux.intel.com>
R: Ricardo Martinez <ricardo.martinez@linux.intel.com>
L: netdev@vger.kernel.org
S: Supported
@@ -17451,6 +17448,7 @@ F: drivers/net/ethernet/neterion/
NETFILTER
M: Pablo Neira Ayuso <pablo@netfilter.org>
M: Jozsef Kadlecsik <kadlec@netfilter.org>
M: Florian Westphal <fw@strlen.de>
L: netfilter-devel@vger.kernel.org
L: coreteam@netfilter.org
S: Maintained


@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 17
SUBLEVEL = 0
EXTRAVERSION = -rc1
EXTRAVERSION = -rc2
NAME = Baby Opossum Posse
# *DOCUMENTATION*


@@ -297,8 +297,9 @@
reg-names = "dwmac", "apb";
interrupts = <67 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "macirq";
clocks = <&clk CLK_GMAC_AXI>, <&clk CLK_GMAC1>;
clock-names = "stmmaceth", "pclk";
clocks = <&clk CLK_GMAC_AXI>, <&clk CLK_GMAC1>,
<&clk CLK_PERISYS_APB4_HCLK>;
clock-names = "stmmaceth", "pclk", "apb";
snps,pbl = <32>;
snps,fixed-burst;
snps,multicast-filter-bins = <64>;
@@ -319,8 +320,9 @@
reg-names = "dwmac", "apb";
interrupts = <66 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "macirq";
clocks = <&clk CLK_GMAC_AXI>, <&clk CLK_GMAC0>;
clock-names = "stmmaceth", "pclk";
clocks = <&clk CLK_GMAC_AXI>, <&clk CLK_GMAC0>,
<&clk CLK_PERISYS_APB4_HCLK>;
clock-names = "stmmaceth", "pclk", "apb";
snps,pbl = <32>;
snps,fixed-burst;
snps,multicast-filter-bins = <64>;


@@ -106,5 +106,18 @@ void get_cpuflags(void)
cpuid(0x80000001, &ignored, &ignored, &cpu.flags[6],
&cpu.flags[1]);
}
if (max_amd_level >= 0x8000001f) {
u32 ebx;
/*
* The X86_FEATURE_COHERENCY_SFW_NO feature bit is in
* the virtualization flags entry (word 8) and set by
* scattered.c, so the bit needs to be explicitly set.
*/
cpuid(0x8000001f, &ignored, &ebx, &ignored, &ignored);
if (ebx & BIT(31))
set_bit(X86_FEATURE_COHERENCY_SFW_NO, cpu.flags);
}
}
}
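For reference, the bit the boot code scans above can be queried the same way from user space with the compiler's <cpuid.h> helpers. A minimal illustrative sketch (not part of the patch):

#include <cpuid.h>
#include <stdio.h>

/* Report CPUID leaf 0x8000001f EBX bit 31, which the boot code above
 * folds into cpu.flags as X86_FEATURE_COHERENCY_SFW_NO. */
int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* __get_cpuid() returns 0 if the extended leaf is unsupported. */
	if (!__get_cpuid(0x8000001f, &eax, &ebx, &ecx, &edx))
		return 1;

	printf("COHERENCY_SFW_NO: %s\n", (ebx & (1u << 31)) ? "yes" : "no");
	return 0;
}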


@@ -785,6 +785,7 @@ static void __head svsm_pval_4k_page(unsigned long paddr, bool validate)
pc->entry[0].page_size = RMP_PG_SIZE_4K;
pc->entry[0].action = validate;
pc->entry[0].ignore_cf = 0;
pc->entry[0].rsvd = 0;
pc->entry[0].pfn = paddr >> PAGE_SHIFT;
/* Protocol 0, Call ID 1 */
@@ -810,6 +811,13 @@ static void __head pvalidate_4k_page(unsigned long vaddr, unsigned long paddr,
if (ret)
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
}
/*
* If validating memory (making it private) and affected by the
* cache-coherency vulnerability, perform the cache eviction mitigation.
*/
if (validate && !has_cpuflag(X86_FEATURE_COHERENCY_SFW_NO))
sev_evict_cache((void *)vaddr, 1);
}
/*


@@ -227,6 +227,7 @@ static u64 svsm_build_ca_from_pfn_range(u64 pfn, u64 pfn_end, bool action,
pe->page_size = RMP_PG_SIZE_4K;
pe->action = action;
pe->ignore_cf = 0;
pe->rsvd = 0;
pe->pfn = pfn;
pe++;
@@ -257,6 +258,7 @@ static int svsm_build_ca_from_psc_desc(struct snp_psc_desc *desc, unsigned int d
pe->page_size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K;
pe->action = e->operation == SNP_PAGE_STATE_PRIVATE;
pe->ignore_cf = 0;
pe->rsvd = 0;
pe->pfn = e->gfn;
pe++;
@@ -358,10 +360,31 @@ static void svsm_pval_pages(struct snp_psc_desc *desc)
static void pvalidate_pages(struct snp_psc_desc *desc)
{
struct psc_entry *e;
unsigned int i;
if (snp_vmpl)
svsm_pval_pages(desc);
else
pval_pages(desc);
/*
* If not affected by the cache-coherency vulnerability there is no need
* to perform the cache eviction mitigation.
*/
if (cpu_feature_enabled(X86_FEATURE_COHERENCY_SFW_NO))
return;
for (i = 0; i <= desc->hdr.end_entry; i++) {
e = &desc->entries[i];
/*
* If validating memory (making it private) perform the cache
* eviction mitigation.
*/
if (e->operation == SNP_PAGE_STATE_PRIVATE)
sev_evict_cache(pfn_to_kaddr(e->gfn), e->pagesize ? 512 : 1);
}
}
static int vmgexit_psc(struct ghcb *ghcb, struct snp_psc_desc *desc)


@@ -371,29 +371,30 @@ static enum es_result __vc_handle_msr_caa(struct pt_regs *regs, bool write)
* executing with Secure TSC enabled, so special handling is required for
* accesses of MSR_IA32_TSC and MSR_AMD64_GUEST_TSC_FREQ.
*/
static enum es_result __vc_handle_secure_tsc_msrs(struct pt_regs *regs, bool write)
static enum es_result __vc_handle_secure_tsc_msrs(struct es_em_ctxt *ctxt, bool write)
{
struct pt_regs *regs = ctxt->regs;
u64 tsc;
/*
* GUEST_TSC_FREQ should not be intercepted when Secure TSC is enabled.
* Terminate the SNP guest when the interception is enabled.
* Writing to MSR_IA32_TSC can cause subsequent reads of the TSC to
* return undefined values, and GUEST_TSC_FREQ is read-only. Generate
* a #GP on all writes.
*/
if (write) {
ctxt->fi.vector = X86_TRAP_GP;
ctxt->fi.error_code = 0;
return ES_EXCEPTION;
}
/*
* GUEST_TSC_FREQ read should not be intercepted when Secure TSC is
* enabled. Terminate the guest if a read is attempted.
*/
if (regs->cx == MSR_AMD64_GUEST_TSC_FREQ)
return ES_VMM_ERROR;
/*
* Writes: Writing to MSR_IA32_TSC can cause subsequent reads of the TSC
* to return undefined values, so ignore all writes.
*
* Reads: Reads of MSR_IA32_TSC should return the current TSC value, use
* the value returned by rdtsc_ordered().
*/
if (write) {
WARN_ONCE(1, "TSC MSR writes are verboten!\n");
return ES_OK;
}
/* Reads of MSR_IA32_TSC should return the current TSC value. */
tsc = rdtsc_ordered();
regs->ax = lower_32_bits(tsc);
regs->dx = upper_32_bits(tsc);
@@ -416,7 +417,7 @@ static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
case MSR_IA32_TSC:
case MSR_AMD64_GUEST_TSC_FREQ:
if (sev_status & MSR_AMD64_SNP_SECURE_TSC)
return __vc_handle_secure_tsc_msrs(regs, write);
return __vc_handle_secure_tsc_msrs(ctxt, write);
break;
default:
break;


@@ -218,6 +218,7 @@
#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 1) /* "flexpriority" Intel FlexPriority */
#define X86_FEATURE_EPT ( 8*32+ 2) /* "ept" Intel Extended Page Table */
#define X86_FEATURE_VPID ( 8*32+ 3) /* "vpid" Intel Virtual Processor ID */
#define X86_FEATURE_COHERENCY_SFW_NO ( 8*32+ 4) /* SNP cache coherency software work around not needed */
#define X86_FEATURE_VMMCALL ( 8*32+15) /* "vmmcall" Prefer VMMCALL to VMCALL */
#define X86_FEATURE_XENPV ( 8*32+16) /* Xen paravirtual guest */


@@ -1,8 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_CPUID_H
#define _ASM_X86_CPUID_H
#include <asm/cpuid/api.h>
#endif /* _ASM_X86_CPUID_H */


@@ -619,6 +619,24 @@ int rmp_make_shared(u64 pfn, enum pg_level level);
void snp_leak_pages(u64 pfn, unsigned int npages);
void kdump_sev_callback(void);
void snp_fixup_e820_tables(void);
static inline void sev_evict_cache(void *va, int npages)
{
volatile u8 val __always_unused;
u8 *bytes = va;
int page_idx;
/*
* For SEV guests, a read from the first/last cache-lines of a 4K page
* using the guest key is sufficient to cause a flush of all cache-lines
* associated with that 4K page without incurring all the overhead of a
* full CLFLUSH sequence.
*/
for (page_idx = 0; page_idx < npages; page_idx++) {
val = bytes[page_idx * PAGE_SIZE];
val = bytes[page_idx * PAGE_SIZE + PAGE_SIZE - 1];
}
}
#else
static inline bool snp_probe_rmptable_info(void) { return false; }
static inline int snp_rmptable_init(void) { return -ENOSYS; }
@@ -634,6 +652,7 @@ static inline int rmp_make_shared(u64 pfn, enum pg_level level) { return -ENODEV
static inline void snp_leak_pages(u64 pfn, unsigned int npages) {}
static inline void kdump_sev_callback(void) { }
static inline void snp_fixup_e820_tables(void) {}
static inline void sev_evict_cache(void *va, int npages) {}
#endif
#endif


@@ -386,7 +386,6 @@ static bool __init should_mitigate_vuln(unsigned int bug)
case X86_BUG_SPECTRE_V2:
case X86_BUG_RETBLEED:
case X86_BUG_SRSO:
case X86_BUG_L1TF:
case X86_BUG_ITS:
return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) ||
@@ -3184,8 +3183,18 @@ static void __init srso_select_mitigation(void)
}
if (srso_mitigation == SRSO_MITIGATION_AUTO) {
if (should_mitigate_vuln(X86_BUG_SRSO)) {
/*
* Use safe-RET if user->kernel or guest->host protection is
* required. Otherwise the 'microcode' mitigation is sufficient
* to protect the user->user and guest->guest vectors.
*/
if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) ||
(cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) &&
!boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO))) {
srso_mitigation = SRSO_MITIGATION_SAFE_RET;
} else if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||
cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST)) {
srso_mitigation = SRSO_MITIGATION_MICROCODE;
} else {
srso_mitigation = SRSO_MITIGATION_NONE;
return;


@@ -48,6 +48,7 @@ static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_PROC_FEEDBACK, CPUID_EDX, 11, 0x80000007, 0 },
{ X86_FEATURE_AMD_FAST_CPPC, CPUID_EDX, 15, 0x80000007, 0 },
{ X86_FEATURE_MBA, CPUID_EBX, 6, 0x80000008, 0 },
{ X86_FEATURE_COHERENCY_SFW_NO, CPUID_EBX, 31, 0x8000001f, 0 },
{ X86_FEATURE_SMBA, CPUID_EBX, 2, 0x80000020, 0 },
{ X86_FEATURE_BMEC, CPUID_EBX, 3, 0x80000020, 0 },
{ X86_FEATURE_TSA_SQ_NO, CPUID_ECX, 1, 0x80000021, 0 },


@@ -1881,19 +1881,20 @@ long fpu_xstate_prctl(int option, unsigned long arg2)
#ifdef CONFIG_PROC_PID_ARCH_STATUS
/*
* Report the amount of time elapsed in millisecond since last AVX512
* use in the task.
* use in the task. Report -1 if no AVX-512 usage.
*/
static void avx512_status(struct seq_file *m, struct task_struct *task)
{
unsigned long timestamp = READ_ONCE(x86_task_fpu(task)->avx512_timestamp);
long delta;
unsigned long timestamp;
long delta = -1;
if (!timestamp) {
/*
* Report -1 if no AVX512 usage
*/
delta = -1;
} else {
/* AVX-512 usage is not tracked for kernel threads. Don't report anything. */
if (task->flags & (PF_KTHREAD | PF_USER_WORKER))
return;
timestamp = READ_ONCE(x86_task_fpu(task)->avx512_timestamp);
if (timestamp) {
delta = (long)(jiffies - timestamp);
/*
* Cap to LONG_MAX if time difference > LONG_MAX
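The value computed by avx512_status() is exposed through /proc/<pid>/arch_status when CONFIG_PROC_PID_ARCH_STATUS is enabled. A minimal user-space reader, assuming the usual "AVX512_elapsed_ms" field name:

#include <stdio.h>
#include <string.h>

/* Print the AVX512_elapsed_ms line for the current task; the kernel
 * reports -1 when no AVX-512 use has been recorded. */
int main(void)
{
	char line[128];
	FILE *f = fopen("/proc/self/arch_status", "r");

	if (!f)
		return 1;

	while (fgets(line, sizeof(line), f)) {
		if (!strncmp(line, "AVX512_elapsed_ms:", 18))
			fputs(line, stdout);
	}

	fclose(f);
	return 0;
}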


@@ -5847,8 +5847,7 @@ static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
goto out;
}
bfqq = kmem_cache_alloc_node(bfq_pool,
GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN,
bfqq = kmem_cache_alloc_node(bfq_pool, GFP_NOWAIT | __GFP_ZERO,
bfqd->queue->node);
if (bfqq) {


@@ -394,7 +394,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg, struct gendisk *disk,
/* allocate */
if (!new_blkg) {
new_blkg = blkg_alloc(blkcg, disk, GFP_NOWAIT | __GFP_NOWARN);
new_blkg = blkg_alloc(blkcg, disk, GFP_NOWAIT);
if (unlikely(!new_blkg)) {
ret = -ENOMEM;
goto err_put_css;
@@ -1467,7 +1467,7 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
spin_lock_init(&blkcg->lock);
refcount_set(&blkcg->online_pin, 1);
INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT);
INIT_HLIST_HEAD(&blkcg->blkg_list);
#ifdef CONFIG_CGROUP_WRITEBACK
INIT_LIST_HEAD(&blkcg->cgwb_list);
@@ -1630,7 +1630,7 @@ retry:
pd_prealloc = NULL;
} else {
pd = pol->pd_alloc_fn(disk, blkg->blkcg,
GFP_NOWAIT | __GFP_NOWARN);
GFP_NOWAIT);
}
if (!pd) {


@@ -847,7 +847,7 @@ static void blk_queue_release(struct kobject *kobj)
/* nothing to do here, all data is associated with the parent gendisk */
}
static const struct kobj_type blk_queue_ktype = {
const struct kobj_type blk_queue_ktype = {
.default_groups = blk_queue_attr_groups,
.sysfs_ops = &queue_sysfs_ops,
.release = blk_queue_release,
@@ -875,15 +875,14 @@ int blk_register_queue(struct gendisk *disk)
struct request_queue *q = disk->queue;
int ret;
kobject_init(&disk->queue_kobj, &blk_queue_ktype);
ret = kobject_add(&disk->queue_kobj, &disk_to_dev(disk)->kobj, "queue");
if (ret < 0)
goto out_put_queue_kobj;
return ret;
if (queue_is_mq(q)) {
ret = blk_mq_sysfs_register(disk);
if (ret)
goto out_put_queue_kobj;
goto out_del_queue_kobj;
}
mutex_lock(&q->sysfs_lock);
@@ -903,9 +902,9 @@ int blk_register_queue(struct gendisk *disk)
if (queue_is_mq(q))
elevator_set_default(q);
wbt_enable_default(disk);
blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
wbt_enable_default(disk);
/* Now everything is ready and send out KOBJ_ADD uevent */
kobject_uevent(&disk->queue_kobj, KOBJ_ADD);
@@ -934,8 +933,8 @@ out_debugfs_remove:
mutex_unlock(&q->sysfs_lock);
if (queue_is_mq(q))
blk_mq_sysfs_unregister(disk);
out_put_queue_kobj:
kobject_put(&disk->queue_kobj);
out_del_queue_kobj:
kobject_del(&disk->queue_kobj);
return ret;
}
@@ -986,5 +985,4 @@ void blk_unregister_queue(struct gendisk *disk)
elevator_set_none(q);
blk_debugfs_remove(disk);
kobject_put(&disk->queue_kobj);
}


@@ -85,8 +85,8 @@ struct rq_wb {
u64 sync_issue;
void *sync_cookie;
unsigned long last_issue; /* last non-throttled issue */
unsigned long last_comp; /* last non-throttled comp */
unsigned long last_issue; /* issue time of last read rq */
unsigned long last_comp; /* completion time of last read rq */
unsigned long min_lat_nsec;
struct rq_qos rqos;
struct rq_wait rq_wait[WBT_NUM_RWQ];
@@ -248,13 +248,14 @@ static void wbt_done(struct rq_qos *rqos, struct request *rq)
struct rq_wb *rwb = RQWB(rqos);
if (!wbt_is_tracked(rq)) {
if (rwb->sync_cookie == rq) {
rwb->sync_issue = 0;
rwb->sync_cookie = NULL;
}
if (wbt_is_read(rq)) {
if (rwb->sync_cookie == rq) {
rwb->sync_issue = 0;
rwb->sync_cookie = NULL;
}
if (wbt_is_read(rq))
wb_timestamp(rwb, &rwb->last_comp);
}
} else {
WARN_ON_ONCE(rq == rwb->sync_cookie);
__wbt_done(rqos, wbt_flags(rq));


@@ -29,6 +29,7 @@ struct elevator_tags;
/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT (5 * HZ)
extern const struct kobj_type blk_queue_ktype;
extern struct dentry *blk_debugfs_root;
struct blk_flush_queue {


@@ -1303,6 +1303,7 @@ static void disk_release(struct device *dev)
disk_free_zone_resources(disk);
xa_destroy(&disk->part_tbl);
kobject_put(&disk->queue_kobj);
disk->queue->disk = NULL;
blk_put_queue(disk->queue);
@@ -1486,6 +1487,7 @@ struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
INIT_LIST_HEAD(&disk->slave_bdevs);
#endif
mutex_init(&disk->rqos_state_mutex);
kobject_init(&disk->queue_kobj, &blk_queue_ktype);
return disk;
out_erase_part0:


@@ -1829,9 +1829,6 @@ static void hl_release_dmabuf(struct dma_buf *dmabuf)
struct hl_dmabuf_priv *hl_dmabuf = dmabuf->priv;
struct hl_ctx *ctx;
if (!hl_dmabuf)
return;
ctx = hl_dmabuf->ctx;
if (hl_dmabuf->memhash_hnode)
@@ -1859,7 +1856,12 @@ static int export_dmabuf(struct hl_ctx *ctx,
{
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
struct hl_device *hdev = ctx->hdev;
int rc, fd;
CLASS(get_unused_fd, fd)(flags);
if (fd < 0) {
dev_err(hdev->dev, "failed to get a file descriptor for a dma-buf, %d\n", fd);
return fd;
}
exp_info.ops = &habanalabs_dmabuf_ops;
exp_info.size = total_size;
@@ -1872,13 +1874,6 @@
return PTR_ERR(hl_dmabuf->dmabuf);
}
fd = dma_buf_fd(hl_dmabuf->dmabuf, flags);
if (fd < 0) {
dev_err(hdev->dev, "failed to get a file descriptor for a dma-buf, %d\n", fd);
rc = fd;
goto err_dma_buf_put;
}
hl_dmabuf->ctx = ctx;
hl_ctx_get(hl_dmabuf->ctx);
atomic_inc(&ctx->hdev->dmabuf_export_cnt);
@@ -1890,13 +1885,9 @@
get_file(ctx->hpriv->file_priv->filp);
*dmabuf_fd = fd;
fd_install(take_fd(fd), hl_dmabuf->dmabuf->file);
return 0;
err_dma_buf_put:
hl_dmabuf->dmabuf->priv = NULL;
dma_buf_put(hl_dmabuf->dmabuf);
return rc;
}
static int validate_export_params_common(struct hl_device *hdev, u64 addr, u64 size, u64 offset)
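The rework above leans on the scope-based fd guard from <linux/file.h>: the descriptor reserved by CLASS(get_unused_fd, ...) is put back automatically on any early return, and take_fd() disarms the guard once fd_install() takes ownership. A condensed kernel-style sketch of the pattern (function and parameter names are illustrative):

#include <linux/file.h>

static int install_file_sketch(struct file *filp, unsigned int flags, int *out_fd)
{
	/* Reserve an fd; put_unused_fd() runs automatically if we bail out. */
	CLASS(get_unused_fd, fd)(flags);

	if (fd < 0)
		return fd;

	/* ...setup that may fail can simply 'return err;' here... */

	*out_fd = fd;
	/* take_fd() hands ownership of the number to the fd table. */
	fd_install(take_fd(fd), filp);
	return 0;
}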


@@ -2033,7 +2033,7 @@ void __init acpi_ec_ecdt_probe(void)
goto out;
}
if (!strstarts(ecdt_ptr->id, "\\")) {
if (!strlen(ecdt_ptr->id)) {
/*
* The ECDT table on some MSI notebooks contains invalid data, together
* with an empty ID string ("").
@@ -2042,9 +2042,13 @@
* a "fully qualified reference to the (...) embedded controller device",
* so this string always has to start with a backslash.
*
* By verifying this we can avoid such faulty ECDT tables in a safe way.
* However, some ThinkBook machines have an ECDT table with a valid EC
* description but an invalid ID string ("_SB.PC00.LPCB.EC0").
*
* Because of this we only check if the ID string is empty in order to
* avoid the obvious cases.
*/
pr_err(FW_BUG "Ignoring ECDT due to invalid ID string \"%s\"\n", ecdt_ptr->id);
pr_err(FW_BUG "Ignoring ECDT due to empty ID string\n");
goto out;
}


@@ -180,7 +180,7 @@ void acpi_processor_ppc_init(struct cpufreq_policy *policy)
struct acpi_processor *pr = per_cpu(processors, cpu);
int ret;
if (!pr || !pr->performance)
if (!pr)
continue;
/*
@@ -197,6 +197,9 @@ void acpi_processor_ppc_init(struct cpufreq_policy *policy)
pr_err("Failed to add freq constraint for CPU%d (%d)\n",
cpu, ret);
if (!pr->performance)
continue;
ret = acpi_processor_get_platform_limit(pr);
if (ret)
pr_err("Failed to update freq constraint for CPU%d (%d)\n",


@@ -2075,7 +2075,7 @@ out:
* Check if a link is established. This is a relaxed version of
* ata_phys_link_online() which accounts for the fact that this is potentially
* called after changing the link power management policy, which may not be
* reflected immediately in the SSTAUS register (e.g., we may still be seeing
* reflected immediately in the SStatus register (e.g., we may still be seeing
* the PHY in partial, slumber or devsleep power management state).
* So check that:
* - A device is still present, that is, DET is 1h (Device presence detected
@@ -2089,8 +2089,13 @@ static bool ata_eh_link_established(struct ata_link *link)
u32 sstatus;
u8 det, ipm;
/*
* For old IDE/PATA adapters that do not have a valid scr_read method,
* or if reading the SStatus register fails, assume that the device is
* present. Device probe will determine if that is really the case.
*/
if (sata_scr_read(link, SCR_STATUS, &sstatus))
return false;
return true;
det = sstatus & 0x0f;
ipm = (sstatus >> 8) & 0x0f;


@@ -3904,21 +3904,16 @@ static int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc,
/* Check cdl_ctrl */
switch (buf[0] & 0x03) {
case 0:
/* Disable CDL if it is enabled */
if (!(dev->flags & ATA_DFLAG_CDL_ENABLED))
return 0;
/* Disable CDL */
ata_dev_dbg(dev, "Disabling CDL\n");
cdl_action = 0;
dev->flags &= ~ATA_DFLAG_CDL_ENABLED;
break;
case 0x02:
/*
* Enable CDL if not already enabled. Since this is mutually
* exclusive with NCQ priority, allow this only if NCQ priority
* is disabled.
* Enable CDL. Since CDL is mutually exclusive with NCQ
* priority, allow this only if NCQ priority is disabled.
*/
if (dev->flags & ATA_DFLAG_CDL_ENABLED)
return 0;
if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED) {
ata_dev_err(dev,
"NCQ priority must be disabled to enable CDL\n");


@@ -380,6 +380,9 @@ enum {
/* this is/was a write request */
__EE_WRITE,
/* hand back using mempool_free(e, drbd_buffer_page_pool) */
__EE_RELEASE_TO_MEMPOOL,
/* this is/was a write same request */
__EE_WRITE_SAME,
@@ -402,6 +405,7 @@ enum {
#define EE_IN_INTERVAL_TREE (1<<__EE_IN_INTERVAL_TREE)
#define EE_SUBMITTED (1<<__EE_SUBMITTED)
#define EE_WRITE (1<<__EE_WRITE)
#define EE_RELEASE_TO_MEMPOOL (1<<__EE_RELEASE_TO_MEMPOOL)
#define EE_WRITE_SAME (1<<__EE_WRITE_SAME)
#define EE_APPLICATION (1<<__EE_APPLICATION)
#define EE_RS_THIN_REQ (1<<__EE_RS_THIN_REQ)
@@ -858,7 +862,6 @@ struct drbd_device {
struct list_head sync_ee; /* IO in progress (P_RS_DATA_REPLY gets written to disk) */
struct list_head done_ee; /* need to send P_WRITE_ACK */
struct list_head read_ee; /* [RS]P_DATA_REQUEST being read */
struct list_head net_ee; /* zero-copy network send in progress */
struct list_head resync_reads;
atomic_t pp_in_use; /* allocated from page pool */
@@ -1329,24 +1332,6 @@ extern struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
extern mempool_t drbd_request_mempool;
extern mempool_t drbd_ee_mempool;
/* drbd's page pool, used to buffer data received from the peer,
* or data requested by the peer.
*
* This does not have an emergency reserve.
*
* When allocating from this pool, it first takes pages from the pool.
* Only if the pool is depleted will try to allocate from the system.
*
* The assumption is that pages taken from this pool will be processed,
* and given back, "quickly", and then can be recycled, so we can avoid
* frequent calls to alloc_page(), and still will be able to make progress even
* under memory pressure.
*/
extern struct page *drbd_pp_pool;
extern spinlock_t drbd_pp_lock;
extern int drbd_pp_vacant;
extern wait_queue_head_t drbd_pp_wait;
/* We also need a standard (emergency-reserve backed) page pool
* for meta data IO (activity log, bitmap).
* We can keep it global, as long as it is used as "N pages at a time".
@@ -1354,6 +1339,7 @@ extern wait_queue_head_t drbd_pp_wait;
*/
#define DRBD_MIN_POOL_PAGES 128
extern mempool_t drbd_md_io_page_pool;
extern mempool_t drbd_buffer_page_pool;
/* We also need to make sure we get a bio
* when we need it for housekeeping purposes */
@@ -1488,10 +1474,7 @@ extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *,
sector_t, unsigned int,
unsigned int,
gfp_t) __must_hold(local);
extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request *,
int);
#define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0)
#define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1)
extern void drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *req);
extern struct page *drbd_alloc_pages(struct drbd_peer_device *, unsigned int, bool);
extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed);
extern int drbd_connected(struct drbd_peer_device *);
@@ -1610,16 +1593,6 @@ static inline struct page *page_chain_next(struct page *page)
for (; page && ({ n = page_chain_next(page); 1; }); page = n)
static inline int drbd_peer_req_has_active_page(struct drbd_peer_request *peer_req)
{
struct page *page = peer_req->pages;
page_chain_for_each(page) {
if (page_count(page) > 1)
return 1;
}
return 0;
}
static inline union drbd_state drbd_read_state(struct drbd_device *device)
{
struct drbd_resource *resource = device->resource;


@@ -114,20 +114,10 @@ struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
mempool_t drbd_request_mempool;
mempool_t drbd_ee_mempool;
mempool_t drbd_md_io_page_pool;
mempool_t drbd_buffer_page_pool;
struct bio_set drbd_md_io_bio_set;
struct bio_set drbd_io_bio_set;
/* I do not use a standard mempool, because:
1) I want to hand out the pre-allocated objects first.
2) I want to be able to interrupt sleeping allocation with a signal.
Note: This is a single linked list, the next pointer is the private
member of struct page.
*/
struct page *drbd_pp_pool;
DEFINE_SPINLOCK(drbd_pp_lock);
int drbd_pp_vacant;
wait_queue_head_t drbd_pp_wait;
DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
static const struct block_device_operations drbd_ops = {
@@ -1611,6 +1601,7 @@ static int _drbd_send_zc_bio(struct drbd_peer_device *peer_device, struct bio *b
static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
struct drbd_peer_request *peer_req)
{
bool use_sendpage = !(peer_req->flags & EE_RELEASE_TO_MEMPOOL);
struct page *page = peer_req->pages;
unsigned len = peer_req->i.size;
int err;
@@ -1619,8 +1610,13 @@ static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
page_chain_for_each(page) {
unsigned l = min_t(unsigned, len, PAGE_SIZE);
err = _drbd_send_page(peer_device, page, 0, l,
page_chain_next(page) ? MSG_MORE : 0);
if (likely(use_sendpage))
err = _drbd_send_page(peer_device, page, 0, l,
page_chain_next(page) ? MSG_MORE : 0);
else
err = _drbd_no_send_page(peer_device, page, 0, l,
page_chain_next(page) ? MSG_MORE : 0);
if (err)
return err;
len -= l;
@@ -1962,7 +1958,6 @@ void drbd_init_set_defaults(struct drbd_device *device)
INIT_LIST_HEAD(&device->sync_ee);
INIT_LIST_HEAD(&device->done_ee);
INIT_LIST_HEAD(&device->read_ee);
INIT_LIST_HEAD(&device->net_ee);
INIT_LIST_HEAD(&device->resync_reads);
INIT_LIST_HEAD(&device->resync_work.list);
INIT_LIST_HEAD(&device->unplug_work.list);
@@ -2043,7 +2038,6 @@ void drbd_device_cleanup(struct drbd_device *device)
D_ASSERT(device, list_empty(&device->sync_ee));
D_ASSERT(device, list_empty(&device->done_ee));
D_ASSERT(device, list_empty(&device->read_ee));
D_ASSERT(device, list_empty(&device->net_ee));
D_ASSERT(device, list_empty(&device->resync_reads));
D_ASSERT(device, list_empty(&first_peer_device(device)->connection->sender_work.q));
D_ASSERT(device, list_empty(&device->resync_work.list));
@@ -2055,19 +2049,11 @@
static void drbd_destroy_mempools(void)
{
struct page *page;
while (drbd_pp_pool) {
page = drbd_pp_pool;
drbd_pp_pool = (struct page *)page_private(page);
__free_page(page);
drbd_pp_vacant--;
}
/* D_ASSERT(device, atomic_read(&drbd_pp_vacant)==0); */
bioset_exit(&drbd_io_bio_set);
bioset_exit(&drbd_md_io_bio_set);
mempool_exit(&drbd_buffer_page_pool);
mempool_exit(&drbd_md_io_page_pool);
mempool_exit(&drbd_ee_mempool);
mempool_exit(&drbd_request_mempool);
@@ -2086,9 +2072,8 @@ static void drbd_destroy_mempools(void)
static int drbd_create_mempools(void)
{
struct page *page;
const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count;
int i, ret;
int ret;
/* caches */
drbd_request_cache = kmem_cache_create(
@@ -2125,6 +2110,10 @@ static int drbd_create_mempools(void)
if (ret)
goto Enomem;
ret = mempool_init_page_pool(&drbd_buffer_page_pool, number, 0);
if (ret)
goto Enomem;
ret = mempool_init_slab_pool(&drbd_request_mempool, number,
drbd_request_cache);
if (ret)
@@ -2134,15 +2123,6 @@ static int drbd_create_mempools(void)
if (ret)
goto Enomem;
for (i = 0; i < number; i++) {
page = alloc_page(GFP_HIGHUSER);
if (!page)
goto Enomem;
set_page_private(page, (unsigned long)drbd_pp_pool);
drbd_pp_pool = page;
}
drbd_pp_vacant = number;
return 0;
Enomem:
@@ -2169,10 +2149,6 @@ static void drbd_release_all_peer_reqs(struct drbd_device *device)
rr = drbd_free_peer_reqs(device, &device->done_ee);
if (rr)
drbd_err(device, "%d EEs in done list found!\n", rr);
rr = drbd_free_peer_reqs(device, &device->net_ee);
if (rr)
drbd_err(device, "%d EEs in net list found!\n", rr);
}
/* caution. no locking. */
@@ -2863,11 +2839,6 @@ static int __init drbd_init(void)
return err;
}
/*
* allocate all necessary structs
*/
init_waitqueue_head(&drbd_pp_wait);
drbd_proc = NULL; /* play safe for drbd_cleanup */
idr_init(&drbd_devices);
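The conversion replaces drbd's hand-rolled drbd_pp_pool page list with a standard page mempool. A minimal sketch of that API, with illustrative names:

#include <linux/mempool.h>

static mempool_t example_page_pool;

/* Pre-allocate 'min_nr' order-0 pages; mempool_alloc() hands out the
 * reserve first and falls back to the page allocator afterwards. */
static int example_pool_init(int min_nr)
{
	return mempool_init_page_pool(&example_page_pool, min_nr, 0);
}

static struct page *example_pool_get(void)
{
	/* Mirrors GFP_TRY above: may fail, must not force write-out. */
	return mempool_alloc(&example_page_pool, __GFP_HIGHMEM | __GFP_NOWARN);
}

static void example_pool_put(struct page *page)
{
	mempool_free(page, &example_page_pool);
}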


@@ -33,6 +33,7 @@
#include <linux/string.h>
#include <linux/scatterlist.h>
#include <linux/part_stat.h>
#include <linux/mempool.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h"
@@ -63,182 +64,31 @@ static int e_end_block(struct drbd_work *, int);
#define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
/*
* some helper functions to deal with single linked page lists,
* page->private being our "next" pointer.
*/
/* If at least n pages are linked at head, get n pages off.
* Otherwise, don't modify head, and return NULL.
* Locking is the responsibility of the caller.
*/
static struct page *page_chain_del(struct page **head, int n)
{
struct page *page;
struct page *tmp;
BUG_ON(!n);
BUG_ON(!head);
page = *head;
if (!page)
return NULL;
while (page) {
tmp = page_chain_next(page);
if (--n == 0)
break; /* found sufficient pages */
if (tmp == NULL)
/* insufficient pages, don't use any of them. */
return NULL;
page = tmp;
}
/* add end of list marker for the returned list */
set_page_private(page, 0);
/* actual return value, and adjustment of head */
page = *head;
*head = tmp;
return page;
}
/* may be used outside of locks to find the tail of a (usually short)
* "private" page chain, before adding it back to a global chain head
* with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
{
struct page *tmp;
int i = 1;
while ((tmp = page_chain_next(page))) {
++i;
page = tmp;
}
if (len)
*len = i;
return page;
}
static int page_chain_free(struct page *page)
{
struct page *tmp;
int i = 0;
page_chain_for_each_safe(page, tmp) {
put_page(page);
++i;
}
return i;
}
static void page_chain_add(struct page **head,
struct page *chain_first, struct page *chain_last)
{
#if 1
struct page *tmp;
tmp = page_chain_tail(chain_first, NULL);
BUG_ON(tmp != chain_last);
#endif
/* add chain to head */
set_page_private(chain_last, (unsigned long)*head);
*head = chain_first;
}
static struct page *__drbd_alloc_pages(struct drbd_device *device,
unsigned int number)
static struct page *__drbd_alloc_pages(unsigned int number)
{
struct page *page = NULL;
struct page *tmp = NULL;
unsigned int i = 0;
/* Yes, testing drbd_pp_vacant outside the lock is racy.
* So what. It saves a spin_lock. */
if (drbd_pp_vacant >= number) {
spin_lock(&drbd_pp_lock);
page = page_chain_del(&drbd_pp_pool, number);
if (page)
drbd_pp_vacant -= number;
spin_unlock(&drbd_pp_lock);
if (page)
return page;
}
/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
* "criss-cross" setup, that might cause write-out on some other DRBD,
* which in turn might block on the other node at this very place. */
for (i = 0; i < number; i++) {
tmp = alloc_page(GFP_TRY);
tmp = mempool_alloc(&drbd_buffer_page_pool, GFP_TRY);
if (!tmp)
break;
goto fail;
set_page_private(tmp, (unsigned long)page);
page = tmp;
}
if (i == number)
return page;
/* Not enough pages immediately available this time.
* No need to jump around here, drbd_alloc_pages will retry this
* function "soon". */
if (page) {
tmp = page_chain_tail(page, NULL);
spin_lock(&drbd_pp_lock);
page_chain_add(&drbd_pp_pool, page, tmp);
drbd_pp_vacant += i;
spin_unlock(&drbd_pp_lock);
return page;
fail:
page_chain_for_each_safe(page, tmp) {
set_page_private(page, 0);
mempool_free(page, &drbd_buffer_page_pool);
}
return NULL;
}
static void reclaim_finished_net_peer_reqs(struct drbd_device *device,
struct list_head *to_be_freed)
{
struct drbd_peer_request *peer_req, *tmp;
/* The EEs are always appended to the end of the list. Since
they are sent in order over the wire, they have to finish
in order. As soon as we see the first not finished we can
stop to examine the list... */
list_for_each_entry_safe(peer_req, tmp, &device->net_ee, w.list) {
if (drbd_peer_req_has_active_page(peer_req))
break;
list_move(&peer_req->w.list, to_be_freed);
}
}
static void drbd_reclaim_net_peer_reqs(struct drbd_device *device)
{
LIST_HEAD(reclaimed);
struct drbd_peer_request *peer_req, *t;
spin_lock_irq(&device->resource->req_lock);
reclaim_finished_net_peer_reqs(device, &reclaimed);
spin_unlock_irq(&device->resource->req_lock);
list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
drbd_free_net_peer_req(device, peer_req);
}
static void conn_reclaim_net_peer_reqs(struct drbd_connection *connection)
{
struct drbd_peer_device *peer_device;
int vnr;
rcu_read_lock();
idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
struct drbd_device *device = peer_device->device;
if (!atomic_read(&device->pp_in_use_by_net))
continue;
kref_get(&device->kref);
rcu_read_unlock();
drbd_reclaim_net_peer_reqs(device);
kref_put(&device->kref, drbd_destroy_device);
rcu_read_lock();
}
rcu_read_unlock();
}
/**
* drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
* @peer_device: DRBD device.
@@ -263,9 +113,8 @@ struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int
bool retry)
{
struct drbd_device *device = peer_device->device;
struct page *page = NULL;
struct page *page;
struct net_conf *nc;
DEFINE_WAIT(wait);
unsigned int mxb;
rcu_read_lock();
@@ -273,37 +122,9 @@ struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int
mxb = nc ? nc->max_buffers : 1000000;
rcu_read_unlock();
if (atomic_read(&device->pp_in_use) < mxb)
page = __drbd_alloc_pages(device, number);
/* Try to keep the fast path fast, but occasionally we need
* to reclaim the pages we lended to the network stack. */
if (page && atomic_read(&device->pp_in_use_by_net) > 512)
drbd_reclaim_net_peer_reqs(device);
while (page == NULL) {
prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);
drbd_reclaim_net_peer_reqs(device);
if (atomic_read(&device->pp_in_use) < mxb) {
page = __drbd_alloc_pages(device, number);
if (page)
break;
}
if (!retry)
break;
if (signal_pending(current)) {
drbd_warn(device, "drbd_alloc_pages interrupted!\n");
break;
}
if (schedule_timeout(HZ/10) == 0)
mxb = UINT_MAX;
}
finish_wait(&drbd_pp_wait, &wait);
if (atomic_read(&device->pp_in_use) >= mxb)
schedule_timeout_interruptible(HZ / 10);
page = __drbd_alloc_pages(number);
if (page)
atomic_add(number, &device->pp_in_use);
@@ -314,29 +135,25 @@ struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int
* Is also used from inside an other spin_lock_irq(&resource->req_lock);
* Either links the page chain back to the global pool,
* or returns all pages to the system. */
static void drbd_free_pages(struct drbd_device *device, struct page *page, int is_net)
static void drbd_free_pages(struct drbd_device *device, struct page *page)
{
atomic_t *a = is_net ? &device->pp_in_use_by_net : &device->pp_in_use;
int i;
struct page *tmp;
int i = 0;
if (page == NULL)
return;
if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count)
i = page_chain_free(page);
else {
struct page *tmp;
tmp = page_chain_tail(page, &i);
spin_lock(&drbd_pp_lock);
page_chain_add(&drbd_pp_pool, page, tmp);
drbd_pp_vacant += i;
spin_unlock(&drbd_pp_lock);
page_chain_for_each_safe(page, tmp) {
set_page_private(page, 0);
if (page_count(page) == 1)
mempool_free(page, &drbd_buffer_page_pool);
else
put_page(page);
i++;
}
i = atomic_sub_return(i, a);
i = atomic_sub_return(i, &device->pp_in_use);
if (i < 0)
drbd_warn(device, "ASSERTION FAILED: %s: %d < 0\n",
is_net ? "pp_in_use_by_net" : "pp_in_use", i);
wake_up(&drbd_pp_wait);
drbd_warn(device, "ASSERTION FAILED: pp_in_use: %d < 0\n", i);
}
/*
@@ -380,6 +197,8 @@ drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t secto
gfpflags_allow_blocking(gfp_mask));
if (!page)
goto fail;
if (!mempool_is_saturated(&drbd_buffer_page_pool))
peer_req->flags |= EE_RELEASE_TO_MEMPOOL;
}
memset(peer_req, 0, sizeof(*peer_req));
@@ -403,13 +222,12 @@ drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t secto
return NULL;
}
void __drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req,
int is_net)
void drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req)
{
might_sleep();
if (peer_req->flags & EE_HAS_DIGEST)
kfree(peer_req->digest);
drbd_free_pages(device, peer_req->pages, is_net);
drbd_free_pages(device, peer_req->pages);
D_ASSERT(device, atomic_read(&peer_req->pending_bios) == 0);
D_ASSERT(device, drbd_interval_empty(&peer_req->i));
if (!expect(device, !(peer_req->flags & EE_CALL_AL_COMPLETE_IO))) {
@@ -424,14 +242,13 @@ int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list)
LIST_HEAD(work_list);
struct drbd_peer_request *peer_req, *t;
int count = 0;
int is_net = list == &device->net_ee;
spin_lock_irq(&device->resource->req_lock);
list_splice_init(list, &work_list);
spin_unlock_irq(&device->resource->req_lock);
list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
__drbd_free_peer_req(device, peer_req, is_net);
drbd_free_peer_req(device, peer_req);
count++;
}
return count;
@@ -443,18 +260,13 @@ int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list)
static int drbd_finish_peer_reqs(struct drbd_device *device)
{
LIST_HEAD(work_list);
LIST_HEAD(reclaimed);
struct drbd_peer_request *peer_req, *t;
int err = 0;
spin_lock_irq(&device->resource->req_lock);
reclaim_finished_net_peer_reqs(device, &reclaimed);
list_splice_init(&device->done_ee, &work_list);
spin_unlock_irq(&device->resource->req_lock);
list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
drbd_free_net_peer_req(device, peer_req);
/* possible callbacks here:
* e_end_block, and e_end_resync_block, e_send_superseded.
* all ignore the last argument.
@@ -1975,7 +1787,7 @@ static int drbd_drain_block(struct drbd_peer_device *peer_device, int data_size)
data_size -= len;
}
kunmap(page);
drbd_free_pages(peer_device->device, page, 0);
drbd_free_pages(peer_device->device, page);
return err;
}
@@ -5224,16 +5036,6 @@ static int drbd_disconnected(struct drbd_peer_device *peer_device)
put_ldev(device);
}
/* tcp_close and release of sendpage pages can be deferred. I don't
* want to use SO_LINGER, because apparently it can be deferred for
* more than 20 seconds (longest time I checked).
*
* Actually we don't care for exactly when the network stack does its
* put_page(), but release our reference on these pages right here.
*/
i = drbd_free_peer_reqs(device, &device->net_ee);
if (i)
drbd_info(device, "net_ee not empty, killed %u entries\n", i);
i = atomic_read(&device->pp_in_use_by_net);
if (i)
drbd_info(device, "pp_in_use_by_net = %d, expected 0\n", i);
@@ -5980,8 +5782,6 @@ int drbd_ack_receiver(struct drbd_thread *thi)
while (get_t_state(thi) == RUNNING) {
drbd_thread_current_set_cpu(thi);
conn_reclaim_net_peer_reqs(connection);
if (test_and_clear_bit(SEND_PING, &connection->flags)) {
if (drbd_send_ping(connection)) {
drbd_err(connection, "drbd_send_ping has failed\n");


@@ -1030,22 +1030,6 @@ out:
return 1;
}
/* helper */
static void move_to_net_ee_or_free(struct drbd_device *device, struct drbd_peer_request *peer_req)
{
if (drbd_peer_req_has_active_page(peer_req)) {
/* This might happen if sendpage() has not finished */
int i = PFN_UP(peer_req->i.size);
atomic_add(i, &device->pp_in_use_by_net);
atomic_sub(i, &device->pp_in_use);
spin_lock_irq(&device->resource->req_lock);
list_add_tail(&peer_req->w.list, &device->net_ee);
spin_unlock_irq(&device->resource->req_lock);
wake_up(&drbd_pp_wait);
} else
drbd_free_peer_req(device, peer_req);
}
/**
* w_e_end_data_req() - Worker callback, to send a P_DATA_REPLY packet in response to a P_DATA_REQUEST
* @w: work object.
@@ -1059,9 +1043,8 @@ int w_e_end_data_req(struct drbd_work *w, int cancel)
int err;
if (unlikely(cancel)) {
drbd_free_peer_req(device, peer_req);
dec_unacked(device);
return 0;
err = 0;
goto out;
}
if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
@@ -1074,12 +1057,12 @@ int w_e_end_data_req(struct drbd_work *w, int cancel)
err = drbd_send_ack(peer_device, P_NEG_DREPLY, peer_req);
}
dec_unacked(device);
move_to_net_ee_or_free(device, peer_req);
if (unlikely(err))
drbd_err(device, "drbd_send_block() failed\n");
out:
dec_unacked(device);
drbd_free_peer_req(device, peer_req);
return err;
}
@@ -1120,9 +1103,8 @@ int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
int err;
if (unlikely(cancel)) {
drbd_free_peer_req(device, peer_req);
dec_unacked(device);
return 0;
err = 0;
goto out;
}
if (get_ldev_if_state(device, D_FAILED)) {
@@ -1155,13 +1137,12 @@ int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
/* update resync data with failure */
drbd_rs_failed_io(peer_device, peer_req->i.sector, peer_req->i.size);
}
dec_unacked(device);
move_to_net_ee_or_free(device, peer_req);
if (unlikely(err))
drbd_err(device, "drbd_send_block() failed\n");
out:
dec_unacked(device);
drbd_free_peer_req(device, peer_req);
return err;
}
@@ -1176,9 +1157,8 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
int err, eq = 0;
if (unlikely(cancel)) {
drbd_free_peer_req(device, peer_req);
dec_unacked(device);
return 0;
err = 0;
goto out;
}
if (get_ldev(device)) {
@@ -1220,12 +1200,12 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
if (drbd_ratelimit())
drbd_err(device, "Sending NegDReply. I guess it gets messy.\n");
}
dec_unacked(device);
move_to_net_ee_or_free(device, peer_req);
if (unlikely(err))
drbd_err(device, "drbd_send_block/ack() failed\n");
out:
dec_unacked(device);
drbd_free_peer_req(device, peer_req);
return err;
}


@@ -235,7 +235,7 @@ struct ublk_device {
struct completion completion;
unsigned int nr_queues_ready;
unsigned int nr_privileged_daemon;
bool unprivileged_daemons;
struct mutex cancel_mutex;
bool canceling;
pid_t ublksrv_tgid;
@@ -1389,7 +1389,7 @@ static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq,
{
blk_status_t res;
if (unlikely(ubq->fail_io))
if (unlikely(READ_ONCE(ubq->fail_io)))
return BLK_STS_TARGET;
/* With recovery feature enabled, force_abort is set in
@@ -1401,7 +1401,8 @@ static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq,
* Note: force_abort is guaranteed to be seen because it is set
* before request queue is unquiesced.
*/
if (ublk_nosrv_should_queue_io(ubq) && unlikely(ubq->force_abort))
if (ublk_nosrv_should_queue_io(ubq) &&
unlikely(READ_ONCE(ubq->force_abort)))
return BLK_STS_IOERR;
if (check_cancel && unlikely(ubq->canceling))
@@ -1550,7 +1551,7 @@ static void ublk_reset_ch_dev(struct ublk_device *ub)
/* set to NULL, otherwise new tasks cannot mmap io_cmd_buf */
ub->mm = NULL;
ub->nr_queues_ready = 0;
ub->nr_privileged_daemon = 0;
ub->unprivileged_daemons = false;
ub->ublksrv_tgid = -1;
}
@@ -1644,7 +1645,6 @@ static int ublk_ch_release(struct inode *inode, struct file *filp)
* Transition the device to the nosrv state. What exactly this
* means depends on the recovery flags
*/
blk_mq_quiesce_queue(disk->queue);
if (ublk_nosrv_should_stop_dev(ub)) {
/*
* Allow any pending/future I/O to pass through quickly
@@ -1652,8 +1652,7 @@ static int ublk_ch_release(struct inode *inode, struct file *filp)
* waits for all pending I/O to complete
*/
for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
ublk_get_queue(ub, i)->force_abort = true;
blk_mq_unquiesce_queue(disk->queue);
WRITE_ONCE(ublk_get_queue(ub, i)->force_abort, true);
ublk_stop_dev_unlocked(ub);
} else {
@@ -1663,9 +1662,8 @@
} else {
ub->dev_info.state = UBLK_S_DEV_FAIL_IO;
for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
ublk_get_queue(ub, i)->fail_io = true;
WRITE_ONCE(ublk_get_queue(ub, i)->fail_io, true);
}
blk_mq_unquiesce_queue(disk->queue);
}
unlock:
mutex_unlock(&ub->mutex);
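With the queue quiesce removed, the force_abort/fail_io flags are now handed over locklessly, so both sides use the ONCE accessors to keep the compiler from tearing or caching the accesses. A condensed sketch of the pattern (struct name is illustrative):

#include <linux/compiler.h>

struct ubq_sketch {
	bool force_abort;
};

/* Writer: publish the flag without quiescing the queue. */
static void mark_abort(struct ubq_sketch *ubq)
{
	WRITE_ONCE(ubq->force_abort, true);
}

/* Reader (I/O path): sample the flag exactly once per check. */
static bool should_abort(struct ubq_sketch *ubq)
{
	return READ_ONCE(ubq->force_abort);
}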
@@ -1980,12 +1978,10 @@ static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
__must_hold(&ub->mutex)
{
ubq->nr_io_ready++;
if (ublk_queue_ready(ubq)) {
if (ublk_queue_ready(ubq))
ub->nr_queues_ready++;
if (capable(CAP_SYS_ADMIN))
ub->nr_privileged_daemon++;
}
if (!ub->unprivileged_daemons && !capable(CAP_SYS_ADMIN))
ub->unprivileged_daemons = true;
if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues) {
/* now we are ready for handling ublk io request */
@@ -2880,8 +2876,8 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub,
ublk_apply_params(ub);
/* don't probe partitions if any one ubq daemon is un-trusted */
if (ub->nr_privileged_daemon != ub->nr_queues_ready)
/* don't probe partitions if any daemon task is un-trusted */
if (ub->unprivileged_daemons)
set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
ublk_get_device(ub);


@@ -2793,6 +2793,7 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
X86_MATCH(INTEL_GRANITERAPIDS_X, core_funcs),
X86_MATCH(INTEL_ATOM_CRESTMONT, core_funcs),
X86_MATCH(INTEL_ATOM_CRESTMONT_X, core_funcs),
X86_MATCH(INTEL_ATOM_DARKMONT_X, core_funcs),
{}
};
#endif


@@ -97,6 +97,14 @@ static inline int which_bucket(u64 duration_ns)
static DEFINE_PER_CPU(struct menu_device, menu_devices);
static void menu_update_intervals(struct menu_device *data, unsigned int interval_us)
{
/* Update the repeating-pattern data. */
data->intervals[data->interval_ptr++] = interval_us;
if (data->interval_ptr >= INTERVALS)
data->interval_ptr = 0;
}
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);
/*
@@ -222,6 +230,14 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
if (data->needs_update) {
menu_update(drv, dev);
data->needs_update = 0;
} else if (!dev->last_residency_ns) {
/*
* This happens when the driver rejects the previously selected
* idle state and returns an error, so update the recent
* intervals table to prevent invalid information from being
* used going forward.
*/
menu_update_intervals(data, UINT_MAX);
}
/* Find the shortest expected idle interval. */
@@ -482,10 +498,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
data->correction_factor[data->bucket] = new_factor;
/* update the repeating-pattern data */
data->intervals[data->interval_ptr++] = ktime_to_us(measured_ns);
if (data->interval_ptr >= INTERVALS)
data->interval_ptr = 0;
menu_update_intervals(data, ktime_to_us(measured_ns));
}
/**


@@ -550,6 +550,23 @@ const struct fw_address_region fw_unit_space_region =
{ .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, };
#endif /* 0 */
static void complete_address_handler(struct kref *kref)
{
struct fw_address_handler *handler = container_of(kref, struct fw_address_handler, kref);
complete(&handler->done);
}
static void get_address_handler(struct fw_address_handler *handler)
{
kref_get(&handler->kref);
}
static int put_address_handler(struct fw_address_handler *handler)
{
return kref_put(&handler->kref, complete_address_handler);
}
/**
* fw_core_add_address_handler() - register for incoming requests
* @handler: callback
@@ -596,6 +613,8 @@ int fw_core_add_address_handler(struct fw_address_handler *handler,
if (other != NULL) {
handler->offset += other->length;
} else {
init_completion(&handler->done);
kref_init(&handler->kref);
list_add_tail_rcu(&handler->link, &address_handler_list);
ret = 0;
break;
@@ -621,6 +640,9 @@ void fw_core_remove_address_handler(struct fw_address_handler *handler)
list_del_rcu(&handler->link);
synchronize_rcu();
if (!put_address_handler(handler))
wait_for_completion(&handler->done);
}
EXPORT_SYMBOL(fw_core_remove_address_handler);
@@ -914,22 +936,31 @@ static void handle_exclusive_region_request(struct fw_card *card,
handler = lookup_enclosing_address_handler(&address_handler_list, offset,
request->length);
if (handler)
handler->address_callback(card, request, tcode, destination, source,
p->generation, offset, request->data,
request->length, handler->callback_data);
get_address_handler(handler);
}
if (!handler)
if (!handler) {
fw_send_response(card, request, RCODE_ADDRESS_ERROR);
return;
}
// Outside the RCU read-side critical section. Without spinlock. With reference count.
handler->address_callback(card, request, tcode, destination, source, p->generation, offset,
request->data, request->length, handler->callback_data);
put_address_handler(handler);
}
// To use kmalloc allocator efficiently, this should be power of two.
#define BUFFER_ON_KERNEL_STACK_SIZE 4
static void handle_fcp_region_request(struct fw_card *card,
struct fw_packet *p,
struct fw_request *request,
unsigned long long offset)
{
struct fw_address_handler *handler;
int tcode, destination, source;
struct fw_address_handler *buffer_on_kernel_stack[BUFFER_ON_KERNEL_STACK_SIZE];
struct fw_address_handler *handler, **handlers;
int tcode, destination, source, i, count, buffer_size;
if ((offset != (CSR_REGISTER_BASE | CSR_FCP_COMMAND) &&
offset != (CSR_REGISTER_BASE | CSR_FCP_RESPONSE)) ||
@@ -950,15 +981,55 @@ static void handle_fcp_region_request(struct fw_card *card,
return;
}
count = 0;
handlers = buffer_on_kernel_stack;
buffer_size = ARRAY_SIZE(buffer_on_kernel_stack);
scoped_guard(rcu) {
list_for_each_entry_rcu(handler, &address_handler_list, link) {
if (is_enclosing_handler(handler, offset, request->length))
handler->address_callback(card, request, tcode, destination, source,
p->generation, offset, request->data,
request->length, handler->callback_data);
if (is_enclosing_handler(handler, offset, request->length)) {
if (count >= buffer_size) {
int next_size = buffer_size * 2;
struct fw_address_handler **buffer_on_kernel_heap;
if (handlers == buffer_on_kernel_stack)
buffer_on_kernel_heap = NULL;
else
buffer_on_kernel_heap = handlers;
buffer_on_kernel_heap =
krealloc_array(buffer_on_kernel_heap, next_size,
sizeof(*buffer_on_kernel_heap), GFP_ATOMIC);
// FCP is used for purposes unrelated to significant system
// resources (e.g. storage or networking), so allocation
// failures are not considered so critical.
if (!buffer_on_kernel_heap)
break;
if (handlers == buffer_on_kernel_stack) {
memcpy(buffer_on_kernel_heap, buffer_on_kernel_stack,
sizeof(buffer_on_kernel_stack));
}
handlers = buffer_on_kernel_heap;
buffer_size = next_size;
}
get_address_handler(handler);
handlers[count++] = handler;
}
}
}
for (i = 0; i < count; ++i) {
handler = handlers[i];
handler->address_callback(card, request, tcode, destination, source,
p->generation, offset, request->data,
request->length, handler->callback_data);
put_address_handler(handler);
}
if (handlers != buffer_on_kernel_stack)
kfree(handlers);
fw_send_response(card, request, RCODE_COMPLETE);
}
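The handler lifetime scheme above is the familiar kref-plus-completion teardown: lookups take a reference inside the RCU read-side section, the callback then runs without any lock held, and the remover waits until the last in-flight user drops its reference. A condensed sketch (struct name is illustrative):

#include <linux/kref.h>
#include <linux/completion.h>

struct handler_sketch {
	struct kref kref;
	struct completion done;
};

/* Runs when the final reference is dropped. */
static void handler_release(struct kref *kref)
{
	struct handler_sketch *h = container_of(kref, struct handler_sketch, kref);

	complete(&h->done);
}

/* Lookup side: reference taken under rcu_read_lock(), dropped after the
 * callback has run. */
static void handler_put(struct handler_sketch *h)
{
	kref_put(&h->kref, handler_release);
}

/* Removal side: after list_del_rcu() + synchronize_rcu(), drop the list's
 * reference and, if users remain, wait for the last one. */
static void handler_remove(struct handler_sketch *h)
{
	if (!kref_put(&h->kref, handler_release))
		wait_for_completion(&h->done);
}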


@@ -190,9 +190,7 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev)
struct mlxbf3_gpio_context *gs;
struct gpio_irq_chip *girq;
struct gpio_chip *gc;
char *colon_ptr;
int ret, irq;
long num;
gs = devm_kzalloc(dev, sizeof(*gs), GFP_KERNEL);
if (!gs)
@@ -229,39 +227,25 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev)
gc->owner = THIS_MODULE;
gc->add_pin_ranges = mlxbf3_gpio_add_pin_ranges;
colon_ptr = strchr(dev_name(dev), ':');
if (!colon_ptr) {
dev_err(dev, "invalid device name format\n");
return -EINVAL;
}
irq = platform_get_irq_optional(pdev, 0);
if (irq >= 0) {
girq = &gs->gc.irq;
gpio_irq_chip_set_chip(girq, &gpio_mlxbf3_irqchip);
girq->default_type = IRQ_TYPE_NONE;
/* This will let us handle the parent IRQ in the driver */
girq->num_parents = 0;
girq->parents = NULL;
girq->parent_handler = NULL;
girq->handler = handle_bad_irq;
ret = kstrtol(++colon_ptr, 16, &num);
if (ret) {
dev_err(dev, "invalid device instance\n");
return ret;
}
if (!num) {
irq = platform_get_irq(pdev, 0);
if (irq >= 0) {
girq = &gs->gc.irq;
gpio_irq_chip_set_chip(girq, &gpio_mlxbf3_irqchip);
girq->default_type = IRQ_TYPE_NONE;
/* This will let us handle the parent IRQ in the driver */
girq->num_parents = 0;
girq->parents = NULL;
girq->parent_handler = NULL;
girq->handler = handle_bad_irq;
/*
* Directly request the irq here instead of passing
* a flow-handler because the irq is shared.
*/
ret = devm_request_irq(dev, irq, mlxbf3_gpio_irq_handler,
IRQF_SHARED, dev_name(dev), gs);
if (ret)
return dev_err_probe(dev, ret, "failed to request IRQ");
}
/*
* Directly request the irq here instead of passing
* a flow-handler because the irq is shared.
*/
ret = devm_request_irq(dev, irq, mlxbf3_gpio_irq_handler,
IRQF_SHARED, dev_name(dev), gs);
if (ret)
return dev_err_probe(dev, ret, "failed to request IRQ");
}
platform_set_drvdata(pdev, gs);


@@ -1139,6 +1139,9 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
}
}
if (!amdgpu_vm_ready(vm))
return -EINVAL;
r = amdgpu_vm_clear_freed(adev, vm, NULL);
if (r)
return r;


@@ -88,8 +88,8 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size,
AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
AMDGPU_PTE_EXECUTABLE);
AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
AMDGPU_VM_PAGE_EXECUTABLE);
if (r) {
DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);


@@ -1039,15 +1039,28 @@ int psp_update_fw_reservation(struct psp_context *psp)
{
int ret;
uint64_t reserv_addr, reserv_addr_ext;
uint32_t reserv_size, reserv_size_ext;
uint32_t reserv_size, reserv_size_ext, mp0_ip_ver;
struct amdgpu_device *adev = psp->adev;
mp0_ip_ver = amdgpu_ip_version(adev, MP0_HWIP, 0);
if (amdgpu_sriov_vf(psp->adev))
return 0;
if ((amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(14, 0, 2)) &&
(amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(14, 0, 3)))
switch (mp0_ip_ver) {
case IP_VERSION(14, 0, 2):
if (adev->psp.sos.fw_version < 0x3b0e0d)
return 0;
break;
case IP_VERSION(14, 0, 3):
if (adev->psp.sos.fw_version < 0x3a0e14)
return 0;
break;
default:
return 0;
}
ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_ADDR, &reserv_addr, &reserv_size);
if (ret)


@@ -654,11 +654,10 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
* Check if all VM PDs/PTs are ready for updates
*
* Returns:
* True if VM is not evicting.
* True if VM is not evicting and all VM entities are not stopped
*/
bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
bool empty;
bool ret;
amdgpu_vm_eviction_lock(vm);
@ -666,10 +665,18 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
amdgpu_vm_eviction_unlock(vm);
spin_lock(&vm->status_lock);
empty = list_empty(&vm->evicted);
ret &= list_empty(&vm->evicted);
spin_unlock(&vm->status_lock);
return ret && empty;
spin_lock(&vm->immediate.lock);
ret &= !vm->immediate.stopped;
spin_unlock(&vm->immediate.lock);
spin_lock(&vm->delayed.lock);
ret &= !vm->delayed.stopped;
spin_unlock(&vm->delayed.lock);
return ret;
}
/**

View File

@ -648,9 +648,8 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
list_for_each_entry(block, &vres->blocks, link)
vis_usage += amdgpu_vram_mgr_vis_size(adev, block);
amdgpu_vram_mgr_do_reserve(man);
drm_buddy_free_list(mm, &vres->blocks, vres->flags);
amdgpu_vram_mgr_do_reserve(man);
mutex_unlock(&mgr->lock);
atomic64_sub(vis_usage, &mgr->vis_usage);

View File

@ -18,6 +18,7 @@ static void drm_aux_bridge_release(struct device *dev)
{
struct auxiliary_device *adev = to_auxiliary_dev(dev);
of_node_put(dev->of_node);
ida_free(&drm_aux_bridge_ida, adev->id);
kfree(adev);
@ -65,6 +66,7 @@ int drm_aux_bridge_register(struct device *parent)
ret = auxiliary_device_init(adev);
if (ret) {
of_node_put(adev->dev.of_node);
ida_free(&drm_aux_bridge_ida, adev->id);
kfree(adev);
return ret;

View File

@ -1227,6 +1227,7 @@ EXPORT_SYMBOL(drm_atomic_bridge_chain_check);
/**
* drm_bridge_detect - check if anything is attached to the bridge output
* @bridge: bridge control structure
* @connector: attached connector
*
* If the bridge supports output detection, as reported by the
* DRM_BRIDGE_OP_DETECT bridge ops flag, call &drm_bridge_funcs.detect for the

View File

@ -552,10 +552,6 @@ static void ilk_fbc_deactivate(struct intel_fbc *fbc)
if (dpfc_ctl & DPFC_CTL_EN) {
dpfc_ctl &= ~DPFC_CTL_EN;
intel_de_write(display, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl);
/* wa_18038517565 Enable DPFC clock gating after FBC disable */
if (display->platform.dg2 || DISPLAY_VER(display) >= 14)
fbc_compressor_clkgate_disable_wa(fbc, false);
}
}
@ -1710,6 +1706,10 @@ static void __intel_fbc_disable(struct intel_fbc *fbc)
__intel_fbc_cleanup_cfb(fbc);
/* wa_18038517565 Enable DPFC clock gating after FBC disable */
if (display->platform.dg2 || DISPLAY_VER(display) >= 14)
fbc_compressor_clkgate_disable_wa(fbc, false);
fbc->state.plane = NULL;
fbc->flip_pending = false;
fbc->busy_bits = 0;

View File

@ -3275,7 +3275,9 @@ static void intel_psr_configure_full_frame_update(struct intel_dp *intel_dp)
static void _psr_invalidate_handle(struct intel_dp *intel_dp)
{
if (intel_dp->psr.psr2_sel_fetch_enabled) {
struct intel_display *display = to_intel_display(intel_dp);
if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_enabled) {
if (!intel_dp->psr.psr2_sel_fetch_cff_enabled) {
intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
intel_psr_configure_full_frame_update(intel_dp);
@ -3361,7 +3363,7 @@ static void _psr_flush_handle(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
if (intel_dp->psr.psr2_sel_fetch_enabled) {
if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_enabled) {
if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
/* can we turn CFF off? */
if (intel_dp->psr.busy_frontbuffer_bits == 0)
@ -3378,11 +3380,13 @@ static void _psr_flush_handle(struct intel_dp *intel_dp)
* existing SU configuration
*/
intel_psr_configure_full_frame_update(intel_dp);
intel_psr_force_update(intel_dp);
} else {
intel_psr_exit(intel_dp);
}
intel_psr_force_update(intel_dp);
if (!intel_dp->psr.psr2_sel_fetch_enabled && !intel_dp->psr.active &&
if ((!intel_dp->psr.psr2_sel_fetch_enabled || DISPLAY_VER(display) >= 20) &&
!intel_dp->psr.busy_frontbuffer_bits)
queue_work(display->wq.unordered, &intel_dp->psr.work);
}

View File

@ -253,6 +253,7 @@ nouveau_check_bl_size(struct nouveau_drm *drm, struct nouveau_bo *nvbo,
int
nouveau_framebuffer_new(struct drm_device *dev,
const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *gem,
struct drm_framebuffer **pfb)
@ -260,7 +261,6 @@ nouveau_framebuffer_new(struct drm_device *dev,
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct drm_framebuffer *fb;
const struct drm_format_info *info;
unsigned int height, i;
uint32_t tile_mode;
uint8_t kind;
@ -295,9 +295,6 @@ nouveau_framebuffer_new(struct drm_device *dev,
kind = nvbo->kind;
}
info = drm_get_format_info(dev, mode_cmd->pixel_format,
mode_cmd->modifier[0]);
for (i = 0; i < info->num_planes; i++) {
height = drm_format_info_plane_height(info,
mode_cmd->height,
@ -321,7 +318,7 @@ nouveau_framebuffer_new(struct drm_device *dev,
if (!(fb = *pfb = kzalloc(sizeof(*fb), GFP_KERNEL)))
return -ENOMEM;
drm_helper_mode_fill_fb_struct(dev, fb, NULL, mode_cmd);
drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd);
fb->obj[0] = gem;
ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs);
@ -344,7 +341,7 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
if (!gem)
return ERR_PTR(-ENOENT);
ret = nouveau_framebuffer_new(dev, mode_cmd, gem, &fb);
ret = nouveau_framebuffer_new(dev, info, mode_cmd, gem, &fb);
if (ret == 0)
return fb;

View File

@ -8,8 +8,11 @@
#include <drm/drm_framebuffer.h>
struct drm_format_info;
int
nouveau_framebuffer_new(struct drm_device *dev,
const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *gem,
struct drm_framebuffer **pfb);

View File

@ -351,7 +351,7 @@ struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
}
}
fb = omap_framebuffer_init(dev, mode_cmd, bos);
fb = omap_framebuffer_init(dev, info, mode_cmd, bos);
if (IS_ERR(fb))
goto error;
@ -365,9 +365,9 @@ error:
}
struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
{
const struct drm_format_info *format = NULL;
struct omap_framebuffer *omap_fb = NULL;
struct drm_framebuffer *fb = NULL;
unsigned int pitch = mode_cmd->pitches[0];
@ -377,15 +377,12 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
dev, mode_cmd, mode_cmd->width, mode_cmd->height,
(char *)&mode_cmd->pixel_format);
format = drm_get_format_info(dev, mode_cmd->pixel_format,
mode_cmd->modifier[0]);
for (i = 0; i < ARRAY_SIZE(formats); i++) {
if (formats[i] == mode_cmd->pixel_format)
break;
}
if (!format || i == ARRAY_SIZE(formats)) {
if (i == ARRAY_SIZE(formats)) {
dev_dbg(dev->dev, "unsupported pixel format: %4.4s\n",
(char *)&mode_cmd->pixel_format);
ret = -EINVAL;
@ -399,7 +396,7 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
}
fb = &omap_fb->base;
omap_fb->format = format;
omap_fb->format = info;
mutex_init(&omap_fb->lock);
/*
@ -407,23 +404,23 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
* that the two planes of multiplane formats need the same number of
* bytes per pixel.
*/
if (format->num_planes == 2 && pitch != mode_cmd->pitches[1]) {
if (info->num_planes == 2 && pitch != mode_cmd->pitches[1]) {
dev_dbg(dev->dev, "pitches differ between planes 0 and 1\n");
ret = -EINVAL;
goto fail;
}
if (pitch % format->cpp[0]) {
if (pitch % info->cpp[0]) {
dev_dbg(dev->dev,
"buffer pitch (%u bytes) is not a multiple of pixel size (%u bytes)\n",
pitch, format->cpp[0]);
pitch, info->cpp[0]);
ret = -EINVAL;
goto fail;
}
for (i = 0; i < format->num_planes; i++) {
for (i = 0; i < info->num_planes; i++) {
struct plane *plane = &omap_fb->planes[i];
unsigned int vsub = i == 0 ? 1 : format->vsub;
unsigned int vsub = i == 0 ? 1 : info->vsub;
unsigned int size;
size = pitch * mode_cmd->height / vsub;
@ -440,7 +437,7 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
plane->dma_addr = 0;
}
drm_helper_mode_fill_fb_struct(dev, fb, NULL, mode_cmd);
drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd);
ret = drm_framebuffer_init(dev, fb, &omap_framebuffer_funcs);
if (ret) {

View File

@ -13,6 +13,7 @@ struct drm_connector;
struct drm_device;
struct drm_file;
struct drm_framebuffer;
struct drm_format_info;
struct drm_gem_object;
struct drm_mode_fb_cmd2;
struct drm_plane_state;
@ -23,6 +24,7 @@ struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
struct drm_file *file, const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd);
struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
int omap_framebuffer_pin(struct drm_framebuffer *fb);
void omap_framebuffer_unpin(struct drm_framebuffer *fb);

View File

@ -197,7 +197,10 @@ int omap_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
goto fail;
}
fb = omap_framebuffer_init(dev, &mode_cmd, &bo);
fb = omap_framebuffer_init(dev,
drm_get_format_info(dev, mode_cmd.pixel_format,
mode_cmd.modifier[0]),
&mode_cmd, &bo);
if (IS_ERR(fb)) {
dev_err(dev->dev, "failed to allocate fb\n");
/* note: if fb creation failed, we can't rely on fb destroy

View File

@ -432,7 +432,7 @@ static void panfrost_gem_debugfs_bo_print(struct panfrost_gem_object *bo,
if (!refcount)
return;
resident_size = bo->base.pages ? bo->base.base.size : 0;
resident_size = panfrost_gem_rss(&bo->base.base);
snprintf(creator_info, sizeof(creator_info),
"%s/%d", bo->debugfs.creator.process_name, bo->debugfs.creator.tgid);

View File

@ -1297,12 +1297,13 @@ static const struct drm_framebuffer_funcs radeon_fb_funcs = {
int
radeon_framebuffer_init(struct drm_device *dev,
struct drm_framebuffer *fb,
const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
int ret;
fb->obj[0] = obj;
drm_helper_mode_fill_fb_struct(dev, fb, NULL, mode_cmd);
drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd);
ret = drm_framebuffer_init(dev, fb, &radeon_fb_funcs);
if (ret) {
fb->obj[0] = NULL;
@ -1341,7 +1342,7 @@ radeon_user_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-ENOMEM);
}
ret = radeon_framebuffer_init(dev, fb, mode_cmd, obj);
ret = radeon_framebuffer_init(dev, fb, info, mode_cmd, obj);
if (ret) {
kfree(fb);
drm_gem_object_put(obj);

View File

@ -53,10 +53,10 @@ static void radeon_fbdev_destroy_pinned_object(struct drm_gem_object *gobj)
}
static int radeon_fbdev_create_pinned_object(struct drm_fb_helper *fb_helper,
const struct drm_format_info *info,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object **gobj_p)
{
const struct drm_format_info *info;
struct radeon_device *rdev = fb_helper->dev->dev_private;
struct drm_gem_object *gobj = NULL;
struct radeon_bo *rbo = NULL;
@ -67,8 +67,6 @@ static int radeon_fbdev_create_pinned_object(struct drm_fb_helper *fb_helper,
int height = mode_cmd->height;
u32 cpp;
info = drm_get_format_info(rdev_to_drm(rdev), mode_cmd->pixel_format,
mode_cmd->modifier[0]);
cpp = info->cpp[0];
/* need to align pitch with crtc limits */
@ -206,6 +204,7 @@ int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes)
{
struct radeon_device *rdev = fb_helper->dev->dev_private;
const struct drm_format_info *format_info;
struct drm_mode_fb_cmd2 mode_cmd = { };
struct fb_info *info;
struct drm_gem_object *gobj;
@ -224,7 +223,9 @@ int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
sizes->surface_depth);
ret = radeon_fbdev_create_pinned_object(fb_helper, &mode_cmd, &gobj);
format_info = drm_get_format_info(rdev_to_drm(rdev), mode_cmd.pixel_format,
mode_cmd.modifier[0]);
ret = radeon_fbdev_create_pinned_object(fb_helper, format_info, &mode_cmd, &gobj);
if (ret) {
DRM_ERROR("failed to create fbcon object %d\n", ret);
return ret;
@ -236,7 +237,7 @@ int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
ret = -ENOMEM;
goto err_radeon_fbdev_destroy_pinned_object;
}
ret = radeon_framebuffer_init(rdev_to_drm(rdev), fb, &mode_cmd, gobj);
ret = radeon_framebuffer_init(rdev_to_drm(rdev), fb, format_info, &mode_cmd, gobj);
if (ret) {
DRM_ERROR("failed to initialize framebuffer %d\n", ret);
goto err_kfree;

View File

@ -40,6 +40,7 @@
struct drm_fb_helper;
struct drm_fb_helper_surface_size;
struct drm_format_info;
struct edid;
struct drm_edid;
@ -890,6 +891,7 @@ extern void
radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on);
int radeon_framebuffer_init(struct drm_device *dev,
struct drm_framebuffer *rfb,
const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj);

View File

@ -7,5 +7,6 @@
#define GTTMMADR_BAR 0 /* MMIO + GTT */
#define LMEM_BAR 2 /* VRAM */
#define VF_LMEM_BAR 9 /* VF VRAM */
#endif

View File

@ -332,6 +332,7 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, u32 attr, int channe
int ret = 0;
u32 reg_val, max;
struct xe_reg rapl_limit;
u64 max_supp_power_limit = 0;
mutex_lock(&hwmon->hwmon_lock);
@ -356,6 +357,20 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, u32 attr, int channe
goto unlock;
}
/*
* If the sysfs value exceeds the maximum pcode supported power limit value, clamp it to
* the supported maximum (U12.3 format).
	 * This avoids truncation in the reg_val calculation below and ensures a valid
	 * power limit is sent to the pcode, which clamps it to the card-supported value.
*/
max_supp_power_limit = ((PWR_LIM_VAL) >> hwmon->scl_shift_power) * SF_POWER;
if (value > max_supp_power_limit) {
value = max_supp_power_limit;
drm_info(&hwmon->xe->drm,
"Power limit clamped as selected %s exceeds channel %d limit\n",
PWR_ATTR_TO_STR(attr), channel);
}
/* Computation in 64-bits to avoid overflow. Round to nearest. */
reg_val = DIV_ROUND_CLOSEST_ULL((u64)value << hwmon->scl_shift_power, SF_POWER);
@ -739,9 +754,23 @@ static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon, int channel,
{
int ret;
u32 uval;
u64 max_crit_power_curr = 0;
mutex_lock(&hwmon->hwmon_lock);
/*
* If the sysfs value exceeds the pcode mailbox cmd POWER_SETUP_SUBCOMMAND_WRITE_I1
* max supported value, clamp it to the command's max (U10.6 format).
	 * This avoids truncation in the uval calculation below and ensures a valid power
	 * limit is sent to the pcode, which clamps it to the card-supported value.
*/
max_crit_power_curr = (POWER_SETUP_I1_DATA_MASK >> POWER_SETUP_I1_SHIFT) * scale_factor;
if (value > max_crit_power_curr) {
value = max_crit_power_curr;
drm_info(&hwmon->xe->drm,
"Power limit clamped as selected exceeds channel %d limit\n",
channel);
}
uval = DIV_ROUND_CLOSEST_ULL(value << POWER_SETUP_I1_SHIFT, scale_factor);
ret = xe_hwmon_pcode_write_i1(hwmon, uval);
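
Both clamps above protect fixed-point conversions: the limit fields take U12.3 and U10.6 values, so any input above the representable maximum would be silently truncated by the shift. A standalone sketch of the idea, with illustrative macro names (not the driver's):

#include <linux/bits.h>
#include <linux/math.h>
#include <linux/types.h>

#define DEMO_LIM_MASK	GENMASK(14, 0)	/* U12.3: 12 integer + 3 fraction bits */
#define DEMO_SHIFT	3		/* number of fraction bits */
#define DEMO_SCALE	1000000		/* microwatts per register watt unit */

static u32 demo_encode_limit(u64 value_uw)
{
	/* Largest whole-watt value the field can hold, in microwatts
	 * (the fractional remainder is ignored, as in the driver). */
	u64 max_uw = (DEMO_LIM_MASK >> DEMO_SHIFT) * (u64)DEMO_SCALE;

	if (value_uw > max_uw)
		value_uw = max_uw;

	/* Round to the nearest representable U12.3 step. */
	return DIV_ROUND_CLOSEST_ULL(value_uw << DEMO_SHIFT, DEMO_SCALE);
}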

View File

@ -1820,15 +1820,19 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
if (!IS_ALIGNED(len, XE_CACHELINE_BYTES) ||
!IS_ALIGNED((unsigned long)buf + offset, XE_CACHELINE_BYTES)) {
int buf_offset = 0;
void *bounce;
int err;
BUILD_BUG_ON(!is_power_of_2(XE_CACHELINE_BYTES));
bounce = kmalloc(XE_CACHELINE_BYTES, GFP_KERNEL);
if (!bounce)
return -ENOMEM;
/*
		 * Less than ideal for large unaligned accesses, but these should
		 * be fairly rare; we can fix this up if they become common.
*/
do {
u8 bounce[XE_CACHELINE_BYTES];
void *ptr = (void *)bounce;
int err;
int copy_bytes = min_t(int, bytes_left,
XE_CACHELINE_BYTES -
(offset & XE_CACHELINE_MASK));
@ -1837,22 +1841,22 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
err = xe_migrate_access_memory(m, bo,
offset &
~XE_CACHELINE_MASK,
(void *)ptr,
sizeof(bounce), 0);
bounce,
XE_CACHELINE_BYTES, 0);
if (err)
return err;
break;
if (write) {
memcpy(ptr + ptr_offset, buf + buf_offset, copy_bytes);
memcpy(bounce + ptr_offset, buf + buf_offset, copy_bytes);
err = xe_migrate_access_memory(m, bo,
offset & ~XE_CACHELINE_MASK,
(void *)ptr,
sizeof(bounce), write);
bounce,
XE_CACHELINE_BYTES, write);
if (err)
return err;
break;
} else {
memcpy(buf + buf_offset, ptr + ptr_offset,
memcpy(buf + buf_offset, bounce + ptr_offset,
copy_bytes);
}
@ -1861,7 +1865,8 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
offset += copy_bytes;
} while (bytes_left);
return 0;
kfree(bounce);
return err;
}
dma_addr = xe_migrate_dma_map(xe, buf, len + page_offset, write);
@ -1882,8 +1887,11 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
else
current_bytes = min_t(int, bytes_left, cursor.size);
if (fence)
dma_fence_put(fence);
if (current_bytes & ~PAGE_MASK) {
int pitch = 4;
current_bytes = min_t(int, current_bytes, S16_MAX * pitch);
}
__fence = xe_migrate_vram(m, current_bytes,
(unsigned long)buf & ~PAGE_MASK,
@ -1892,11 +1900,15 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
XE_MIGRATE_COPY_TO_VRAM :
XE_MIGRATE_COPY_TO_SRAM);
if (IS_ERR(__fence)) {
if (fence)
if (fence) {
dma_fence_wait(fence, false);
dma_fence_put(fence);
}
fence = __fence;
goto out_err;
}
dma_fence_put(fence);
fence = __fence;
buf += current_bytes;
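
The rewritten unaligned path above is a classic read-modify-write bounce buffer, moved from the stack to the heap. A condensed sketch of the technique, where demo_aligned_rw() is a hypothetical stand-in for the aligned accessor (xe_migrate_access_memory() in the driver, which the unaligned path re-enters):

#include <linux/minmax.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical aligned accessor: transfers len bytes at an aligned offset. */
int demo_aligned_rw(u64 offset, void *buf, size_t len, bool write);

static int demo_unaligned_write(u64 offset, const void *src, size_t len)
{
	const size_t line = 64;	/* assumed cacheline granule */
	size_t done = 0;
	u8 *bounce;
	int err = 0;

	bounce = kmalloc(line, GFP_KERNEL);
	if (!bounce)
		return -ENOMEM;

	while (done < len) {
		u64 base = (offset + done) & ~(u64)(line - 1);
		size_t head = offset + done - base;
		size_t chunk = min(len - done, line - head);

		/* Read the whole line, patch the covered bytes, write it back. */
		err = demo_aligned_rw(base, bounce, line, false);
		if (err)
			break;
		memcpy(bounce + head, src + done, chunk);
		err = demo_aligned_rw(base, bounce, line, true);
		if (err)
			break;
		done += chunk;
	}

	kfree(bounce);
	return err;
}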

View File

@ -3,6 +3,10 @@
* Copyright © 2023-2024 Intel Corporation
*/
#include <linux/bitops.h>
#include <linux/pci.h>
#include "regs/xe_bars.h"
#include "xe_assert.h"
#include "xe_device.h"
#include "xe_gt_sriov_pf_config.h"
@ -128,6 +132,18 @@ static void pf_engine_activity_stats(struct xe_device *xe, unsigned int num_vfs,
}
}
static int resize_vf_vram_bar(struct xe_device *xe, int num_vfs)
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
u32 sizes;
sizes = pci_iov_vf_bar_get_sizes(pdev, VF_LMEM_BAR, num_vfs);
if (!sizes)
return 0;
return pci_iov_vf_bar_set_size(pdev, VF_LMEM_BAR, __fls(sizes));
}
static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
@ -158,6 +174,12 @@ static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
if (err < 0)
goto failed;
if (IS_DGFX(xe)) {
err = resize_vf_vram_bar(xe, num_vfs);
if (err)
xe_sriov_info(xe, "Failed to set VF LMEM BAR size: %d\n", err);
}
err = pci_enable_sriov(pdev, num_vfs);
if (err < 0)
goto failed;

View File

@ -54,10 +54,10 @@ xe_shrinker_mod_pages(struct xe_shrinker *shrinker, long shrinkable, long purgea
write_unlock(&shrinker->lock);
}
static s64 xe_shrinker_walk(struct xe_device *xe,
struct ttm_operation_ctx *ctx,
const struct xe_bo_shrink_flags flags,
unsigned long to_scan, unsigned long *scanned)
static s64 __xe_shrinker_walk(struct xe_device *xe,
struct ttm_operation_ctx *ctx,
const struct xe_bo_shrink_flags flags,
unsigned long to_scan, unsigned long *scanned)
{
unsigned int mem_type;
s64 freed = 0, lret;
@ -93,6 +93,48 @@ static s64 xe_shrinker_walk(struct xe_device *xe,
return freed;
}
/*
 * Try shrinking idle objects without writeback first. If that is not
 * sufficient, also try non-idle objects, and finally, if that is still not
 * enough, add writeback. This avoids stalls and explicit writebacks under
 * light or moderate memory pressure.
*/
static s64 xe_shrinker_walk(struct xe_device *xe,
struct ttm_operation_ctx *ctx,
const struct xe_bo_shrink_flags flags,
unsigned long to_scan, unsigned long *scanned)
{
bool no_wait_gpu = true;
struct xe_bo_shrink_flags save_flags = flags;
s64 lret, freed;
swap(no_wait_gpu, ctx->no_wait_gpu);
save_flags.writeback = false;
lret = __xe_shrinker_walk(xe, ctx, save_flags, to_scan, scanned);
swap(no_wait_gpu, ctx->no_wait_gpu);
if (lret < 0 || *scanned >= to_scan)
return lret;
freed = lret;
if (!ctx->no_wait_gpu) {
lret = __xe_shrinker_walk(xe, ctx, save_flags, to_scan, scanned);
if (lret < 0)
return lret;
freed += lret;
if (*scanned >= to_scan)
return freed;
}
if (flags.writeback) {
lret = __xe_shrinker_walk(xe, ctx, flags, to_scan, scanned);
if (lret < 0)
return lret;
freed += lret;
}
return freed;
}
static unsigned long
xe_shrinker_count(struct shrinker *shrink, struct shrink_control *sc)
{
@ -199,6 +241,7 @@ static unsigned long xe_shrinker_scan(struct shrinker *shrink, struct shrink_con
runtime_pm = xe_shrinker_runtime_pm_get(shrinker, true, 0, can_backup);
shrink_flags.purge = false;
lret = xe_shrinker_walk(shrinker->xe, &ctx, shrink_flags,
nr_to_scan, &nr_scanned);
if (lret >= 0)

View File

@ -1679,7 +1679,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
};
static const struct x86_cpu_id intel_mwait_ids[] __initconst = {
X86_MATCH_VENDOR_FAM_FEATURE(INTEL, 6, X86_FEATURE_MWAIT, NULL),
X86_MATCH_VENDOR_FAM_FEATURE(INTEL, X86_FAMILY_ANY, X86_FEATURE_MWAIT, NULL),
{}
};

View File

@ -36,15 +36,14 @@
static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
{
regmap_update_bits(ksz_regmap_8(dev), addr, bits, set ? bits : 0);
ksz_rmw8(dev, addr, bits, set ? bits : 0);
}
static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
bool set)
{
regmap_update_bits(ksz_regmap_8(dev),
dev->dev_ops->get_port_addr(port, offset),
bits, set ? bits : 0);
ksz_rmw8(dev, dev->dev_ops->get_port_addr(port, offset), bits,
set ? bits : 0);
}
/**
@ -1955,16 +1954,19 @@ int ksz8_setup(struct dsa_switch *ds)
ksz_cfg(dev, S_LINK_AGING_CTRL, SW_LINK_AUTO_AGING, true);
/* Enable aggressive back off algorithm in half duplex mode. */
regmap_update_bits(ksz_regmap_8(dev), REG_SW_CTRL_1,
SW_AGGR_BACKOFF, SW_AGGR_BACKOFF);
ret = ksz_rmw8(dev, REG_SW_CTRL_1, SW_AGGR_BACKOFF, SW_AGGR_BACKOFF);
if (ret)
return ret;
/*
* Make sure unicast VLAN boundary is set as default and
* enable no excessive collision drop.
*/
regmap_update_bits(ksz_regmap_8(dev), REG_SW_CTRL_2,
UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP,
UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP);
ret = ksz_rmw8(dev, REG_SW_CTRL_2,
UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP,
UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP);
if (ret)
return ret;
ksz_cfg(dev, S_REPLACE_VID_CTRL, SW_REPLACE_VID, false);

View File

@ -1447,6 +1447,7 @@ static const struct regmap_range ksz8873_valid_regs[] = {
regmap_reg_range(0x3f, 0x3f),
/* advanced control registers */
regmap_reg_range(0x43, 0x43),
regmap_reg_range(0x60, 0x6f),
regmap_reg_range(0x70, 0x75),
regmap_reg_range(0x76, 0x78),

View File

@ -926,15 +926,21 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
static netmem_ref __bnxt_alloc_rx_netmem(struct bnxt *bp, dma_addr_t *mapping,
struct bnxt_rx_ring_info *rxr,
unsigned int *offset,
gfp_t gfp)
{
netmem_ref netmem;
netmem = page_pool_alloc_netmems(rxr->page_pool, gfp);
if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
netmem = page_pool_alloc_frag_netmem(rxr->page_pool, offset, BNXT_RX_PAGE_SIZE, gfp);
} else {
netmem = page_pool_alloc_netmems(rxr->page_pool, gfp);
*offset = 0;
}
if (!netmem)
return 0;
*mapping = page_pool_get_dma_addr_netmem(netmem);
*mapping = page_pool_get_dma_addr_netmem(netmem) + *offset;
return netmem;
}
@ -1029,7 +1035,7 @@ static int bnxt_alloc_rx_netmem(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
dma_addr_t mapping;
netmem_ref netmem;
netmem = __bnxt_alloc_rx_netmem(bp, &mapping, rxr, gfp);
netmem = __bnxt_alloc_rx_netmem(bp, &mapping, rxr, &offset, gfp);
if (!netmem)
return -ENOMEM;
@ -3819,7 +3825,6 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
if (BNXT_RX_PAGE_MODE(bp))
pp.pool_size += bp->rx_ring_size / rx_size_fac;
pp.nid = numa_node;
pp.napi = &rxr->bnapi->napi;
pp.netdev = bp->dev;
pp.dev = &bp->pdev->dev;
pp.dma_dir = bp->rx_dir;
@ -3851,6 +3856,12 @@ err_destroy_pp:
return PTR_ERR(pool);
}
static void bnxt_enable_rx_page_pool(struct bnxt_rx_ring_info *rxr)
{
page_pool_enable_direct_recycling(rxr->head_pool, &rxr->bnapi->napi);
page_pool_enable_direct_recycling(rxr->page_pool, &rxr->bnapi->napi);
}
static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
u16 mem_size;
@ -3889,6 +3900,7 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
rc = bnxt_alloc_rx_page_pool(bp, rxr, cpu_node);
if (rc)
return rc;
bnxt_enable_rx_page_pool(rxr);
rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
if (rc < 0)
@ -16031,6 +16043,7 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
goto err_reset;
}
bnxt_enable_rx_page_pool(rxr);
napi_enable_locked(&bnapi->napi);
bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
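
The allocator change above is the usual page-pool fragmenting pattern: when the device's buffer unit is smaller than the system page (e.g. 4K rx buffers on a 64K-page kernel), carve fragments out of one pool page and fold the fragment offset into the DMA address handed to hardware. A minimal sketch using the same helpers, with hypothetical names elsewhere:

#include <net/page_pool/helpers.h>

static netmem_ref demo_alloc_rx_buf(struct page_pool *pool,
				    unsigned int buf_size,
				    dma_addr_t *mapping,
				    unsigned int *offset)
{
	netmem_ref netmem;

	if (PAGE_SIZE > buf_size) {
		/* Several buffers share one page; *offset locates this one. */
		netmem = page_pool_alloc_frag_netmem(pool, offset, buf_size,
						     GFP_ATOMIC);
	} else {
		netmem = page_pool_alloc_netmems(pool, GFP_ATOMIC);
		*offset = 0;
	}
	if (!netmem)
		return 0;

	/* The DMA address must point at the fragment, not the page start. */
	*mapping = page_pool_get_dma_addr_netmem(netmem) + *offset;
	return netmem;
}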

View File

@ -53,9 +53,11 @@ static int hbg_reset_prepare(struct hbg_priv *priv, enum hbg_reset_type type)
{
int ret;
ASSERT_RTNL();
if (test_and_set_bit(HBG_NIC_STATE_RESETTING, &priv->state))
return -EBUSY;
if (netif_running(priv->netdev)) {
clear_bit(HBG_NIC_STATE_RESETTING, &priv->state);
dev_warn(&priv->pdev->dev,
"failed to reset because port is up\n");
return -EBUSY;
@ -64,7 +66,6 @@ static int hbg_reset_prepare(struct hbg_priv *priv, enum hbg_reset_type type)
netif_device_detach(priv->netdev);
priv->reset_type = type;
set_bit(HBG_NIC_STATE_RESETTING, &priv->state);
clear_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state);
ret = hbg_hw_event_notify(priv, HBG_HW_EVENT_RESET);
if (ret) {
@ -84,29 +85,26 @@ static int hbg_reset_done(struct hbg_priv *priv, enum hbg_reset_type type)
type != priv->reset_type)
return 0;
ASSERT_RTNL();
clear_bit(HBG_NIC_STATE_RESETTING, &priv->state);
ret = hbg_rebuild(priv);
if (ret) {
priv->stats.reset_fail_cnt++;
set_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state);
clear_bit(HBG_NIC_STATE_RESETTING, &priv->state);
dev_err(&priv->pdev->dev, "failed to rebuild after reset\n");
return ret;
}
netif_device_attach(priv->netdev);
clear_bit(HBG_NIC_STATE_RESETTING, &priv->state);
dev_info(&priv->pdev->dev, "reset done\n");
return ret;
}
/* must be protected by rtnl lock */
int hbg_reset(struct hbg_priv *priv)
{
int ret;
ASSERT_RTNL();
ret = hbg_reset_prepare(priv, HBG_RESET_TYPE_FUNCTION);
if (ret)
return ret;
@ -171,7 +169,6 @@ static void hbg_pci_err_reset_prepare(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct hbg_priv *priv = netdev_priv(netdev);
rtnl_lock();
hbg_reset_prepare(priv, HBG_RESET_TYPE_FLR);
}
@ -181,7 +178,6 @@ static void hbg_pci_err_reset_done(struct pci_dev *pdev)
struct hbg_priv *priv = netdev_priv(netdev);
hbg_reset_done(priv, HBG_RESET_TYPE_FLR);
rtnl_unlock();
}
static const struct pci_error_handlers hbg_pci_err_handler = {

View File

@ -12,6 +12,8 @@
#define HBG_HW_EVENT_WAIT_TIMEOUT_US (2 * 1000 * 1000)
#define HBG_HW_EVENT_WAIT_INTERVAL_US (10 * 1000)
#define HBG_MAC_LINK_WAIT_TIMEOUT_US (500 * 1000)
#define HBG_MAC_LINK_WAIT_INTERVAL_US (5 * 1000)
/* little endian or big endian.
* ctrl means packet description, data means skb packet data
*/
@ -228,6 +230,9 @@ void hbg_hw_fill_buffer(struct hbg_priv *priv, u32 buffer_dma_addr)
void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex)
{
u32 link_status;
int ret;
hbg_hw_mac_enable(priv, HBG_STATUS_DISABLE);
hbg_reg_write_field(priv, HBG_REG_PORT_MODE_ADDR,
@ -239,8 +244,14 @@ void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex)
hbg_hw_mac_enable(priv, HBG_STATUS_ENABLE);
if (!hbg_reg_read_field(priv, HBG_REG_AN_NEG_STATE_ADDR,
HBG_REG_AN_NEG_STATE_NP_LINK_OK_B))
	/* wait for the MAC link to come up */
ret = readl_poll_timeout(priv->io_base + HBG_REG_AN_NEG_STATE_ADDR,
link_status,
FIELD_GET(HBG_REG_AN_NEG_STATE_NP_LINK_OK_B,
link_status),
HBG_MAC_LINK_WAIT_INTERVAL_US,
HBG_MAC_LINK_WAIT_TIMEOUT_US);
if (ret)
hbg_np_link_fail_task_schedule(priv);
}
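
readl_poll_timeout() is the standard way to express the busy-wait this patch introduces: it re-reads a register until a condition holds or a deadline passes, sleeping between reads. A self-contained sketch with made-up register and bit names:

#include <linux/bits.h>
#include <linux/iopoll.h>

#define DEMO_STATUS_REG		0x0100
#define DEMO_STATUS_READY	BIT(0)

static int demo_wait_ready(void __iomem *base)
{
	u32 val;

	/* Poll every 5 ms, give up after 500 ms; returns -ETIMEDOUT on
	 * failure, 0 once the condition becomes true. */
	return readl_poll_timeout(base + DEMO_STATUS_REG, val,
				  val & DEMO_STATUS_READY,
				  5 * 1000, 500 * 1000);
}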

View File

@ -29,7 +29,12 @@ static inline bool hbg_fifo_is_full(struct hbg_priv *priv, enum hbg_dir dir)
static inline u32 hbg_get_queue_used_num(struct hbg_ring *ring)
{
return (ring->ntu + ring->len - ring->ntc) % ring->len;
u32 len = READ_ONCE(ring->len);
if (!len)
return 0;
return (READ_ONCE(ring->ntu) + len - READ_ONCE(ring->ntc)) % len;
}
netdev_tx_t hbg_net_start_xmit(struct sk_buff *skb, struct net_device *netdev);
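
The fixed helper above is the standard occupancy formula for a circular ring, hardened against a concurrent teardown that zeroes the length. Restated as a minimal sketch (field meanings assumed from the driver):

#include <linux/compiler.h>
#include <linux/types.h>

struct demo_ring {
	u32 ntu;	/* next to use (producer index) */
	u32 ntc;	/* next to clean (consumer index) */
	u32 len;	/* ring size; may drop to 0 during reset */
};

static inline u32 demo_ring_used(struct demo_ring *ring)
{
	u32 len = READ_ONCE(ring->len);

	/* Snapshot len once: re-reading ring->len for the division after
	 * the check would reintroduce the divide-by-zero race. */
	if (!len)
		return 0;

	return (READ_ONCE(ring->ntu) + len - READ_ONCE(ring->ntc)) % len;
}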

View File

@ -543,6 +543,7 @@ int ixgbe_devlink_register_port(struct ixgbe_adapter *adapter)
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
attrs.phys.port_number = adapter->hw.bus.func;
attrs.no_phys_port_name = 1;
ixgbe_devlink_set_switch_id(adapter, &attrs.switch_id);
devlink_port_attrs_set(devlink_port, &attrs);

View File

@ -330,15 +330,11 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
ret = devm_clk_bulk_get_all(&pdev->dev, &plat_dat->clks);
ret = devm_clk_bulk_get_all_enabled(&pdev->dev, &plat_dat->clks);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret, "Failed to retrieve all required clocks\n");
return dev_err_probe(&pdev->dev, ret, "Failed to retrieve and enable all required clocks\n");
plat_dat->num_clks = ret;
ret = clk_bulk_prepare_enable(plat_dat->num_clks, plat_dat->clks);
if (ret)
return dev_err_probe(&pdev->dev, ret, "Failed to enable clocks\n");
plat_dat->stmmac_clk = stmmac_pltfr_find_clk(plat_dat,
data->stmmac_clk_name);
@ -346,7 +342,6 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
ret = data->probe(pdev, plat_dat, &stmmac_res);
if (ret < 0) {
dev_err_probe(&pdev->dev, ret, "failed to probe subdriver\n");
clk_bulk_disable_unprepare(plat_dat->num_clks, plat_dat->clks);
return ret;
}
@ -370,15 +365,11 @@ remove:
static void dwc_eth_dwmac_remove(struct platform_device *pdev)
{
const struct dwc_eth_dwmac_data *data = device_get_match_data(&pdev->dev);
struct plat_stmmacenet_data *plat_dat = dev_get_platdata(&pdev->dev);
stmmac_dvr_remove(&pdev->dev);
if (data->remove)
data->remove(pdev);
if (plat_dat)
clk_bulk_disable_unprepare(plat_dat->num_clks, plat_dat->clks);
}
static const struct of_device_id dwc_eth_dwmac_match[] = {

View File

@ -1765,11 +1765,15 @@ err_gmac_powerdown:
static void rk_gmac_remove(struct platform_device *pdev)
{
struct rk_priv_data *bsp_priv = get_stmmac_bsp_priv(&pdev->dev);
struct stmmac_priv *priv = netdev_priv(platform_get_drvdata(pdev));
struct rk_priv_data *bsp_priv = priv->plat->bsp_priv;
stmmac_dvr_remove(&pdev->dev);
rk_gmac_powerdown(bsp_priv);
if (priv->plat->phy_node && bsp_priv->integrated_phy)
clk_put(bsp_priv->clk_phy);
}
#ifdef CONFIG_PM_SLEEP

View File

@ -211,6 +211,7 @@ static int thead_dwmac_probe(struct platform_device *pdev)
struct stmmac_resources stmmac_res;
struct plat_stmmacenet_data *plat;
struct thead_dwmac *dwmac;
struct clk *apb_clk;
void __iomem *apb;
int ret;
@ -224,6 +225,19 @@ static int thead_dwmac_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, PTR_ERR(plat),
"dt configuration failed\n");
/*
* The APB clock is essential for accessing glue registers. However,
* old devicetrees don't describe it correctly. We continue to probe
* and emit a warning if it isn't present.
*/
apb_clk = devm_clk_get_enabled(&pdev->dev, "apb");
if (PTR_ERR(apb_clk) == -ENOENT)
dev_warn(&pdev->dev,
"cannot get apb clock, link may break after speed changes\n");
else if (IS_ERR(apb_clk))
return dev_err_probe(&pdev->dev, PTR_ERR(apb_clk),
"failed to get apb clock\n");
dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
if (!dwmac)
return -ENOMEM;
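
The clock lookup above follows a tolerant-probe idiom for resources that newer devicetrees describe but older ones omit: treat -ENOENT as a warning and continue, propagate everything else (including -EPROBE_DEFER). A minimal sketch of the same shape:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int demo_get_optional_bus_clk(struct device *dev)
{
	struct clk *clk;

	/* Acquired and enabled for the device's lifetime. */
	clk = devm_clk_get_enabled(dev, "apb");
	if (PTR_ERR(clk) == -ENOENT) {
		dev_warn(dev, "apb clock missing, continuing for legacy DT\n");
		return 0;
	}

	return PTR_ERR_OR_ZERO(clk);
}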

View File

@ -621,7 +621,8 @@ exit:
static int icss_iep_extts_enable(struct icss_iep *iep, u32 index, int on)
{
u32 val, cap, ret = 0;
u32 val, cap;
int ret = 0;
mutex_lock(&iep->ptp_clk_mutex);

View File

@ -50,6 +50,8 @@
/* CTRLMMR_ICSSG_RGMII_CTRL register bits */
#define ICSSG_CTRL_RGMII_ID_MODE BIT(24)
static void emac_adjust_link(struct net_device *ndev);
static int emac_get_tx_ts(struct prueth_emac *emac,
struct emac_tx_ts_response *rsp)
{
@ -229,6 +231,10 @@ static int prueth_emac_common_start(struct prueth *prueth)
ret = icssg_config(prueth, emac, slice);
if (ret)
goto disable_class;
mutex_lock(&emac->ndev->phydev->lock);
emac_adjust_link(emac->ndev);
mutex_unlock(&emac->ndev->phydev->lock);
}
ret = prueth_emac_start(prueth);

View File

@ -138,7 +138,7 @@ static inline struct net_device *bpq_get_ax25_dev(struct net_device *dev)
static inline int dev_is_ethdev(struct net_device *dev)
{
return dev->type == ARPHRD_ETHER && strncmp(dev->name, "dummy", 5);
return dev->type == ARPHRD_ETHER && !netdev_need_ops_lock(dev);
}
/* ------------------------------------------------------------------------ */

View File

@ -1061,6 +1061,7 @@ struct net_device_context {
struct net_device __rcu *vf_netdev;
struct netvsc_vf_pcpu_stats __percpu *vf_stats;
struct delayed_work vf_takeover;
struct delayed_work vfns_work;
/* 1: allocated, serial number is valid. 0: not allocated */
u32 vf_alloc;
@ -1075,6 +1076,8 @@ struct net_device_context {
struct netvsc_device_info *saved_netvsc_dev_info;
};
void netvsc_vfns_work(struct work_struct *w);
/* Azure hosts don't support non-TCP port numbers in hashing for fragmented
* packets. We can use ethtool to change UDP hash level when necessary.
*/

View File

@ -2522,6 +2522,7 @@ static int netvsc_probe(struct hv_device *dev,
spin_lock_init(&net_device_ctx->lock);
INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup);
INIT_DELAYED_WORK(&net_device_ctx->vfns_work, netvsc_vfns_work);
net_device_ctx->vf_stats
= netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats);
@ -2666,6 +2667,8 @@ static void netvsc_remove(struct hv_device *dev)
cancel_delayed_work_sync(&ndev_ctx->dwork);
rtnl_lock();
cancel_delayed_work_sync(&ndev_ctx->vfns_work);
nvdev = rtnl_dereference(ndev_ctx->nvdev);
if (nvdev) {
cancel_work_sync(&nvdev->subchan_work);
@ -2707,6 +2710,7 @@ static int netvsc_suspend(struct hv_device *dev)
cancel_delayed_work_sync(&ndev_ctx->dwork);
rtnl_lock();
cancel_delayed_work_sync(&ndev_ctx->vfns_work);
nvdev = rtnl_dereference(ndev_ctx->nvdev);
if (nvdev == NULL) {
@ -2800,6 +2804,27 @@ static void netvsc_event_set_vf_ns(struct net_device *ndev)
}
}
void netvsc_vfns_work(struct work_struct *w)
{
struct net_device_context *ndev_ctx =
container_of(w, struct net_device_context, vfns_work.work);
struct net_device *ndev;
if (!rtnl_trylock()) {
schedule_delayed_work(&ndev_ctx->vfns_work, 1);
return;
}
ndev = hv_get_drvdata(ndev_ctx->device_ctx);
if (!ndev)
goto out;
netvsc_event_set_vf_ns(ndev);
out:
rtnl_unlock();
}
/*
* On Hyper-V, every VF interface is matched with a corresponding
* synthetic interface. The synthetic interface is presented first
@ -2810,10 +2835,12 @@ static int netvsc_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
struct net_device_context *ndev_ctx;
int ret = 0;
if (event_dev->netdev_ops == &device_ops && event == NETDEV_REGISTER) {
netvsc_event_set_vf_ns(event_dev);
ndev_ctx = netdev_priv(event_dev);
schedule_delayed_work(&ndev_ctx->vfns_work, 0);
return NOTIFY_DONE;
}
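
The new work item shows the usual way to take the RTNL lock from deferred context without deadlocking against paths that hold RTNL while flushing the work: try the lock, and if it is contended, reschedule instead of blocking. A distilled sketch with hypothetical names:

#include <linux/rtnetlink.h>
#include <linux/workqueue.h>

struct demo_ctx {
	struct delayed_work work;
};

static void demo_work_fn(struct work_struct *w)
{
	struct demo_ctx *ctx = container_of(w, struct demo_ctx, work.work);

	if (!rtnl_trylock()) {
		/* Someone holds RTNL; retry one jiffy later rather than
		 * sleeping here and risking an ABBA deadlock with a flusher. */
		schedule_delayed_work(&ctx->work, 1);
		return;
	}

	/* ... perform the RTNL-protected update ... */

	rtnl_unlock();
}

Because the work only ever tries the lock, it is safe to cancel_delayed_work_sync() it while holding RTNL, which is exactly what the driver does in its remove and suspend paths above.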

View File

@ -710,9 +710,13 @@ static struct nsim_rq *nsim_queue_alloc(void)
static void nsim_queue_free(struct net_device *dev, struct nsim_rq *rq)
{
hrtimer_cancel(&rq->napi_timer);
local_bh_disable();
dev_dstats_rx_dropped_add(dev, rq->skb_queue.qlen);
local_bh_enable();
if (rq->skb_queue.qlen) {
local_bh_disable();
dev_dstats_rx_dropped_add(dev, rq->skb_queue.qlen);
local_bh_enable();
}
skb_queue_purge_reason(&rq->skb_queue, SKB_DROP_REASON_QUEUE_PURGE);
kfree(rq);
}

View File

@ -91,6 +91,7 @@ int mdiobus_unregister_device(struct mdio_device *mdiodev)
if (mdiodev->bus->mdio_map[mdiodev->addr] != mdiodev)
return -EINVAL;
gpiod_put(mdiodev->reset_gpio);
reset_control_put(mdiodev->reset_ctrl);
mdiodev->bus->mdio_map[mdiodev->addr] = NULL;

View File

@ -443,9 +443,6 @@ void mdiobus_unregister(struct mii_bus *bus)
if (!mdiodev)
continue;
if (mdiodev->reset_gpio)
gpiod_put(mdiodev->reset_gpio);
mdiodev->device_remove(mdiodev);
mdiodev->device_free(mdiodev);
}

View File

@ -1965,24 +1965,27 @@ static int nxp_c45_macsec_ability(struct phy_device *phydev)
return macsec_ability;
}
static bool tja11xx_phy_id_compare(struct phy_device *phydev,
const struct phy_driver *phydrv)
{
u32 id = phydev->is_c45 ? phydev->c45_ids.device_ids[MDIO_MMD_PMAPMD] :
phydev->phy_id;
return phy_id_compare(id, phydrv->phy_id, phydrv->phy_id_mask);
}
static int tja11xx_no_macsec_match_phy_device(struct phy_device *phydev,
const struct phy_driver *phydrv)
{
if (!phy_id_compare(phydev->phy_id, phydrv->phy_id,
phydrv->phy_id_mask))
return 0;
return !nxp_c45_macsec_ability(phydev);
return tja11xx_phy_id_compare(phydev, phydrv) &&
!nxp_c45_macsec_ability(phydev);
}
static int tja11xx_macsec_match_phy_device(struct phy_device *phydev,
const struct phy_driver *phydrv)
{
if (!phy_id_compare(phydev->phy_id, phydrv->phy_id,
phydrv->phy_id_mask))
return 0;
return nxp_c45_macsec_ability(phydev);
return tja11xx_phy_id_compare(phydev, phydrv) &&
nxp_c45_macsec_ability(phydev);
}
static const struct nxp_c45_regmap tja1120_regmap = {

View File

@ -676,6 +676,7 @@ static int ax88772_init_mdio(struct usbnet *dev)
priv->mdio->read = &asix_mdio_bus_read;
priv->mdio->write = &asix_mdio_bus_write;
priv->mdio->name = "Asix MDIO Bus";
priv->mdio->phy_mask = ~(BIT(priv->phy_addr) | BIT(AX_EMBD_PHY_ADDR));
/* mii bus name is usb-<usb bus number>-<usb device number> */
snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
dev->udev->bus->busnum, dev->udev->devnum);

View File

@ -1361,6 +1361,7 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1057, 2)}, /* Telit FN980 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)}, /* Telit LN920 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)}, /* Telit FN990A */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1077, 2)}, /* Telit FN990A w/audio */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1080, 2)}, /* Telit FE990A */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x10a0, 0)}, /* Telit FN920C04 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x10a4, 0)}, /* Telit FN920C04 */

View File

@ -81,7 +81,7 @@ static struct lapbethdev *lapbeth_get_x25_dev(struct net_device *dev)
static __inline__ int dev_is_ethdev(struct net_device *dev)
{
return dev->type == ARPHRD_ETHER && strncmp(dev->name, "dummy", 5);
return dev->type == ARPHRD_ETHER && !netdev_need_ops_lock(dev);
}
/* ------------------------------------------------------------------------ */

View File

@ -24,6 +24,11 @@
#define PTP_DEFAULT_MAX_VCLOCKS 20
#define PTP_MAX_CHANNELS 2048
enum {
PTP_LOCK_PHYSICAL = 0,
PTP_LOCK_VIRTUAL,
};
struct timestamp_event_queue {
struct ptp_extts_event buf[PTP_MAX_TIMESTAMPS];
int head;

View File

@ -154,6 +154,11 @@ static long ptp_vclock_refresh(struct ptp_clock_info *ptp)
return PTP_VCLOCK_REFRESH_INTERVAL;
}
static void ptp_vclock_set_subclass(struct ptp_clock *ptp)
{
lockdep_set_subclass(&ptp->clock.rwsem, PTP_LOCK_VIRTUAL);
}
static const struct ptp_clock_info ptp_vclock_info = {
.owner = THIS_MODULE,
.name = "ptp virtual clock",
@ -213,6 +218,8 @@ struct ptp_vclock *ptp_vclock_register(struct ptp_clock *pclock)
return NULL;
}
ptp_vclock_set_subclass(vclock->clock);
timecounter_init(&vclock->tc, &vclock->cc, 0);
ptp_schedule_worker(vclock->clock, PTP_VCLOCK_REFRESH_INTERVAL);
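
lockdep_set_subclass() here gives the virtual clock's rwsem a different lockdep subclass from the physical clock's, so taking the virtual lock while the physical one is held no longer looks like recursive locking of a single class. A minimal sketch of the pattern (names hypothetical):

#include <linux/lockdep.h>
#include <linux/rwsem.h>

enum {
	DEMO_LOCK_PHYSICAL = 0,	/* default subclass */
	DEMO_LOCK_VIRTUAL,
};

struct demo_clock {
	struct rw_semaphore rwsem;
};

static void demo_mark_virtual(struct demo_clock *clk)
{
	/* Every later acquisition of this instance uses the new subclass. */
	lockdep_set_subclass(&clk->rwsem, DEMO_LOCK_VIRTUAL);
}

static void demo_nested_use(struct demo_clock *phys, struct demo_clock *virt)
{
	down_write(&phys->rwsem);	/* subclass 0 */
	down_write(&virt->rwsem);	/* subclass 1: no false splat */
	/* ... */
	up_write(&virt->rwsem);
	up_write(&phys->rwsem);
}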

View File

@ -6280,7 +6280,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
}
phba->nvmeio_trc_on = 1;
phba->nvmeio_trc_output_idx = 0;
phba->nvmeio_trc = NULL;
} else {
nvmeio_off:
phba->nvmeio_trc_size = 0;

View File

@ -666,7 +666,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
* Take early refcount for outstanding I/O requests we schedule during
* delete processing for unreg_vpi. Always keep this before
* scsi_remove_host() as we can no longer obtain a reference through
* scsi_host_get() after scsi_host_remove as shost is set to SHOST_DEL.
* scsi_host_get() after scsi_remove_host as shost is set to SHOST_DEL.
*/
if (!scsi_host_get(shost))
return VPORT_INVAL;

View File

@ -2674,8 +2674,10 @@ static int resp_rsup_tmfs(struct scsi_cmnd *scp,
static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
{ /* Read-Write Error Recovery page for mode_sense */
unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
5, 0, 0xff, 0xff};
static const unsigned char err_recov_pg[] = {
0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
5, 0, 0xff, 0xff
};
memcpy(p, err_recov_pg, sizeof(err_recov_pg));
if (1 == pcontrol)
@ -2685,8 +2687,10 @@ static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
{ /* Disconnect-Reconnect page for mode_sense */
unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0};
static const unsigned char disconnect_pg[] = {
0x2, 0xe, 128, 128, 0, 10, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0
};
memcpy(p, disconnect_pg, sizeof(disconnect_pg));
if (1 == pcontrol)
@ -2696,9 +2700,11 @@ static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
static int resp_format_pg(unsigned char *p, int pcontrol, int target)
{ /* Format device page for mode_sense */
unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0x40, 0, 0, 0};
static const unsigned char format_pg[] = {
0x3, 0x16, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0x40, 0, 0, 0
};
memcpy(p, format_pg, sizeof(format_pg));
put_unaligned_be16(sdebug_sectors_per, p + 10);
@ -2716,10 +2722,14 @@ static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
{ /* Caching page for mode_sense */
unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
static const unsigned char ch_caching_pg[] = {
/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
static const unsigned char d_caching_pg[] = {
0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0
};
if (SDEBUG_OPT_N_WCE & sdebug_opts)
caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */
@ -2738,8 +2748,10 @@ static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
{ /* Control mode page for mode_sense */
unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
0, 0, 0, 0};
unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
0, 0, 0x2, 0x4b};
static const unsigned char d_ctrl_m_pg[] = {
0xa, 10, 2, 0, 0, 0, 0, 0,
0, 0, 0x2, 0x4b
};
if (sdebug_dsense)
ctrl_m_pg[2] |= 0x4;
@ -2794,10 +2806,14 @@ static int resp_grouping_m_pg(unsigned char *p, int pcontrol, int target)
static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
{ /* Informational Exceptions control mode page for mode_sense */
unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
0, 0, 0x0, 0x0};
unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
0, 0, 0x0, 0x0};
static const unsigned char ch_iec_m_pg[] = {
/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
0, 0, 0x0, 0x0
};
static const unsigned char d_iec_m_pg[] = {
0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
0, 0, 0x0, 0x0
};
memcpy(p, iec_m_pg, sizeof(iec_m_pg));
if (1 == pcontrol)
@ -2809,8 +2825,9 @@ static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
{ /* SAS SSP mode page - short format for mode_sense */
unsigned char sas_sf_m_pg[] = {0x19, 0x6,
0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
static const unsigned char sas_sf_m_pg[] = {
0x19, 0x6, 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0
};
memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
if (1 == pcontrol)
@ -2854,9 +2871,10 @@ static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
{ /* SAS SSP shared protocol specific port mode subpage */
unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
0, 0, 0, 0, 0, 0, 0, 0,
};
static const unsigned char sas_sha_m_pg[] = {
0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
0, 0, 0, 0, 0, 0, 0, 0,
};
memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
if (1 == pcontrol)
@ -2923,8 +2941,10 @@ static int process_medium_part_m_pg(struct sdebug_dev_info *devip,
static int resp_compression_m_pg(unsigned char *p, int pcontrol, int target,
unsigned char dce)
{ /* Compression page for mode_sense (tape) */
unsigned char compression_pg[] = {0x0f, 14, 0x40, 0, 0, 0, 0, 0,
0, 0, 0, 0, 00, 00};
static const unsigned char compression_pg[] = {
0x0f, 14, 0x40, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0
};
memcpy(p, compression_pg, sizeof(compression_pg));
if (dce)
@ -3282,9 +3302,10 @@ bad_pcode:
static int resp_temp_l_pg(unsigned char *arr)
{
unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
0x0, 0x1, 0x3, 0x2, 0x0, 65,
};
static const unsigned char temp_l_pg[] = {
0x0, 0x0, 0x3, 0x2, 0x0, 38,
0x0, 0x1, 0x3, 0x2, 0x0, 65,
};
memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
return sizeof(temp_l_pg);
@ -3292,8 +3313,9 @@ static int resp_temp_l_pg(unsigned char *arr)
static int resp_ie_l_pg(unsigned char *arr)
{
unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
};
static const unsigned char ie_l_pg[] = {
0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
};
memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
if (iec_m_pg[2] & 0x4) { /* TEST bit set */
@ -3305,11 +3327,12 @@ static int resp_ie_l_pg(unsigned char *arr)
static int resp_env_rep_l_spg(unsigned char *arr)
{
unsigned char env_rep_l_spg[] = {0x0, 0x0, 0x23, 0x8,
0x0, 40, 72, 0xff, 45, 18, 0, 0,
0x1, 0x0, 0x23, 0x8,
0x0, 55, 72, 35, 55, 45, 0, 0,
};
static const unsigned char env_rep_l_spg[] = {
0x0, 0x0, 0x23, 0x8,
0x0, 40, 72, 0xff, 45, 18, 0, 0,
0x1, 0x0, 0x23, 0x8,
0x0, 55, 72, 35, 55, 45, 0, 0,
};
memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
return sizeof(env_rep_l_spg);

View File

@ -265,7 +265,7 @@ show_shost_supported_mode(struct device *dev, struct device_attribute *attr,
return show_shost_mode(supported_mode, buf);
}
static DEVICE_ATTR(supported_mode, S_IRUGO | S_IWUSR, show_shost_supported_mode, NULL);
static DEVICE_ATTR(supported_mode, S_IRUGO, show_shost_supported_mode, NULL);
static ssize_t
show_shost_active_mode(struct device *dev,
@ -279,7 +279,7 @@ show_shost_active_mode(struct device *dev,
return show_shost_mode(shost->active_mode, buf);
}
static DEVICE_ATTR(active_mode, S_IRUGO | S_IWUSR, show_shost_active_mode, NULL);
static DEVICE_ATTR(active_mode, S_IRUGO, show_shost_active_mode, NULL);
static int check_reset_type(const char *str)
{

View File

@ -1232,7 +1232,7 @@ err:
}
static int tegra_powergate_of_get_resets(struct tegra_powergate *pg,
struct device_node *np, bool off)
struct device_node *np)
{
struct device *dev = pg->pmc->dev;
int err;
@ -1247,22 +1247,6 @@ static int tegra_powergate_of_get_resets(struct tegra_powergate *pg,
err = reset_control_acquire(pg->reset);
if (err < 0) {
pr_err("failed to acquire resets: %d\n", err);
goto out;
}
if (off) {
err = reset_control_assert(pg->reset);
} else {
err = reset_control_deassert(pg->reset);
if (err < 0)
goto out;
reset_control_release(pg->reset);
}
out:
if (err) {
reset_control_release(pg->reset);
reset_control_put(pg->reset);
}
@ -1308,20 +1292,43 @@ static int tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
goto set_available;
}
err = tegra_powergate_of_get_resets(pg, np, off);
err = tegra_powergate_of_get_resets(pg, np);
if (err < 0) {
dev_err(dev, "failed to get resets for %pOFn: %d\n", np, err);
goto remove_clks;
}
if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS)) {
if (off)
WARN_ON(tegra_powergate_power_up(pg, true));
/*
* If the power-domain is off, then ensure the resets are asserted.
	 * If the power-domain is on, then power it down to ensure that when
	 * it is turned on again, the power-domain, clocks and resets are all
	 * in the expected state.
*/
if (off) {
err = reset_control_assert(pg->reset);
if (err) {
pr_err("failed to assert resets: %d\n", err);
goto remove_resets;
}
} else {
err = tegra_powergate_power_down(pg);
if (err) {
dev_err(dev, "failed to turn off PM domain %s: %d\n",
pg->genpd.name, err);
goto remove_resets;
}
}
/*
* If PM_GENERIC_DOMAINS is not enabled, power-on
* the domain and skip the genpd registration.
*/
if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS)) {
WARN_ON(tegra_powergate_power_up(pg, true));
goto remove_resets;
}
err = pm_genpd_init(&pg->genpd, NULL, off);
err = pm_genpd_init(&pg->genpd, NULL, true);
if (err < 0) {
dev_err(dev, "failed to initialise PM domain %pOFn: %d\n", np,
err);

View File

@ -7138,14 +7138,19 @@ static irqreturn_t ufshcd_threaded_intr(int irq, void *__hba)
static irqreturn_t ufshcd_intr(int irq, void *__hba)
{
struct ufs_hba *hba = __hba;
u32 intr_status, enabled_intr_status;
/* Move interrupt handling to thread when MCQ & ESI are not enabled */
if (!hba->mcq_enabled || !hba->mcq_esi_enabled)
return IRQ_WAKE_THREAD;
intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
enabled_intr_status = intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
	/* Handle interrupts directly, since the MCQ ESI handlers do the hard work */
return ufshcd_sl_intr(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS) &
ufshcd_readl(hba, REG_INTERRUPT_ENABLE));
return ufshcd_sl_intr(hba, enabled_intr_status);
}
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
@ -10516,8 +10521,7 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
err = devm_add_action_or_reset(dev, ufshcd_devres_release,
host);
if (err)
return dev_err_probe(dev, err,
"failed to add ufshcd dealloc action\n");
return err;
host->nr_maps = HCTX_TYPE_POLL + 1;
hba = shost_priv(host);

View File

@ -818,7 +818,7 @@ static u32 ufs_mtk_mcq_get_irq(struct ufs_hba *hba, unsigned int cpu)
unsigned int q_index;
q_index = map->mq_map[cpu];
if (q_index > nr) {
if (q_index >= nr) {
dev_err(hba->dev, "hwq index %d exceed %d\n",
q_index, nr);
return MTK_MCQ_INVALID_IRQ;

View File

@ -116,13 +116,11 @@ e_free:
static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
{
struct snp_derived_key_resp *derived_key_resp __free(kfree) = NULL;
struct snp_derived_key_req *derived_key_req __free(kfree) = NULL;
struct snp_derived_key_resp derived_key_resp = {0};
struct snp_msg_desc *mdesc = snp_dev->msg_desc;
struct snp_guest_req req = {};
int rc, resp_len;
/* Response data is 64 bytes and max authsize for GCM is 16 bytes. */
u8 buf[64 + 16];
if (!arg->req_data || !arg->resp_data)
return -EINVAL;
@ -132,8 +130,9 @@ static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_reque
* response payload. Make sure that it has enough space to cover the
* authtag.
*/
resp_len = sizeof(derived_key_resp.data) + mdesc->ctx->authsize;
if (sizeof(buf) < resp_len)
resp_len = sizeof(derived_key_resp->data) + mdesc->ctx->authsize;
derived_key_resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT);
if (!derived_key_resp)
return -ENOMEM;
derived_key_req = kzalloc(sizeof(*derived_key_req), GFP_KERNEL_ACCOUNT);
@ -149,23 +148,21 @@ static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_reque
req.vmpck_id = mdesc->vmpck_id;
req.req_buf = derived_key_req;
req.req_sz = sizeof(*derived_key_req);
req.resp_buf = buf;
req.resp_buf = derived_key_resp;
req.resp_sz = resp_len;
req.exit_code = SVM_VMGEXIT_GUEST_REQUEST;
rc = snp_send_guest_request(mdesc, &req);
arg->exitinfo2 = req.exitinfo2;
if (rc)
return rc;
memcpy(derived_key_resp.data, buf, sizeof(derived_key_resp.data));
if (copy_to_user((void __user *)arg->resp_data, &derived_key_resp,
sizeof(derived_key_resp)))
rc = -EFAULT;
if (!rc) {
if (copy_to_user((void __user *)arg->resp_data, derived_key_resp,
sizeof(derived_key_resp->data)))
rc = -EFAULT;
}
/* The response buffer contains the sensitive data, explicitly clear it. */
memzero_explicit(buf, sizeof(buf));
memzero_explicit(&derived_key_resp, sizeof(derived_key_resp));
memzero_explicit(derived_key_resp, sizeof(*derived_key_resp));
return rc;
}
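
The reworked key path illustrates hygiene for sensitive heap buffers: zero them with memzero_explicit(), which, unlike a plain memset() before kfree(), the compiler is not allowed to optimize away as a dead store. A reduced sketch:

#include <linux/slab.h>
#include <linux/string.h>

static int demo_handle_secret(size_t len)
{
	u8 *buf;
	int ret = 0;

	buf = kzalloc(len, GFP_KERNEL_ACCOUNT);
	if (!buf)
		return -ENOMEM;

	/* ... fill buf with key material and use it ... */

	/* Guaranteed wipe; a dead-store memset() could be elided. */
	memzero_explicit(buf, len);
	kfree(buf);
	return ret;
}

kfree_sensitive() bundles the same wipe-then-free sequence into one call when the whole allocation should be cleared.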

View File

@ -4331,15 +4331,18 @@ static int try_release_subpage_extent_buffer(struct folio *folio)
unsigned long end = index + (PAGE_SIZE >> fs_info->nodesize_bits) - 1;
int ret;
xa_lock_irq(&fs_info->buffer_tree);
rcu_read_lock();
xa_for_each_range(&fs_info->buffer_tree, index, eb, start, end) {
/*
* The same as try_release_extent_buffer(), to ensure the eb
* won't disappear out from under us.
*/
spin_lock(&eb->refs_lock);
rcu_read_unlock();
if (refcount_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
spin_unlock(&eb->refs_lock);
rcu_read_lock();
continue;
}
@ -4358,11 +4361,10 @@ static int try_release_subpage_extent_buffer(struct folio *folio)
* check the folio private at the end. And
* release_extent_buffer() will release the refs_lock.
*/
xa_unlock_irq(&fs_info->buffer_tree);
release_extent_buffer(eb);
xa_lock_irq(&fs_info->buffer_tree);
rcu_read_lock();
}
xa_unlock_irq(&fs_info->buffer_tree);
rcu_read_unlock();
/*
* Finally to check if we have cleared folio private, as if we have
@ -4375,7 +4377,6 @@ static int try_release_subpage_extent_buffer(struct folio *folio)
ret = 0;
spin_unlock(&folio->mapping->i_private_lock);
return ret;
}
int try_release_extent_buffer(struct folio *folio)

View File

@ -401,10 +401,12 @@ static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
while (index <= end_index) {
folio = filemap_get_folio(inode->vfs_inode.i_mapping, index);
index++;
if (IS_ERR(folio))
if (IS_ERR(folio)) {
index++;
continue;
}
index = folio_end(folio) >> PAGE_SHIFT;
/*
* Here we just clear all Ordered bits for every page in the
* range, then btrfs_mark_ordered_io_finished() will handle
@ -2013,7 +2015,7 @@ static int nocow_one_range(struct btrfs_inode *inode, struct folio *locked_folio
	 * cleared by the caller.
*/
if (ret < 0)
btrfs_cleanup_ordered_extents(inode, file_pos, end);
btrfs_cleanup_ordered_extents(inode, file_pos, len);
return ret;
}

View File

@ -1453,7 +1453,6 @@ static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info, u64 ref_root,
struct btrfs_qgroup *src, int sign)
{
struct btrfs_qgroup *qgroup;
struct btrfs_qgroup *cur;
LIST_HEAD(qgroup_list);
u64 num_bytes = src->excl;
int ret = 0;
@ -1463,7 +1462,7 @@ static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info, u64 ref_root,
goto out;
qgroup_iterator_add(&qgroup_list, qgroup);
list_for_each_entry(cur, &qgroup_list, iterator) {
list_for_each_entry(qgroup, &qgroup_list, iterator) {
struct btrfs_qgroup_list *glist;
qgroup->rfer += sign * num_bytes;

View File

@ -602,6 +602,25 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
if (btrfs_root_id(root) == objectid) {
u64 commit_root_gen;
/*
		 * Relocation will wait for the cleaner thread, and any half-dropped
		 * subvolume will be fully cleaned up at mount time.
* So here we shouldn't hit a subvolume with non-zero drop_progress.
*
* If this isn't the case, error out since it can make us attempt to
* drop references for extents that were already dropped before.
*/
if (unlikely(btrfs_disk_key_objectid(&root->root_item.drop_progress))) {
struct btrfs_key cpu_key;
btrfs_disk_key_to_cpu(&cpu_key, &root->root_item.drop_progress);
btrfs_err(fs_info,
"cannot relocate partially dropped subvolume %llu, drop progress key (%llu %u %llu)",
objectid, cpu_key.objectid, cpu_key.type, cpu_key.offset);
ret = -EUCLEAN;
goto fail;
}
/* called by btrfs_init_reloc_root */
ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
BTRFS_TREE_RELOC_OBJECTID);

View File

@ -2605,14 +2605,14 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
/*
* Correctly adjust the reserved bytes occupied by a log tree extent buffer
*/
static void unaccount_log_buffer(struct btrfs_fs_info *fs_info, u64 start)
static int unaccount_log_buffer(struct btrfs_fs_info *fs_info, u64 start)
{
struct btrfs_block_group *cache;
cache = btrfs_lookup_block_group(fs_info, start);
if (!cache) {
btrfs_err(fs_info, "unable to find block group for %llu", start);
return;
return -ENOENT;
}
spin_lock(&cache->space_info->lock);
@ -2623,27 +2623,22 @@ static void unaccount_log_buffer(struct btrfs_fs_info *fs_info, u64 start)
spin_unlock(&cache->space_info->lock);
btrfs_put_block_group(cache);
return 0;
}
static int clean_log_buffer(struct btrfs_trans_handle *trans,
struct extent_buffer *eb)
{
int ret;
btrfs_tree_lock(eb);
btrfs_clear_buffer_dirty(trans, eb);
wait_on_extent_buffer_writeback(eb);
btrfs_tree_unlock(eb);
if (trans) {
ret = btrfs_pin_reserved_extent(trans, eb);
if (ret)
return ret;
} else {
unaccount_log_buffer(eb->fs_info, eb->start);
}
if (trans)
return btrfs_pin_reserved_extent(trans, eb);
return 0;
return unaccount_log_buffer(eb->fs_info, eb->start);
}
static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,

View File

@ -2650,7 +2650,7 @@ int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info)
spin_lock(&block_group->lock);
if (block_group->reserved || block_group->alloc_offset == 0 ||
(block_group->flags & BTRFS_BLOCK_GROUP_SYSTEM) ||
!(block_group->flags & BTRFS_BLOCK_GROUP_DATA) ||
test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) {
spin_unlock(&block_group->lock);
continue;

Some files were not shown because too many files have changed in this diff