Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2026-04-14 18:29:12 +08:00).
Merge branch 'for-6.5/cxl-region-fixes' into for-6.5/cxl
Pick up the recent fixes to how CPU caches are managed relative to region setup / teardown, and make sure that all decoders transition successfully before updating the region state from COMMIT => ACTIVE.
This commit is contained in:
@@ -125,10 +125,38 @@ static struct cxl_region_ref *cxl_rr_load(struct cxl_port *port,
|
||||
return xa_load(&port->regions, (unsigned long)cxlr);
|
||||
}
|
||||
|
||||
static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
|
||||
{
|
||||
if (!cpu_cache_has_invalidate_memregion()) {
|
||||
if (IS_ENABLED(CONFIG_CXL_REGION_INVALIDATION_TEST)) {
|
||||
dev_warn_once(
|
||||
&cxlr->dev,
|
||||
"Bypassing cpu_cache_invalidate_memregion() for testing!\n");
|
||||
return 0;
|
||||
} else {
|
||||
dev_err(&cxlr->dev,
|
||||
"Failed to synchronize CPU cache state\n");
|
||||
return -ENXIO;
|
||||
}
|
||||
}
|
||||
|
||||
cpu_cache_invalidate_memregion(IORES_DESC_CXL);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
|
||||
{
|
||||
struct cxl_region_params *p = &cxlr->params;
|
||||
int i;
|
||||
int i, rc = 0;
|
||||
|
||||
/*
|
||||
* Before region teardown attempt to flush, and if the flush
|
||||
* fails cancel the region teardown for data consistency
|
||||
* concerns
|
||||
*/
|
||||
rc = cxl_region_invalidate_memregion(cxlr);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
for (i = count - 1; i >= 0; i--) {
|
||||
struct cxl_endpoint_decoder *cxled = p->targets[i];
|
||||
@@ -136,7 +164,6 @@ static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
|
||||
struct cxl_port *iter = cxled_to_port(cxled);
|
||||
struct cxl_dev_state *cxlds = cxlmd->cxlds;
|
||||
struct cxl_ep *ep;
|
||||
int rc = 0;
|
||||
|
||||
if (cxlds->rcd)
|
||||
goto endpoint_reset;
|
||||
@@ -155,14 +182,19 @@ static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
|
||||
rc = cxld->reset(cxld);
|
||||
if (rc)
|
||||
return rc;
|
||||
set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
|
||||
}
|
||||
|
||||
endpoint_reset:
|
||||
rc = cxled->cxld.reset(&cxled->cxld);
|
||||
if (rc)
|
||||
return rc;
|
||||
set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
|
||||
}
|
||||
|
||||
/* all decoders associated with this region have been torn down */
|
||||
clear_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -256,9 +288,19 @@ static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (commit)
|
||||
/*
|
||||
* Invalidate caches before region setup to drop any speculative
|
||||
* consumption of this address space
|
||||
*/
|
||||
rc = cxl_region_invalidate_memregion(cxlr);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
if (commit) {
|
||||
rc = cxl_region_decode_commit(cxlr);
|
||||
else {
|
||||
if (rc == 0)
|
||||
p->state = CXL_CONFIG_COMMIT;
|
||||
} else {
|
||||
p->state = CXL_CONFIG_RESET_PENDING;
|
||||
up_write(&cxl_region_rwsem);
|
||||
device_release_driver(&cxlr->dev);
|
||||
@@ -268,18 +310,20 @@ static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
|
||||
* The lock was dropped, so need to revalidate that the reset is
|
||||
* still pending.
|
||||
*/
|
||||
if (p->state == CXL_CONFIG_RESET_PENDING)
|
||||
if (p->state == CXL_CONFIG_RESET_PENDING) {
|
||||
rc = cxl_region_decode_reset(cxlr, p->interleave_ways);
|
||||
/*
|
||||
* Revert to committed since there may still be active
|
||||
* decoders associated with this region, or move forward
|
||||
* to active to mark the reset successful
|
||||
*/
|
||||
if (rc)
|
||||
p->state = CXL_CONFIG_COMMIT;
|
||||
else
|
||||
p->state = CXL_CONFIG_ACTIVE;
|
||||
}
|
||||
}
|
||||
|
||||
if (rc)
|
||||
goto out;
|
||||
|
||||
if (commit)
|
||||
p->state = CXL_CONFIG_COMMIT;
|
||||
else if (p->state == CXL_CONFIG_RESET_PENDING)
|
||||
p->state = CXL_CONFIG_ACTIVE;
|
||||
|
||||
out:
|
||||
up_write(&cxl_region_rwsem);
|
||||
|
||||
@@ -1686,7 +1730,6 @@ static int cxl_region_attach(struct cxl_region *cxlr,
|
||||
if (rc)
|
||||
goto err_decrement;
|
||||
p->state = CXL_CONFIG_ACTIVE;
|
||||
set_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags);
|
||||
}
|
||||
|
||||
cxled->cxld.interleave_ways = p->interleave_ways;
|
||||
@@ -2815,30 +2858,6 @@ out:
|
||||
}
|
||||
EXPORT_SYMBOL_NS_GPL(cxl_add_to_region, CXL);
|
||||
|
||||
static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
|
||||
{
|
||||
if (!test_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags))
|
||||
return 0;
|
||||
|
||||
if (!cpu_cache_has_invalidate_memregion()) {
|
||||
if (IS_ENABLED(CONFIG_CXL_REGION_INVALIDATION_TEST)) {
|
||||
dev_warn_once(
|
||||
&cxlr->dev,
|
||||
"Bypassing cpu_cache_invalidate_memregion() for testing!\n");
|
||||
clear_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags);
|
||||
return 0;
|
||||
} else {
|
||||
dev_err(&cxlr->dev,
|
||||
"Failed to synchronize CPU cache state\n");
|
||||
return -ENXIO;
|
||||
}
|
||||
}
|
||||
|
||||
cpu_cache_invalidate_memregion(IORES_DESC_CXL);
|
||||
clear_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int is_system_ram(struct resource *res, void *arg)
|
||||
{
|
||||
struct cxl_region *cxlr = arg;
|
||||
@@ -2866,7 +2885,12 @@ static int cxl_region_probe(struct device *dev)
|
||||
goto out;
|
||||
}
|
||||
|
||||
rc = cxl_region_invalidate_memregion(cxlr);
|
||||
if (test_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags)) {
|
||||
dev_err(&cxlr->dev,
|
||||
"failed to activate, re-commit region and retry\n");
|
||||
rc = -ENXIO;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* From this point on any path that changes the region's state away from
|
||||
|
||||
@@ -470,18 +470,20 @@ struct cxl_region_params {
|
||||
int nr_targets;
|
||||
};
|
||||
|
||||
/*
|
||||
* Flag whether this region needs to have its HPA span synchronized with
|
||||
* CPU cache state at region activation time.
|
||||
*/
|
||||
#define CXL_REGION_F_INCOHERENT 0
|
||||
|
||||
/*
|
||||
* Indicate whether this region has been assembled by autodetection or
|
||||
* userspace assembly. Prevent endpoint decoders outside of automatic
|
||||
* detection from being added to the region.
|
||||
*/
|
||||
#define CXL_REGION_F_AUTO 1
|
||||
#define CXL_REGION_F_AUTO 0
|
||||
|
||||
/*
|
||||
* Require that a committed region successfully complete a teardown once
|
||||
* any of its associated decoders have been torn down. This maintains
|
||||
* the commit state for the region since there are committed decoders,
|
||||
* but blocks cxl_region_probe().
|
||||
*/
|
||||
#define CXL_REGION_F_NEEDS_RESET 1
|
||||
|
||||
/**
|
||||
* struct cxl_region - CXL region
|
||||
|
||||
Reference in New Issue
Block a user