Diffstat (limited to 'drivers/cxl/core/pci.c')
 drivers/cxl/core/pci.c | 275 ++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 250 insertions(+), 25 deletions(-)
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index c7a7887ebdcf..eff20e83d0a6 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -5,6 +5,7 @@
 #include <linux/delay.h>
 #include <linux/pci.h>
 #include <linux/pci-doe.h>
+#include <linux/aer.h>
 #include <cxlpci.h>
 #include <cxlmem.h>
 #include <cxl.h>
@@ -595,6 +596,16 @@ static int cxl_cdat_read_table(struct device *dev,
         return 0;
 }
 
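+/*
+ * CDAT tables use an ACPI-style checksum: every byte of the table,
+ * including the checksum byte itself, must sum to zero (mod 256).
+ */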
+static unsigned char cdat_checksum(void *buf, size_t size)
+{
+        unsigned char sum, *data = buf;
+        size_t i;
+
+        for (sum = 0, i = 0; i < size; i++)
+                sum += data[i];
+        return sum;
+}
+
 /**
  * read_cdat_data - Read the CDAT data on this port
  * @port: Port to read data from
@@ -603,18 +614,30 @@ static int cxl_cdat_read_table(struct device *dev,
  */
 void read_cdat_data(struct cxl_port *port)
 {
-        struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
-        struct device *host = cxlmd->dev.parent;
+        struct device *uport = port->uport_dev;
         struct device *dev = &port->dev;
         struct pci_doe_mb *cdat_doe;
+        struct pci_dev *pdev = NULL;
+        struct cxl_memdev *cxlmd;
         size_t cdat_length;
         void *cdat_table;
         int rc;
 
-        if (!dev_is_pci(host))
+        if (is_cxl_memdev(uport)) {
+                struct device *host;
+
+                cxlmd = to_cxl_memdev(uport);
+                host = cxlmd->dev.parent;
+                if (dev_is_pci(host))
+                        pdev = to_pci_dev(host);
+        } else if (dev_is_pci(uport)) {
+                pdev = to_pci_dev(uport);
+        }
+
+        if (!pdev)
                 return;
-        cdat_doe = pci_find_doe_mailbox(to_pci_dev(host),
-                                        PCI_DVSEC_VENDOR_ID_CXL,
+
+        cdat_doe = pci_find_doe_mailbox(pdev, PCI_DVSEC_VENDOR_ID_CXL,
                                         CXL_DOE_PROTOCOL_TABLE_ACCESS);
         if (!cdat_doe) {
                 dev_dbg(dev, "No CDAT mailbox\n");
@@ -634,44 +657,54 @@ void read_cdat_data(struct cxl_port *port)
                 return;
 
         rc = cxl_cdat_read_table(dev, cdat_doe, cdat_table, &cdat_length);
-        if (rc) {
-                /* Don't leave table data allocated on error */
-                devm_kfree(dev, cdat_table);
-                dev_err(dev, "CDAT data read error\n");
-                return;
-        }
+        if (rc)
+                goto err;
+
+        /* The table body follows a 4-byte DOE Table Access response header */
+        if (cdat_checksum(cdat_table + sizeof(__le32), cdat_length))
+                goto err;
 
         port->cdat.table = cdat_table + sizeof(__le32);
         port->cdat.length = cdat_length;
+
+        return;
+
+err:
+        /*
+         * Don't leave table data allocated on error. cdat_table still
+         * points at the base of the allocation here, which is what
+         * devm_kfree() needs to find the devres entry.
+         */
+        devm_kfree(dev, cdat_table);
+        dev_err(dev, "Failed to read/validate CDAT.\n");
 }
 EXPORT_SYMBOL_NS_GPL(read_cdat_data, CXL);
 
-void cxl_cor_error_detected(struct pci_dev *pdev)
+static void __cxl_handle_cor_ras(struct cxl_dev_state *cxlds,
+                                 void __iomem *ras_base)
 {
-        struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
         void __iomem *addr;
         u32 status;
 
-        if (!cxlds->regs.ras)
+        if (!ras_base)
                 return;
 
-        addr = cxlds->regs.ras + CXL_RAS_CORRECTABLE_STATUS_OFFSET;
+        addr = ras_base + CXL_RAS_CORRECTABLE_STATUS_OFFSET;
         status = readl(addr);
         if (status & CXL_RAS_CORRECTABLE_STATUS_MASK) {
                 writel(status & CXL_RAS_CORRECTABLE_STATUS_MASK, addr);
                 trace_cxl_aer_correctable_error(cxlds->cxlmd, status);
         }
 }
-EXPORT_SYMBOL_NS_GPL(cxl_cor_error_detected, CXL);
+
+static void cxl_handle_endpoint_cor_ras(struct cxl_dev_state *cxlds)
+{
+        return __cxl_handle_cor_ras(cxlds, cxlds->regs.ras);
+}
 
 /* CXL spec rev3.0 8.2.4.16.1 */
-static void header_log_copy(struct cxl_dev_state *cxlds, u32 *log)
+static void header_log_copy(void __iomem *ras_base, u32 *log)
 {
         void __iomem *addr;
         u32 *log_addr;
         int i, log_u32_size = CXL_HEADERLOG_SIZE / sizeof(u32);
 
-        addr = cxlds->regs.ras + CXL_RAS_HEADER_LOG_OFFSET;
+        addr = ras_base + CXL_RAS_HEADER_LOG_OFFSET;
         log_addr = log;
 
         for (i = 0; i < log_u32_size; i++) {
@@ -685,17 +718,18 @@ static void header_log_copy(struct cxl_dev_state *cxlds, u32 *log)
  * Log the state of the RAS status registers and prepare them to log the
  * next error status. Return true if a reset is needed.
  */
-static bool cxl_report_and_clear(struct cxl_dev_state *cxlds)
+static bool __cxl_handle_ras(struct cxl_dev_state *cxlds,
+                             void __iomem *ras_base)
 {
         u32 hl[CXL_HEADERLOG_SIZE_U32];
         void __iomem *addr;
         u32 status;
         u32 fe;
 
-        if (!cxlds->regs.ras)
+        if (!ras_base)
                 return false;
 
-        addr = cxlds->regs.ras + CXL_RAS_UNCORRECTABLE_STATUS_OFFSET;
+        addr = ras_base + CXL_RAS_UNCORRECTABLE_STATUS_OFFSET;
         status = readl(addr);
         if (!(status & CXL_RAS_UNCORRECTABLE_STATUS_MASK))
                 return false;
@@ -703,7 +737,7 @@ static bool cxl_report_and_clear(struct cxl_dev_state *cxlds)
         /* If multiple errors, log header points to first error from ctrl reg */
         if (hweight32(status) > 1) {
                 void __iomem *rcc_addr =
-                        cxlds->regs.ras + CXL_RAS_CAP_CONTROL_OFFSET;
+                        ras_base + CXL_RAS_CAP_CONTROL_OFFSET;
 
                 fe = BIT(FIELD_GET(CXL_RAS_CAP_CONTROL_FE_MASK,
                                    readl(rcc_addr)));
@@ -711,13 +745,201 @@ static bool cxl_report_and_clear(struct cxl_dev_state *cxlds)
                 fe = status;
         }
 
-        header_log_copy(cxlds, hl);
+        header_log_copy(ras_base, hl);
         trace_cxl_aer_uncorrectable_error(cxlds->cxlmd, status, fe, hl);
         writel(status & CXL_RAS_UNCORRECTABLE_STATUS_MASK, addr);
 
         return true;
 }
 
+static bool cxl_handle_endpoint_ras(struct cxl_dev_state *cxlds)
+{
+        return __cxl_handle_ras(cxlds, cxlds->regs.ras);
+}
+
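+/*
+ * CONFIG_PCIEAER_CXL enables error handling for the downstream ports of
+ * a Restricted CXL Host (RCH), whose AER registers live in the RCRB.
+ */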
+#ifdef CONFIG_PCIEAER_CXL
+
+static void cxl_dport_map_rch_aer(struct cxl_dport *dport)
+{
+        struct cxl_rcrb_info *ri = &dport->rcrb;
+        void __iomem *dport_aer = NULL;
+        resource_size_t aer_phys;
+        struct device *host;
+
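+        /*
+         * An RCH downstream port's AER capability sits in its RCRB,
+         * which is MMIO mapped, so ioremap it rather than going through
+         * config space accessors.
+         */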
+        if (dport->rch && ri->aer_cap) {
+                host = dport->reg_map.host;
+                aer_phys = ri->aer_cap + ri->base;
+                dport_aer = devm_cxl_iomap_block(host, aer_phys,
+                                sizeof(struct aer_capability_regs));
+        }
+
+        dport->regs.dport_aer = dport_aer;
+}
+
+static void cxl_dport_map_regs(struct cxl_dport *dport)
+{
+        struct cxl_register_map *map = &dport->reg_map;
+        struct device *dev = dport->dport_dev;
+
+        if (!map->component_map.ras.valid)
+                dev_dbg(dev, "RAS registers not found\n");
+        else if (cxl_map_component_regs(map, &dport->regs.component,
+                                        BIT(CXL_CM_CAP_CAP_ID_RAS)))
+                dev_dbg(dev, "Failed to map RAS capability.\n");
+
+        if (dport->rch)
+                cxl_dport_map_rch_aer(dport);
+}
+
+static void cxl_disable_rch_root_ints(struct cxl_dport *dport)
+{
+        void __iomem *aer_base = dport->regs.dport_aer;
+        struct pci_host_bridge *bridge;
+        u32 aer_cmd_mask, aer_cmd;
+
+        if (!aer_base)
+                return;
+
+        bridge = to_pci_host_bridge(dport->dport_dev);
+
+        /*
+         * Disable RCH root port command interrupts.
+         * CXL 3.0 12.2.1.1 - RCH Downstream Port-detected Errors
+         *
+         * This sequence may not be strictly necessary: the CXL spec
+         * requires disabling the root command register's interrupts,
+         * but the PCI spec shows they default to disabled after reset.
+         */
+        if (bridge->native_aer) {
+                aer_cmd_mask = (PCI_ERR_ROOT_CMD_COR_EN |
+                                PCI_ERR_ROOT_CMD_NONFATAL_EN |
+                                PCI_ERR_ROOT_CMD_FATAL_EN);
+                aer_cmd = readl(aer_base + PCI_ERR_ROOT_COMMAND);
+                aer_cmd &= ~aer_cmd_mask;
+                writel(aer_cmd, aer_base + PCI_ERR_ROOT_COMMAND);
+        }
+}
+
+void cxl_setup_parent_dport(struct device *host, struct cxl_dport *dport)
+{
+        struct device *dport_dev = dport->dport_dev;
+        struct pci_host_bridge *host_bridge;
+
+        host_bridge = to_pci_host_bridge(dport_dev);
+        if (host_bridge->native_aer)
+                dport->rcrb.aer_cap = cxl_rcrb_to_aer(dport_dev, dport->rcrb.base);
+
+        dport->reg_map.host = host;
+        cxl_dport_map_regs(dport);
+
+        if (dport->rch)
+                cxl_disable_rch_root_ints(dport);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_setup_parent_dport, CXL);
+
+static void cxl_handle_rdport_cor_ras(struct cxl_dev_state *cxlds,
+                                      struct cxl_dport *dport)
+{
+        return __cxl_handle_cor_ras(cxlds, dport->regs.ras);
+}
+
+static bool cxl_handle_rdport_ras(struct cxl_dev_state *cxlds,
+                                  struct cxl_dport *dport)
+{
+        return __cxl_handle_ras(cxlds, dport->regs.ras);
+}
+
+/*
+ * Copy the AER capability registers using 32-bit read accesses.
+ * This is necessary because the RCRB AER capability is MMIO-mapped
+ * rather than in config space. Clear the status after copying.
+ *
+ * @aer_base: base address of the AER capability block in the RCRB
+ * @aer_regs: destination for copying the AER capability
+ */
+static bool cxl_rch_get_aer_info(void __iomem *aer_base,
+                                 struct aer_capability_regs *aer_regs)
+{
+        int read_cnt = sizeof(struct aer_capability_regs) / sizeof(u32);
+        u32 *aer_regs_buf = (u32 *)aer_regs;
+        int n;
+
+        if (!aer_base)
+                return false;
+
+        /* Use readl() to guarantee 32-bit accesses */
+        for (n = 0; n < read_cnt; n++)
+                aer_regs_buf[n] = readl(aer_base + n * sizeof(u32));
+
+        writel(aer_regs->uncor_status, aer_base + PCI_ERR_UNCOR_STATUS);
+        writel(aer_regs->cor_status, aer_base + PCI_ERR_COR_STATUS);
+
+        return true;
+}
+
+/* Get AER severity. Return false if there is no error. */
+static bool cxl_rch_get_aer_severity(struct aer_capability_regs *aer_regs,
+                                     int *severity)
+{
+        if (aer_regs->uncor_status & ~aer_regs->uncor_mask) {
+                if (aer_regs->uncor_status & PCI_ERR_ROOT_FATAL_RCV)
+                        *severity = AER_FATAL;
+                else
+                        *severity = AER_NONFATAL;
+                return true;
+        }
+
+        if (aer_regs->cor_status & ~aer_regs->cor_mask) {
+                *severity = AER_CORRECTABLE;
+                return true;
+        }
+
+        return false;
+}
+
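+/*
+ * Snapshot and clear the RCH downstream port's AER registers, log the
+ * error, then service the matching RAS status in the dport's RCRB.
+ */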
+static void cxl_handle_rdport_errors(struct cxl_dev_state *cxlds)
+{
+        struct pci_dev *pdev = to_pci_dev(cxlds->dev);
+        struct aer_capability_regs aer_regs;
+        struct cxl_dport *dport;
+        struct cxl_port *port;
+        int severity;
+
+        port = cxl_pci_find_port(pdev, &dport);
+        if (!port)
+                return;
+
+        put_device(&port->dev);
+
+        if (!cxl_rch_get_aer_info(dport->regs.dport_aer, &aer_regs))
+                return;
+
+        if (!cxl_rch_get_aer_severity(&aer_regs, &severity))
+                return;
+
+        pci_print_aer(pdev, severity, &aer_regs);
+
+        if (severity == AER_CORRECTABLE)
+                cxl_handle_rdport_cor_ras(cxlds, dport);
+        else
+                cxl_handle_rdport_ras(cxlds, dport);
+}
+
+#else
+static void cxl_handle_rdport_errors(struct cxl_dev_state *cxlds) { }
+#endif
+
+void cxl_cor_error_detected(struct pci_dev *pdev)
+{
+        struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
+
+        if (cxlds->rcd)
+                cxl_handle_rdport_errors(cxlds);
+
+        cxl_handle_endpoint_cor_ras(cxlds);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_cor_error_detected, CXL);
+
 pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
                                     pci_channel_state_t state)
 {
@@ -726,13 +948,16 @@ pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
         struct device *dev = &cxlmd->dev;
         bool ue;
 
+        if (cxlds->rcd)
+                cxl_handle_rdport_errors(cxlds);
+
         /*
          * A frozen channel indicates an impending reset which is fatal to
          * CXL.mem operation, and will likely crash the system. On the off
          * chance the situation is recoverable, dump the status of the RAS
          * capability registers and bounce the active state of the memdev.
          */
-        ue = cxl_report_and_clear(cxlds);
+        ue = cxl_handle_endpoint_ras(cxlds);
 
         switch (state) {
         case pci_channel_io_normal: