Diffstat (limited to 'drivers/misc')
-rw-r--r--  drivers/misc/Kconfig                        |  12
-rw-r--r--  drivers/misc/Makefile                       |   1
-rw-r--r--  drivers/misc/bcm-vk/bcm_vk_tty.c            |   4
-rw-r--r--  drivers/misc/cardreader/Makefile            |   2
-rw-r--r--  drivers/misc/cardreader/rts5264.c           | 886
-rw-r--r--  drivers/misc/cardreader/rts5264.h           | 278
-rw-r--r--  drivers/misc/cardreader/rtsx_pcr.c          |  30
-rw-r--r--  drivers/misc/cardreader/rtsx_pcr.h          |   1
-rw-r--r--  drivers/misc/cxl/cxl.h                      |   3
-rw-r--r--  drivers/misc/dw-xdata-pcie.c                |   6
-rw-r--r--  drivers/misc/eeprom/at24.c                  |  43
-rw-r--r--  drivers/misc/eeprom/ee1004.c                | 113
-rw-r--r--  drivers/misc/genwqe/card_dev.c              |   2
-rw-r--r--  drivers/misc/genwqe/card_utils.c            |   4
-rw-r--r--  drivers/misc/lis3lv02d/lis3lv02d_i2c.c      |   1
-rw-r--r--  drivers/misc/lkdtm/heap.c                   |  60
-rw-r--r--  drivers/misc/mei/Kconfig                    |  35
-rw-r--r--  drivers/misc/mei/Makefile                   |   7
-rw-r--r--  drivers/misc/mei/client.c                   |   4
-rw-r--r--  drivers/misc/mei/gsc_proxy/Kconfig          |   2
-rw-r--r--  drivers/misc/mei/hdcp/Kconfig               |   2
-rw-r--r--  drivers/misc/mei/platform-vsc.c             | 450
-rw-r--r--  drivers/misc/mei/pxp/Kconfig                |   3
-rw-r--r--  drivers/misc/mei/pxp/mei_pxp.c              |   3
-rw-r--r--  drivers/misc/mei/vsc-fw-loader.c            | 770
-rw-r--r--  drivers/misc/mei/vsc-tp.c                   | 555
-rw-r--r--  drivers/misc/mei/vsc-tp.h                   |  50
-rw-r--r--  drivers/misc/nsm.c                          | 506
-rw-r--r--  drivers/misc/ocxl/afu_irq.c                 |   2
-rw-r--r--  drivers/misc/ocxl/context.c                 |   2
-rw-r--r--  drivers/misc/ocxl/file.c                    |   2
-rw-r--r--  drivers/misc/ocxl/link.c                    |  14
-rw-r--r--  drivers/misc/ocxl/main.c                    |   2
-rw-r--r--  drivers/misc/pci_endpoint_test.c            |  50
-rw-r--r--  drivers/misc/pvpanic/pvpanic-mmio.c         |   7
-rw-r--r--  drivers/misc/pvpanic/pvpanic-pci.c          |   6
-rw-r--r--  drivers/misc/pvpanic/pvpanic.c              |  12
-rw-r--r--  drivers/misc/pvpanic/pvpanic.h              |   5
-rw-r--r--  drivers/misc/vmw_vmci/vmci_handle_array.c   |  12
-rw-r--r--  drivers/misc/vmw_vmci/vmci_handle_array.h   |   8
40 files changed, 3826 insertions(+), 129 deletions(-)
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index f37c4b8380ae..4fb291f0bf7c 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -562,6 +562,18 @@ config TPS6594_PFSM
This driver can also be built as a module. If so, the module
will be called tps6594-pfsm.
+config NSM
+ tristate "Nitro (Enclaves) Security Module support"
+ depends on VIRTIO
+ select HW_RANDOM
+ help
+ This driver provides support for the Nitro Security Module
+ in AWS EC2 Nitro based Enclaves. The driver exposes a /dev/nsm
+ device user space can use to communicate with the hypervisor.
+
+ To compile this driver as a module, choose M here.
+ The module will be called nsm.
+
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index f2a4d1ff65d4..ea6ea5bbbc9c 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -67,3 +67,4 @@ obj-$(CONFIG_TMR_MANAGER) += xilinx_tmr_manager.o
obj-$(CONFIG_TMR_INJECT) += xilinx_tmr_inject.o
obj-$(CONFIG_TPS6594_ESM) += tps6594-esm.o
obj-$(CONFIG_TPS6594_PFSM) += tps6594-pfsm.o
+obj-$(CONFIG_NSM) += nsm.o
diff --git a/drivers/misc/bcm-vk/bcm_vk_tty.c b/drivers/misc/bcm-vk/bcm_vk_tty.c
index 2bce835ca43e..59bab76ff0a9 100644
--- a/drivers/misc/bcm-vk/bcm_vk_tty.c
+++ b/drivers/misc/bcm-vk/bcm_vk_tty.c
@@ -64,9 +64,9 @@ static void bcm_vk_tty_wq_handler(struct work_struct *work)
struct bcm_vk_tty *vktty;
int card_status;
int count;
- unsigned char c;
int i;
int wr;
+ u8 c;
card_status = vkread32(vk, BAR_0, BAR_CARD_STATUS);
if (BCM_VK_INTF_IS_DOWN(card_status))
@@ -192,7 +192,7 @@ static ssize_t bcm_vk_tty_write(struct tty_struct *tty, const u8 *buffer,
int index;
struct bcm_vk *vk;
struct bcm_vk_tty *vktty;
- int i;
+ size_t i;
index = tty->index;
vk = dev_get_drvdata(tty->dev);
diff --git a/drivers/misc/cardreader/Makefile b/drivers/misc/cardreader/Makefile
index 895128475d83..1e1bca6b0b22 100644
--- a/drivers/misc/cardreader/Makefile
+++ b/drivers/misc/cardreader/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_MISC_ALCOR_PCI) += alcor_pci.o
obj-$(CONFIG_MISC_RTSX_PCI) += rtsx_pci.o
-rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o rts5260.o rts5261.o rts5228.o
+rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o rts5260.o rts5261.o rts5228.o rts5264.o
obj-$(CONFIG_MISC_RTSX_USB) += rtsx_usb.o
diff --git a/drivers/misc/cardreader/rts5264.c b/drivers/misc/cardreader/rts5264.c
new file mode 100644
index 000000000000..8be4ed7d9d47
--- /dev/null
+++ b/drivers/misc/cardreader/rts5264.c
@@ -0,0 +1,886 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * Author:
+ * Ricky Wu <ricky_wu@realtek.com>
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/rtsx_pci.h>
+
+#include "rts5264.h"
+#include "rtsx_pcr.h"
+
+static u8 rts5264_get_ic_version(struct rtsx_pcr *pcr)
+{
+ u8 val;
+
+ rtsx_pci_read_register(pcr, DUMMY_REG_RESET_0, &val);
+ return val & 0x0F;
+}
+
+static void rts5264_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
+{
+ u8 driving_3v3[4][3] = {
+ {0x88, 0x88, 0x88},
+ {0x77, 0x77, 0x77},
+ {0x99, 0x99, 0x99},
+ {0x66, 0x66, 0x66},
+ };
+ u8 driving_1v8[4][3] = {
+ {0x99, 0x99, 0x99},
+ {0x77, 0x77, 0x77},
+ {0xBB, 0xBB, 0xBB},
+ {0x65, 0x65, 0x65},
+ };
+ u8 (*driving)[3], drive_sel;
+
+ if (voltage == OUTPUT_3V3) {
+ driving = driving_3v3;
+ drive_sel = pcr->sd30_drive_sel_3v3;
+ } else {
+ driving = driving_1v8;
+ drive_sel = pcr->sd30_drive_sel_1v8;
+ }
+
+ rtsx_pci_write_register(pcr, SD30_CLK_DRIVE_SEL,
+ 0xFF, driving[drive_sel][0]);
+ rtsx_pci_write_register(pcr, SD30_CMD_DRIVE_SEL,
+ 0xFF, driving[drive_sel][1]);
+ rtsx_pci_write_register(pcr, SD30_DAT_DRIVE_SEL,
+ 0xFF, driving[drive_sel][2]);
+}
+
+static void rts5264_force_power_down(struct rtsx_pcr *pcr, u8 pm_state, bool runtime)
+{
+ /* Set relink_time to 0 */
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0);
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, MASK_8_BIT_DEF, 0);
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3,
+ RELINK_TIME_MASK, 0);
+
+ if (pm_state == HOST_ENTER_S3)
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3,
+ D3_DELINK_MODE_EN, D3_DELINK_MODE_EN);
+
+ if (!runtime) {
+ rtsx_pci_write_register(pcr, RTS5264_AUTOLOAD_CFG1,
+ CD_RESUME_EN_MASK, 0);
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x01, 0x00);
+ rtsx_pci_write_register(pcr, RTS5264_REG_PME_FORCE_CTL,
+ FORCE_PM_CONTROL | FORCE_PM_VALUE, FORCE_PM_CONTROL);
+ } else {
+ rtsx_pci_write_register(pcr, RTS5264_REG_PME_FORCE_CTL,
+ FORCE_PM_CONTROL | FORCE_PM_VALUE, 0);
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x01, 0x01);
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3,
+ D3_DELINK_MODE_EN, 0);
+ rtsx_pci_write_register(pcr, RTS5264_FW_CTL,
+ RTS5264_INFORM_RTD3_COLD, RTS5264_INFORM_RTD3_COLD);
+ rtsx_pci_write_register(pcr, RTS5264_AUTOLOAD_CFG4,
+ RTS5264_FORCE_PRSNT_LOW, RTS5264_FORCE_PRSNT_LOW);
+ }
+
+ rtsx_pci_write_register(pcr, RTS5264_REG_FPDCTL,
+ SSC_POWER_DOWN, SSC_POWER_DOWN);
+}
+
+static int rts5264_enable_auto_blink(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, OLT_LED_CTL,
+ LED_SHINE_MASK, LED_SHINE_EN);
+}
+
+static int rts5264_disable_auto_blink(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, OLT_LED_CTL,
+ LED_SHINE_MASK, LED_SHINE_DISABLE);
+}
+
+static int rts5264_turn_on_led(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, GPIO_CTL,
+ 0x02, 0x02);
+}
+
+static int rts5264_turn_off_led(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, GPIO_CTL,
+ 0x02, 0x00);
+}
+
+/* SD Pull Control Enable:
+ * SD_DAT[3:0] ==> pull up
+ * SD_CD ==> pull up
+ * SD_WP ==> pull up
+ * SD_CMD ==> pull up
+ * SD_CLK ==> pull down
+ */
+static const u32 rts5264_sd_pull_ctl_enable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
+ RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE9),
+ 0,
+};
+
+/* SD Pull Control Disable:
+ * SD_DAT[3:0] ==> pull down
+ * SD_CD ==> pull up
+ * SD_WP ==> pull down
+ * SD_CMD ==> pull down
+ * SD_CLK ==> pull down
+ */
+static const u32 rts5264_sd_pull_ctl_disable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD5),
+ 0,
+};
+
+static int rts5264_sd_set_sample_push_timing_sd30(struct rtsx_pcr *pcr)
+{
+ rtsx_pci_write_register(pcr, SD_CFG1, SD_MODE_SELECT_MASK
+ | SD_ASYNC_FIFO_NOT_RST, SD_30_MODE | SD_ASYNC_FIFO_NOT_RST);
+ rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, CLK_LOW_FREQ);
+ rtsx_pci_write_register(pcr, CARD_CLK_SOURCE, 0xFF,
+ CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1);
+ rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
+
+ return 0;
+}
+
+static int rts5264_card_power_on(struct rtsx_pcr *pcr, int card)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ if (option->ocp_en)
+ rtsx_pci_enable_ocp(pcr);
+
+ rtsx_pci_write_register(pcr, REG_CRC_DUMMY_0,
+ CFG_SD_POW_AUTO_PD, CFG_SD_POW_AUTO_PD);
+
+ rtsx_pci_write_register(pcr, RTS5264_LDO1_CFG1,
+ RTS5264_LDO1_TUNE_MASK, RTS5264_LDO1_33);
+ rtsx_pci_write_register(pcr, RTS5264_LDO1233318_POW_CTL,
+ RTS5264_LDO1_POWERON, RTS5264_LDO1_POWERON);
+ rtsx_pci_write_register(pcr, RTS5264_LDO1233318_POW_CTL,
+ RTS5264_LDO3318_POWERON, RTS5264_LDO3318_POWERON);
+
+ msleep(20);
+
+ rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, SD_OUTPUT_EN);
+
+ /* Initialize SD_CFG1 register */
+ rtsx_pci_write_register(pcr, SD_CFG1, 0xFF,
+ SD_CLK_DIVIDE_128 | SD_20_MODE | SD_BUS_WIDTH_1BIT);
+ rtsx_pci_write_register(pcr, SD_SAMPLE_POINT_CTL,
+ 0xFF, SD20_RX_POS_EDGE);
+ rtsx_pci_write_register(pcr, SD_PUSH_POINT_CTL, 0xFF, 0);
+ rtsx_pci_write_register(pcr, CARD_STOP, SD_STOP | SD_CLR_ERR,
+ SD_STOP | SD_CLR_ERR);
+
+ /* Reset SD_CFG3 register */
+ rtsx_pci_write_register(pcr, SD_CFG3, SD30_CLK_END_EN, 0);
+ rtsx_pci_write_register(pcr, REG_SD_STOP_SDCLK_CFG,
+ SD30_CLK_STOP_CFG_EN | SD30_CLK_STOP_CFG1 |
+ SD30_CLK_STOP_CFG0, 0);
+
+ if (pcr->extra_caps & EXTRA_CAPS_SD_SDR50 ||
+ pcr->extra_caps & EXTRA_CAPS_SD_SDR104)
+ rts5264_sd_set_sample_push_timing_sd30(pcr);
+
+ return 0;
+}
+
+static int rts5264_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+ rtsx_pci_write_register(pcr, RTS5264_CARD_PWR_CTL,
+ RTS5264_PUPDC, RTS5264_PUPDC);
+
+ switch (voltage) {
+ case OUTPUT_3V3:
+ rtsx_pci_write_register(pcr, RTS5264_LDO1233318_POW_CTL,
+ RTS5264_TUNE_REF_LDO3318, RTS5264_TUNE_REF_LDO3318);
+ rtsx_pci_write_register(pcr, RTS5264_DV3318_CFG,
+ RTS5264_DV3318_TUNE_MASK, RTS5264_DV3318_33);
+ rtsx_pci_write_register(pcr, SD_PAD_CTL,
+ SD_IO_USING_1V8, 0);
+ break;
+ case OUTPUT_1V8:
+ rtsx_pci_write_register(pcr, RTS5264_LDO1233318_POW_CTL,
+ RTS5264_TUNE_REF_LDO3318, RTS5264_TUNE_REF_LDO3318_DFT);
+ rtsx_pci_write_register(pcr, RTS5264_DV3318_CFG,
+ RTS5264_DV3318_TUNE_MASK, RTS5264_DV3318_18);
+ rtsx_pci_write_register(pcr, SD_PAD_CTL,
+ SD_IO_USING_1V8, SD_IO_USING_1V8);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* set pad drive */
+ rts5264_fill_driving(pcr, voltage);
+
+ return 0;
+}
+
+static void rts5264_stop_cmd(struct rtsx_pcr *pcr)
+{
+ rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
+ rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);
+ rtsx_pci_write_register(pcr, DMACTL, DMA_RST, DMA_RST);
+ rtsx_pci_write_register(pcr, RBCTL, RB_FLUSH, RB_FLUSH);
+}
+
+static void rts5264_card_before_power_off(struct rtsx_pcr *pcr)
+{
+ rts5264_stop_cmd(pcr);
+ rts5264_switch_output_voltage(pcr, OUTPUT_3V3);
+}
+
+static int rts5264_card_power_off(struct rtsx_pcr *pcr, int card)
+{
+ int err = 0;
+
+ rts5264_card_before_power_off(pcr);
+ err = rtsx_pci_write_register(pcr, RTS5264_LDO1233318_POW_CTL,
+ RTS5264_LDO_POWERON_MASK, 0);
+
+ rtsx_pci_write_register(pcr, REG_CRC_DUMMY_0,
+ CFG_SD_POW_AUTO_PD, 0);
+ if (pcr->option.ocp_en)
+ rtsx_pci_disable_ocp(pcr);
+
+ return err;
+}
+
+static void rts5264_enable_ocp(struct rtsx_pcr *pcr)
+{
+ u8 mask = 0;
+ u8 val = 0;
+
+ rtsx_pci_write_register(pcr, RTS5264_LDO1_CFG0,
+ RTS5264_LDO1_OCP_EN | RTS5264_LDO1_OCP_LMT_EN,
+ RTS5264_LDO1_OCP_EN | RTS5264_LDO1_OCP_LMT_EN);
+ rtsx_pci_write_register(pcr, RTS5264_LDO2_CFG0,
+ RTS5264_LDO2_OCP_EN | RTS5264_LDO2_OCP_LMT_EN,
+ RTS5264_LDO2_OCP_EN | RTS5264_LDO2_OCP_LMT_EN);
+ rtsx_pci_write_register(pcr, RTS5264_LDO3_CFG0,
+ RTS5264_LDO3_OCP_EN | RTS5264_LDO3_OCP_LMT_EN,
+ RTS5264_LDO3_OCP_EN | RTS5264_LDO3_OCP_LMT_EN);
+ rtsx_pci_write_register(pcr, RTS5264_OVP_DET,
+ RTS5264_POW_VDET, RTS5264_POW_VDET);
+
+ mask = SD_OCP_INT_EN | SD_DETECT_EN;
+ mask |= SDVIO_OCP_INT_EN | SDVIO_DETECT_EN;
+ val = mask;
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
+
+ mask = SD_VDD3_OCP_INT_EN | SD_VDD3_DETECT_EN;
+ val = mask;
+ rtsx_pci_write_register(pcr, RTS5264_OCP_VDD3_CTL, mask, val);
+
+ mask = RTS5264_OVP_INT_EN | RTS5264_OVP_DETECT_EN;
+ val = mask;
+ rtsx_pci_write_register(pcr, RTS5264_OVP_CTL, mask, val);
+}
+
+static void rts5264_disable_ocp(struct rtsx_pcr *pcr)
+{
+ u8 mask = 0;
+
+ mask = SD_OCP_INT_EN | SD_DETECT_EN;
+ mask |= SDVIO_OCP_INT_EN | SDVIO_DETECT_EN;
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
+
+ mask = SD_VDD3_OCP_INT_EN | SD_VDD3_DETECT_EN;
+ rtsx_pci_write_register(pcr, RTS5264_OCP_VDD3_CTL, mask, 0);
+
+ mask = RTS5264_OVP_INT_EN | RTS5264_OVP_DETECT_EN;
+ rtsx_pci_write_register(pcr, RTS5264_OVP_CTL, mask, 0);
+
+ rtsx_pci_write_register(pcr, RTS5264_LDO1_CFG0,
+ RTS5264_LDO1_OCP_EN | RTS5264_LDO1_OCP_LMT_EN, 0);
+ rtsx_pci_write_register(pcr, RTS5264_LDO2_CFG0,
+ RTS5264_LDO2_OCP_EN | RTS5264_LDO2_OCP_LMT_EN, 0);
+ rtsx_pci_write_register(pcr, RTS5264_LDO3_CFG0,
+ RTS5264_LDO3_OCP_EN | RTS5264_LDO3_OCP_LMT_EN, 0);
+ rtsx_pci_write_register(pcr, RTS5264_OVP_DET, RTS5264_POW_VDET, 0);
+}
+
+static void rts5264_init_ocp(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ if (option->ocp_en) {
+ u8 mask, val;
+
+ rtsx_pci_write_register(pcr, RTS5264_LDO1_CFG0,
+ RTS5264_LDO1_OCP_THD_MASK, option->sd_800mA_ocp_thd);
+ rtsx_pci_write_register(pcr, RTS5264_LDO1_CFG0,
+ RTS5264_LDO1_OCP_LMT_THD_MASK,
+ RTS5264_LDO1_LMT_THD_2000);
+
+ rtsx_pci_write_register(pcr, RTS5264_LDO2_CFG0,
+ RTS5264_LDO2_OCP_THD_MASK, RTS5264_LDO2_OCP_THD_950);
+ rtsx_pci_write_register(pcr, RTS5264_LDO2_CFG0,
+ RTS5264_LDO2_OCP_LMT_THD_MASK,
+ RTS5264_LDO2_LMT_THD_2000);
+
+ rtsx_pci_write_register(pcr, RTS5264_LDO3_CFG0,
+ RTS5264_LDO3_OCP_THD_MASK, RTS5264_LDO3_OCP_THD_710);
+ rtsx_pci_write_register(pcr, RTS5264_LDO3_CFG0,
+ RTS5264_LDO3_OCP_LMT_THD_MASK,
+ RTS5264_LDO3_LMT_THD_1500);
+
+ rtsx_pci_write_register(pcr, RTS5264_OVP_DET,
+ RTS5264_TUNE_VROV_MASK, RTS5264_TUNE_VROV_1V6);
+
+ mask = SD_OCP_GLITCH_MASK | SDVIO_OCP_GLITCH_MASK;
+ val = pcr->hw_param.ocp_glitch;
+ rtsx_pci_write_register(pcr, REG_OCPGLITCH, mask, val);
+
+ } else {
+ rtsx_pci_write_register(pcr, RTS5264_LDO1_CFG0,
+ RTS5264_LDO1_OCP_EN | RTS5264_LDO1_OCP_LMT_EN, 0);
+ rtsx_pci_write_register(pcr, RTS5264_LDO2_CFG0,
+ RTS5264_LDO2_OCP_EN | RTS5264_LDO2_OCP_LMT_EN, 0);
+ rtsx_pci_write_register(pcr, RTS5264_LDO3_CFG0,
+ RTS5264_LDO3_OCP_EN | RTS5264_LDO3_OCP_LMT_EN, 0);
+ rtsx_pci_write_register(pcr, RTS5264_OVP_DET,
+ RTS5264_POW_VDET, 0);
+ }
+}
+
+static int rts5264_get_ocpstat2(struct rtsx_pcr *pcr, u8 *val)
+{
+ return rtsx_pci_read_register(pcr, RTS5264_OCP_VDD3_STS, val);
+}
+
+static int rts5264_get_ovpstat(struct rtsx_pcr *pcr, u8 *val)
+{
+ return rtsx_pci_read_register(pcr, RTS5264_OVP_STS, val);
+}
+
+static void rts5264_clear_ocpstat(struct rtsx_pcr *pcr)
+{
+ u8 mask = 0;
+ u8 val = 0;
+
+ mask = SD_OCP_INT_CLR | SD_OC_CLR;
+ mask |= SDVIO_OCP_INT_CLR | SDVIO_OC_CLR;
+ val = mask;
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
+ rtsx_pci_write_register(pcr, RTS5264_OCP_VDD3_CTL,
+ SD_VDD3_OCP_INT_CLR | SD_VDD3_OC_CLR,
+ SD_VDD3_OCP_INT_CLR | SD_VDD3_OC_CLR);
+ rtsx_pci_write_register(pcr, RTS5264_OVP_CTL,
+ RTS5264_OVP_INT_CLR | RTS5264_OVP_CLR,
+ RTS5264_OVP_INT_CLR | RTS5264_OVP_CLR);
+
+ udelay(1000);
+
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
+ rtsx_pci_write_register(pcr, RTS5264_OCP_VDD3_CTL,
+ SD_VDD3_OCP_INT_CLR | SD_VDD3_OC_CLR, 0);
+ rtsx_pci_write_register(pcr, RTS5264_OVP_CTL,
+ RTS5264_OVP_INT_CLR | RTS5264_OVP_CLR, 0);
+}
+
+static void rts5264_process_ocp(struct rtsx_pcr *pcr)
+{
+ if (!pcr->option.ocp_en)
+ return;
+
+ rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat);
+ rts5264_get_ocpstat2(pcr, &pcr->ocp_stat2);
+ rts5264_get_ovpstat(pcr, &pcr->ovp_stat);
+
+ if ((pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER | SDVIO_OC_NOW | SDVIO_OC_EVER)) ||
+ (pcr->ocp_stat2 & (SD_VDD3_OC_NOW | SD_VDD3_OC_EVER)) ||
+ (pcr->ovp_stat & (RTS5264_OVP_NOW | RTS5264_OVP_EVER))) {
+ rts5264_clear_ocpstat(pcr);
+ rts5264_card_power_off(pcr, RTSX_SD_CARD);
+ rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
+ pcr->ocp_stat = 0;
+ pcr->ocp_stat2 = 0;
+ pcr->ovp_stat = 0;
+ }
+}
+
+static void rts5264_init_from_hw(struct rtsx_pcr *pcr)
+{
+ struct pci_dev *pdev = pcr->pci;
+ u32 lval1, lval2, i;
+ u16 setting_reg1, setting_reg2;
+ u8 valid, efuse_valid, tmp;
+
+ rtsx_pci_write_register(pcr, RTS5264_REG_PME_FORCE_CTL,
+ REG_EFUSE_POR | REG_EFUSE_POWER_MASK,
+ REG_EFUSE_POR | REG_EFUSE_POWERON);
+ udelay(1);
+ rtsx_pci_write_register(pcr, RTS5264_EFUSE_ADDR,
+ RTS5264_EFUSE_ADDR_MASK, 0x00);
+ rtsx_pci_write_register(pcr, RTS5264_EFUSE_CTL,
+ RTS5264_EFUSE_ENABLE | RTS5264_EFUSE_MODE_MASK,
+ RTS5264_EFUSE_ENABLE);
+
+ /* Wait transfer end */
+ for (i = 0; i < MAX_RW_REG_CNT; i++) {
+ rtsx_pci_read_register(pcr, RTS5264_EFUSE_CTL, &tmp);
+ if ((tmp & 0x80) == 0)
+ break;
+ }
+ rtsx_pci_read_register(pcr, RTS5264_EFUSE_READ_DATA, &tmp);
+ efuse_valid = ((tmp & 0x0C) >> 2);
+ pcr_dbg(pcr, "Load efuse valid: 0x%x\n", efuse_valid);
+
+ pci_read_config_dword(pdev, PCR_SETTING_REG2, &lval2);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, lval2);
+ /* 0x816 */
+ valid = (u8)((lval2 >> 16) & 0x03);
+
+ rtsx_pci_write_register(pcr, RTS5264_REG_PME_FORCE_CTL,
+ REG_EFUSE_POR, 0);
+ pcr_dbg(pcr, "Disable efuse por!\n");
+
+ if (efuse_valid == 2 || efuse_valid == 3) {
+ if (valid == 3) {
+ /* Bypass efuse */
+ setting_reg1 = PCR_SETTING_REG1;
+ setting_reg2 = PCR_SETTING_REG2;
+ } else {
+ /* Use efuse data */
+ setting_reg1 = PCR_SETTING_REG4;
+ setting_reg2 = PCR_SETTING_REG5;
+ }
+ } else if (efuse_valid == 0) {
+ // default
+ setting_reg1 = PCR_SETTING_REG1;
+ setting_reg2 = PCR_SETTING_REG2;
+ } else {
+ return;
+ }
+
+ pci_read_config_dword(pdev, setting_reg2, &lval2);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", setting_reg2, lval2);
+
+ if (!rts5264_vendor_setting_valid(lval2)) {
+ pcr_dbg(pcr, "skip fetch vendor setting\n");
+ return;
+ }
+
+ pcr->rtd3_en = rts5264_reg_to_rtd3(lval2);
+
+ if (rts5264_reg_check_reverse_socket(lval2))
+ pcr->flags |= PCR_REVERSE_SOCKET;
+
+ pci_read_config_dword(pdev, setting_reg1, &lval1);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", setting_reg1, lval1);
+
+ pcr->aspm_en = rts5264_reg_to_aspm(lval1);
+ pcr->sd30_drive_sel_1v8 = rts5264_reg_to_sd30_drive_sel_1v8(lval1);
+ pcr->sd30_drive_sel_3v3 = rts5264_reg_to_sd30_drive_sel_3v3(lval1);
+
+ if (setting_reg1 == PCR_SETTING_REG1) {
+ /* store setting */
+ rtsx_pci_write_register(pcr, 0xFF0C, 0xFF, (u8)(lval1 & 0xFF));
+ rtsx_pci_write_register(pcr, 0xFF0D, 0xFF, (u8)((lval1 >> 8) & 0xFF));
+ rtsx_pci_write_register(pcr, 0xFF0E, 0xFF, (u8)((lval1 >> 16) & 0xFF));
+ rtsx_pci_write_register(pcr, 0xFF0F, 0xFF, (u8)((lval1 >> 24) & 0xFF));
+ rtsx_pci_write_register(pcr, 0xFF10, 0xFF, (u8)(lval2 & 0xFF));
+ rtsx_pci_write_register(pcr, 0xFF11, 0xFF, (u8)((lval2 >> 8) & 0xFF));
+ rtsx_pci_write_register(pcr, 0xFF12, 0xFF, (u8)((lval2 >> 16) & 0xFF));
+
+ pci_write_config_dword(pdev, PCR_SETTING_REG4, lval1);
+ lval2 = lval2 & 0x00FFFFFF;
+ pci_write_config_dword(pdev, PCR_SETTING_REG5, lval2);
+ }
+}
+
+static void rts5264_init_from_cfg(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
+ | PM_L1_1_EN | PM_L1_2_EN))
+ rtsx_pci_disable_oobs_polling(pcr);
+ else
+ rtsx_pci_enable_oobs_polling(pcr);
+
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0xFF, 0);
+
+ if (option->ltr_en) {
+ if (option->ltr_enabled)
+ rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
+ }
+}
+
+static int rts5264_extra_init_hw(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ rtsx_pci_write_register(pcr, RTS5264_AUTOLOAD_CFG1,
+ CD_RESUME_EN_MASK, CD_RESUME_EN_MASK);
+ rtsx_pci_write_register(pcr, REG_VREF, PWD_SUSPND_EN, PWD_SUSPND_EN);
+
+ rts5264_init_from_cfg(pcr);
+ rts5264_init_from_hw(pcr);
+
+ /* power off efuse */
+ rtsx_pci_write_register(pcr, RTS5264_REG_PME_FORCE_CTL,
+ REG_EFUSE_POWER_MASK, REG_EFUSE_POWEROFF);
+ rtsx_pci_write_register(pcr, RTS5264_AUTOLOAD_CFG2,
+ RTS5264_CHIP_RST_N_SEL, 0);
+ rtsx_pci_write_register(pcr, RTS5264_REG_LDO12_CFG,
+ RTS5264_LDO12_SR_MASK, RTS5264_LDO12_SR_0_0_MS);
+ rtsx_pci_write_register(pcr, CDGW, 0xFF, 0x01);
+ rtsx_pci_write_register(pcr, RTS5264_CKMUX_MBIAS_PWR,
+ RTS5264_POW_CKMUX, RTS5264_POW_CKMUX);
+ rtsx_pci_write_register(pcr, RTS5264_CMD_OE_START_EARLY,
+ RTS5264_CMD_OE_EARLY_EN | RTS5264_CMD_OE_EARLY_CYCLE_MASK,
+ RTS5264_CMD_OE_EARLY_EN);
+ rtsx_pci_write_register(pcr, RTS5264_DAT_OE_START_EARLY,
+ RTS5264_DAT_OE_EARLY_EN | RTS5264_DAT_OE_EARLY_CYCLE_MASK,
+ RTS5264_DAT_OE_EARLY_EN);
+ rtsx_pci_write_register(pcr, SSC_DIV_N_0, 0xFF, 0x5D);
+
+ rtsx_pci_write_register(pcr, RTS5264_PWR_CUT,
+ RTS5264_CFG_MEM_PD, RTS5264_CFG_MEM_PD);
+ rtsx_pci_write_register(pcr, L1SUB_CONFIG1,
+ AUX_CLK_ACTIVE_SEL_MASK, MAC_CKSW_DONE);
+ rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, 0);
+ rtsx_pci_write_register(pcr, RTS5264_AUTOLOAD_CFG4,
+ RTS5264_AUX_CLK_16M_EN, 0);
+
+ /* Release PRSNT# */
+ rtsx_pci_write_register(pcr, RTS5264_AUTOLOAD_CFG4,
+ RTS5264_FORCE_PRSNT_LOW, 0);
+ rtsx_pci_write_register(pcr, PCLK_CTL,
+ PCLK_MODE_SEL, PCLK_MODE_SEL);
+
+ /* LED shine disabled, set initial shine cycle period */
+ rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x0F, 0x02);
+
+ /* Configure driving */
+ rts5264_fill_driving(pcr, OUTPUT_3V3);
+
+ if (pcr->flags & PCR_REVERSE_SOCKET)
+ rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x30);
+ else
+ rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x00);
+
+ /*
+ * If u_force_clkreq_0 is enabled, the CLKREQ# pin will be forced
+ * to drive low, and we will forcibly request the clock.
+ */
+ if (option->force_clkreq_0)
+ rtsx_pci_write_register(pcr, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
+ else
+ rtsx_pci_write_register(pcr, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
+
+ rtsx_pci_write_register(pcr, PWD_SUSPEND_EN, 0xFF, 0xFF);
+ rtsx_pci_write_register(pcr, RBCTL, U_AUTO_DMA_EN_MASK, 0);
+ rtsx_pci_write_register(pcr, RTS5264_AUTOLOAD_CFG4,
+ RTS5264_F_HIGH_RC_MASK, RTS5264_F_HIGH_RC_400K);
+
+ if (pcr->rtd3_en) {
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x01, 0x00);
+ rtsx_pci_write_register(pcr, RTS5264_REG_PME_FORCE_CTL,
+ FORCE_PM_CONTROL | FORCE_PM_VALUE, 0);
+ } else {
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x01, 0x00);
+ rtsx_pci_write_register(pcr, RTS5264_REG_PME_FORCE_CTL,
+ FORCE_PM_CONTROL | FORCE_PM_VALUE, FORCE_PM_CONTROL);
+ }
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, D3_DELINK_MODE_EN, 0x00);
+
+ /* Clear Enter RTD3_cold Information */
+ rtsx_pci_write_register(pcr, RTS5264_FW_CTL,
+ RTS5264_INFORM_RTD3_COLD, 0);
+
+ return 0;
+}
+
+static void rts5264_enable_aspm(struct rtsx_pcr *pcr, bool enable)
+{
+ u8 val = FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1;
+ u8 mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1;
+
+ if (pcr->aspm_enabled == enable)
+ return;
+
+ val |= (pcr->aspm_en & 0x02);
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
+ pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPMC, pcr->aspm_en);
+ pcr->aspm_enabled = enable;
+}
+
+static void rts5264_disable_aspm(struct rtsx_pcr *pcr, bool enable)
+{
+ u8 val = FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1;
+ u8 mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1;
+
+ if (pcr->aspm_enabled == enable)
+ return;
+
+ pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPMC, 0);
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
+ rtsx_pci_write_register(pcr, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0);
+ udelay(10);
+ pcr->aspm_enabled = enable;
+}
+
+static void rts5264_set_aspm(struct rtsx_pcr *pcr, bool enable)
+{
+ if (enable)
+ rts5264_enable_aspm(pcr, true);
+ else
+ rts5264_disable_aspm(pcr, false);
+}
+
+static void rts5264_set_l1off_cfg_sub_d0(struct rtsx_pcr *pcr, int active)
+{
+ struct rtsx_cr_option *option = &(pcr->option);
+
+ u32 interrupt = rtsx_pci_readl(pcr, RTSX_BIPR);
+ int card_exist = (interrupt & SD_EXIST);
+ int aspm_L1_1, aspm_L1_2;
+ u8 val = 0;
+
+ aspm_L1_1 = rtsx_check_dev_flag(pcr, ASPM_L1_1_EN);
+ aspm_L1_2 = rtsx_check_dev_flag(pcr, ASPM_L1_2_EN);
+
+ if (active) {
+ /* Run, latency: 60us */
+ if (aspm_L1_1)
+ val = option->ltr_l1off_snooze_sspwrgate;
+ } else {
+ /* L1off, latency: 300us */
+ if (aspm_L1_2)
+ val = option->ltr_l1off_sspwrgate;
+ }
+
+ if (aspm_L1_1 || aspm_L1_2) {
+ if (rtsx_check_dev_flag(pcr,
+ LTR_L1SS_PWR_GATE_CHECK_CARD_EN)) {
+ if (card_exist)
+ val &= ~L1OFF_MBIAS2_EN_5250;
+ else
+ val |= L1OFF_MBIAS2_EN_5250;
+ }
+ }
+ rtsx_set_l1off_sub(pcr, val);
+}
+
+static const struct pcr_ops rts5264_pcr_ops = {
+ .turn_on_led = rts5264_turn_on_led,
+ .turn_off_led = rts5264_turn_off_led,
+ .extra_init_hw = rts5264_extra_init_hw,
+ .enable_auto_blink = rts5264_enable_auto_blink,
+ .disable_auto_blink = rts5264_disable_auto_blink,
+ .card_power_on = rts5264_card_power_on,
+ .card_power_off = rts5264_card_power_off,
+ .switch_output_voltage = rts5264_switch_output_voltage,
+ .force_power_down = rts5264_force_power_down,
+ .stop_cmd = rts5264_stop_cmd,
+ .set_aspm = rts5264_set_aspm,
+ .set_l1off_cfg_sub_d0 = rts5264_set_l1off_cfg_sub_d0,
+ .enable_ocp = rts5264_enable_ocp,
+ .disable_ocp = rts5264_disable_ocp,
+ .init_ocp = rts5264_init_ocp,
+ .process_ocp = rts5264_process_ocp,
+ .clear_ocpstat = rts5264_clear_ocpstat,
+};
+
+static inline u8 double_ssc_depth(u8 depth)
+{
+ return ((depth > 1) ? (depth - 1) : depth);
+}
+
+int rts5264_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
+ u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
+{
+ int err, clk;
+ u16 n;
+ u8 clk_divider, mcu_cnt, div;
+ static const u8 depth[] = {
+ [RTSX_SSC_DEPTH_4M] = RTS5264_SSC_DEPTH_4M,
+ [RTSX_SSC_DEPTH_2M] = RTS5264_SSC_DEPTH_2M,
+ [RTSX_SSC_DEPTH_1M] = RTS5264_SSC_DEPTH_1M,
+ [RTSX_SSC_DEPTH_500K] = RTS5264_SSC_DEPTH_512K,
+ };
+
+ if (initial_mode) {
+ /* Use roughly 250 kHz here, in the initial stage */
+ clk_divider = SD_CLK_DIVIDE_128;
+ card_clock = 30000000;
+ } else {
+ clk_divider = SD_CLK_DIVIDE_0;
+ }
+ err = rtsx_pci_write_register(pcr, SD_CFG1,
+ SD_CLK_DIVIDE_MASK, clk_divider);
+ if (err < 0)
+ return err;
+
+ card_clock /= 1000000;
+ pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock);
+
+ clk = card_clock;
+ if (!initial_mode && double_clk)
+ clk = card_clock * 2;
+ pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n",
+ clk, pcr->cur_clock);
+
+ if (clk == pcr->cur_clock)
+ return 0;
+
+ if (pcr->ops->conv_clk_and_div_n)
+ n = pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
+ else
+ n = clk - 4;
+ if ((clk <= 4) || (n > 396))
+ return -EINVAL;
+
+ mcu_cnt = 125/clk + 3;
+ if (mcu_cnt > 15)
+ mcu_cnt = 15;
+
+ div = CLK_DIV_1;
+ while ((n < MIN_DIV_N_PCR - 4) && (div < CLK_DIV_8)) {
+ if (pcr->ops->conv_clk_and_div_n) {
+ int dbl_clk = pcr->ops->conv_clk_and_div_n(n,
+ DIV_N_TO_CLK) * 2;
+ n = pcr->ops->conv_clk_and_div_n(dbl_clk,
+ CLK_TO_DIV_N);
+ } else {
+ n = (n + 4) * 2 - 4;
+ }
+ div++;
+ }
+
+ n = (n / 2) - 1;
+ pcr_dbg(pcr, "n = %d, div = %d\n", n, div);
+
+ ssc_depth = depth[ssc_depth];
+ if (double_clk)
+ ssc_depth = double_ssc_depth(ssc_depth);
+
+ if (ssc_depth) {
+ if (div == CLK_DIV_2) {
+ if (ssc_depth > 1)
+ ssc_depth -= 1;
+ else
+ ssc_depth = RTS5264_SSC_DEPTH_8M;
+ } else if (div == CLK_DIV_4) {
+ if (ssc_depth > 2)
+ ssc_depth -= 2;
+ else
+ ssc_depth = RTS5264_SSC_DEPTH_8M;
+ } else if (div == CLK_DIV_8) {
+ if (ssc_depth > 3)
+ ssc_depth -= 3;
+ else
+ ssc_depth = RTS5264_SSC_DEPTH_8M;
+ }
+ } else {
+ ssc_depth = 0;
+ }
+ pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth);
+
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
+ CHANGE_CLK, CHANGE_CLK);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV,
+ 0xFF, (div << 4) | mcu_cnt);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2,
+ SSC_DEPTH_MASK, ssc_depth);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
+
+ if (is_version(pcr, 0x5264, IC_VER_A)) {
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RTS5264_CARD_CLK_SRC2,
+ RTS5264_REG_BIG_KVCO_A, 0);
+ } else {
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RTS5264_SYS_DUMMY_1,
+ RTS5264_REG_BIG_KVCO, 0);
+ }
+
+ if (vpclk) {
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
+ PHASE_NOT_RESET, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK1_CTL,
+ PHASE_NOT_RESET, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
+ PHASE_NOT_RESET, PHASE_NOT_RESET);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK1_CTL,
+ PHASE_NOT_RESET, PHASE_NOT_RESET);
+ }
+
+ err = rtsx_pci_send_cmd(pcr, 2000);
+ if (err < 0)
+ return err;
+
+ /* Wait SSC clock stable */
+ udelay(SSC_CLOCK_STABLE_WAIT);
+ err = rtsx_pci_write_register(pcr, CLK_CTL, CHANGE_CLK, 0);
+ if (err < 0)
+ return err;
+
+ pcr->cur_clock = clk;
+ return 0;
+}
+
+void rts5264_init_params(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ struct rtsx_hw_param *hw_param = &pcr->hw_param;
+ u8 val;
+
+ pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104;
+ pcr->extra_caps |= EXTRA_CAPS_NO_MMC;
+ rtsx_pci_read_register(pcr, RTS5264_FW_STATUS, &val);
+ if (!(val & RTS5264_EXPRESS_LINK_FAIL_MASK))
+ pcr->extra_caps |= EXTRA_CAPS_SD_EXPRESS;
+ pcr->num_slots = 1;
+ pcr->ops = &rts5264_pcr_ops;
+
+ pcr->flags = 0;
+ pcr->card_drive_sel = RTSX_CARD_DRIVE_DEFAULT;
+ pcr->sd30_drive_sel_1v8 = 0x00;
+ pcr->sd30_drive_sel_3v3 = 0x00;
+ pcr->aspm_en = ASPM_L1_EN;
+ pcr->aspm_mode = ASPM_MODE_REG;
+ pcr->tx_initial_phase = SET_CLOCK_PHASE(24, 24, 11);
+ pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
+
+ pcr->ic_version = rts5264_get_ic_version(pcr);
+ pcr->sd_pull_ctl_enable_tbl = rts5264_sd_pull_ctl_enable_tbl;
+ pcr->sd_pull_ctl_disable_tbl = rts5264_sd_pull_ctl_disable_tbl;
+
+ pcr->reg_pm_ctrl3 = RTS5264_AUTOLOAD_CFG3;
+
+ option->dev_flags = (LTR_L1SS_PWR_GATE_CHECK_CARD_EN
+ | LTR_L1SS_PWR_GATE_EN);
+ option->ltr_en = true;
+
+ /* init latency of active, idle, L1OFF to 60us, 300us, 3ms */
+ option->ltr_active_latency = LTR_ACTIVE_LATENCY_DEF;
+ option->ltr_idle_latency = LTR_IDLE_LATENCY_DEF;
+ option->ltr_l1off_latency = LTR_L1OFF_LATENCY_DEF;
+ option->l1_snooze_delay = L1_SNOOZE_DELAY_DEF;
+ option->ltr_l1off_sspwrgate = 0x7F;
+ option->ltr_l1off_snooze_sspwrgate = 0x78;
+
+ option->ocp_en = 1;
+ hw_param->interrupt_en |= (SD_OC_INT_EN | SD_OVP_INT_EN);
+ hw_param->ocp_glitch = SD_OCP_GLITCH_800U | SDVIO_OCP_GLITCH_800U;
+ option->sd_800mA_ocp_thd = RTS5264_LDO1_OCP_THD_1150;
+}
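
The divider arithmetic in rts5264_pci_switch_clock() above is terse, so here is a standalone sketch of its default path (no conv_clk_and_div_n hook installed). MIN_DIV_N_PCR = 80 and CLK_DIV_1/CLK_DIV_8 = 1/4 are assumptions carried over from rtsx_pcr.h and rtsx_pci.h:

	#include <stdio.h>

	#define MIN_DIV_N_PCR	80	/* assumed, from rtsx_pcr.h */
	#define CLK_DIV_1	1	/* assumed, from rtsx_pci.h */
	#define CLK_DIV_8	4

	int main(void)
	{
		int clk = 50;		/* requested card clock, MHz */
		int n = clk - 4;	/* DIV_N encoding of the clock */
		int div = CLK_DIV_1;

		/* A small N gives coarse SSC steps: double the PLL clock
		 * (N) and let the output divider compensate, up to /8. */
		while (n < MIN_DIV_N_PCR - 4 && div < CLK_DIV_8) {
			n = (n + 4) * 2 - 4;
			div++;
		}

		n = (n / 2) - 1;	/* value written to SSC_DIV_N_0 */
		printf("clk=%d MHz -> SSC_DIV_N_0=%d, div=%d\n", clk, n, div);
		return 0;
	}

For a 50 MHz request this prints SSC_DIV_N_0=47, div=2: N was doubled once to clear the minimum, and the extra /2 output divider compensates.
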
diff --git a/drivers/misc/cardreader/rts5264.h b/drivers/misc/cardreader/rts5264.h
new file mode 100644
index 000000000000..e3cbbf2fe1a4
--- /dev/null
+++ b/drivers/misc/cardreader/rts5264.h
@@ -0,0 +1,278 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2018-2019 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * Author:
+ * Ricky Wu <ricky_wu@realtek.com>
+ */
+#ifndef RTS5264_H
+#define RTS5264_H
+
+/* New additions */
+#define rts5264_vendor_setting_valid(reg) ((reg) & 0x010000)
+#define rts5264_reg_to_aspm(reg) \
+ (((~(reg) >> 28) & 0x02) | (((reg) >> 28) & 0x01))
+#define rts5264_reg_check_reverse_socket(reg) ((reg) & 0x04)
+#define rts5264_reg_to_sd30_drive_sel_1v8(reg) (((reg) >> 22) & 0x03)
+#define rts5264_reg_to_sd30_drive_sel_3v3(reg) (((reg) >> 16) & 0x03)
+#define rts5264_reg_to_rtd3(reg) ((reg) & 0x08)
+
+#define RTS5264_AUTOLOAD_CFG0 0xFF7B
+#define RTS5264_AUTOLOAD_CFG1 0xFF7C
+#define RTS5264_AUTOLOAD_CFG3 0xFF7E
+#define RTS5264_AUTOLOAD_CFG4 0xFF7F
+#define RTS5264_FORCE_PRSNT_LOW (1 << 6)
+#define RTS5264_AUX_CLK_16M_EN (1 << 5)
+#define RTS5264_F_HIGH_RC_MASK (1 << 4)
+#define RTS5264_F_HIGH_RC_1_6M (1 << 4)
+#define RTS5264_F_HIGH_RC_400K (0 << 4)
+
+/* SSC_CTL2 0xFC12 */
+#define RTS5264_SSC_DEPTH_MASK 0x07
+#define RTS5264_SSC_DEPTH_DISABLE 0x00
+#define RTS5264_SSC_DEPTH_8M 0x01
+#define RTS5264_SSC_DEPTH_4M 0x02
+#define RTS5264_SSC_DEPTH_2M 0x03
+#define RTS5264_SSC_DEPTH_1M 0x04
+#define RTS5264_SSC_DEPTH_512K 0x05
+#define RTS5264_SSC_DEPTH_256K 0x06
+#define RTS5264_SSC_DEPTH_128K 0x07
+
+#define RTS5264_CARD_CLK_SRC2 0xFC2F
+#define RTS5264_REG_BIG_KVCO_A 0x20
+
+/* efuse control register */
+#define RTS5264_EFUSE_CTL 0xFC30
+#define RTS5264_EFUSE_ENABLE 0x80
+/* EFUSE_MODE: 0=READ 1=PROGRAM */
+#define RTS5264_EFUSE_MODE_MASK 0x40
+#define RTS5264_EFUSE_PROGRAM 0x40
+
+#define RTS5264_EFUSE_ADDR 0xFC31
+#define RTS5264_EFUSE_ADDR_MASK 0x3F
+
+#define RTS5264_EFUSE_WRITE_DATA 0xFC32
+#define RTS5264_EFUSE_READ_DATA 0xFC34
+
+#define RTS5264_SYS_DUMMY_1 0xFC35
+#define RTS5264_REG_BIG_KVCO 0x04
+
+/* DMACTL 0xFE2C */
+#define RTS5264_DMA_PACK_SIZE_MASK 0x70
+
+#define RTS5264_FW_CFG1 0xFF55
+#define RTS5264_SYS_CLK_SEL_MCU_CLK (0x01<<7)
+#define RTS5264_CRC_CLK_SEL_MCU_CLK (0x01<<6)
+#define RTS5264_FAKE_MCU_CLOCK_GATING (0x01<<5)
+#define RTS5264_MCU_BUS_SEL_MASK (0x01<<4)
+
+/* FW status register */
+#define RTS5264_FW_STATUS 0xFF56
+#define RTS5264_EXPRESS_LINK_FAIL_MASK (0x01<<7)
+
+/* FW control register */
+#define RTS5264_FW_CTL 0xFF5F
+#define RTS5264_INFORM_RTD3_COLD (0x01<<5)
+
+#define RTS5264_REG_FPDCTL 0xFF60
+
+#define RTS5264_REG_LDO12_CFG 0xFF6E
+#define RTS5264_LDO12_SR_MASK (0x03<<6)
+#define RTS5264_LDO12_SR_1_0_MS (0x03<<6)
+#define RTS5264_LDO12_SR_0_5_MS (0x02<<6)
+#define RTS5264_LDO12_SR_0_2_5_MS (0x01<<6)
+#define RTS5264_LDO12_SR_0_0_MS (0x00<<6)
+#define RTS5264_LDO12_VO_TUNE_MASK (0x07<<1)
+#define RTS5264_LDO12_115 (0x03<<1)
+#define RTS5264_LDO12_120 (0x04<<1)
+#define RTS5264_LDO12_125 (0x05<<1)
+#define RTS5264_LDO12_130 (0x06<<1)
+#define RTS5264_LDO12_135 (0x07<<1)
+
+/* LDO control register */
+#define RTS5264_CARD_PWR_CTL 0xFD50
+#define RTS5264_SD_CLK_ISO (0x01<<7)
+#define RTS5264_PAD_SD_DAT_FW_CTRL (0x01<<6)
+#define RTS5264_PUPDC (0x01<<5)
+#define RTS5264_SD_CMD_ISO (0x01<<4)
+
+#define RTS5264_OCP_VDD3_CTL 0xFD89
+#define SD_VDD3_DETECT_EN 0x08
+#define SD_VDD3_OCP_INT_EN 0x04
+#define SD_VDD3_OCP_INT_CLR 0x02
+#define SD_VDD3_OC_CLR 0x01
+
+#define RTS5264_OCP_VDD3_STS 0xFD8A
+#define SD_VDD3_OCP_DETECT 0x08
+#define SD_VDD3_OC_NOW 0x04
+#define SD_VDD3_OC_EVER 0x02
+
+#define RTS5264_OVP_CTL 0xFD8D
+#define RTS5264_OVP_TIME_MASK 0xF0
+#define RTS5264_OVP_TIME_DFT 0x50
+#define RTS5264_OVP_DETECT_EN 0x08
+#define RTS5264_OVP_INT_EN 0x04
+#define RTS5264_OVP_INT_CLR 0x02
+#define RTS5264_OVP_CLR 0x01
+
+#define RTS5264_OVP_STS 0xFD8E
+#define RTS5264_OVP_GLTCH_TIME_MASK 0xF0
+#define RTS5264_OVP_GLTCH_TIME_DFT 0x50
+#define RTS5264_VOVER_DET 0x08
+#define RTS5264_OVP_NOW 0x04
+#define RTS5264_OVP_EVER 0x02
+
+#define RTS5264_CMD_OE_START_EARLY 0xFDCB
+#define RTS5264_CMD_OE_EARLY_LEAVE 0x08
+#define RTS5264_CMD_OE_EARLY_CYCLE_MASK 0x06
+#define RTS5264_CMD_OE_EARLY_4CYCLE 0x06
+#define RTS5264_CMD_OE_EARLY_3CYCLE 0x04
+#define RTS5264_CMD_OE_EARLY_2CYCLE 0x02
+#define RTS5264_CMD_OE_EARLY_1CYCLE 0x00
+#define RTS5264_CMD_OE_EARLY_EN 0x01
+
+#define RTS5264_DAT_OE_START_EARLY 0xFDCC
+#define RTS5264_DAT_OE_EARLY_LEAVE 0x08
+#define RTS5264_DAT_OE_EARLY_CYCLE_MASK 0x06
+#define RTS5264_DAT_OE_EARLY_4CYCLE 0x06
+#define RTS5264_DAT_OE_EARLY_3CYCLE 0x04
+#define RTS5264_DAT_OE_EARLY_2CYCLE 0x02
+#define RTS5264_DAT_OE_EARLY_1CYCLE 0x00
+#define RTS5264_DAT_OE_EARLY_EN 0x01
+
+#define RTS5264_LDO1233318_POW_CTL 0xFF70
+#define RTS5264_TUNE_REF_LDO3318 (0x03<<6)
+#define RTS5264_TUNE_REF_LDO3318_DFT (0x02<<6)
+#define RTS5264_LDO3318_POWERON (0x01<<3)
+#define RTS5264_LDO3_POWERON (0x01<<2)
+#define RTS5264_LDO2_POWERON (0x01<<1)
+#define RTS5264_LDO1_POWERON (0x01<<0)
+#define RTS5264_LDO_POWERON_MASK (0x0F<<0)
+
+#define RTS5264_DV3318_CFG 0xFF71
+#define RTS5264_DV3318_TUNE_MASK (0x07<<4)
+#define RTS5264_DV3318_18 (0x02<<4)
+#define RTS5264_DV3318_19 (0x04<<4)
+#define RTS5264_DV3318_33 (0x07<<4)
+
+#define RTS5264_LDO1_CFG0 0xFF72
+#define RTS5264_LDO1_OCP_THD_MASK (0x07 << 5)
+#define RTS5264_LDO1_OCP_EN (0x01 << 4)
+#define RTS5264_LDO1_OCP_LMT_THD_MASK (0x03 << 2)
+#define RTS5264_LDO1_OCP_LMT_EN (0x01 << 1)
+
+#define RTS5264_LDO1_OCP_THD_850 (0x00<<5)
+#define RTS5264_LDO1_OCP_THD_950 (0x01<<5)
+#define RTS5264_LDO1_OCP_THD_1050 (0x02<<5)
+#define RTS5264_LDO1_OCP_THD_1100 (0x03<<5)
+#define RTS5264_LDO1_OCP_THD_1150 (0x04<<5)
+#define RTS5264_LDO1_OCP_THD_1200 (0x05<<5)
+#define RTS5264_LDO1_OCP_THD_1300 (0x06<<5)
+#define RTS5264_LDO1_OCP_THD_1350 (0x07<<5)
+
+#define RTS5264_LDO1_LMT_THD_1700 (0x00<<2)
+#define RTS5264_LDO1_LMT_THD_1800 (0x01<<2)
+#define RTS5264_LDO1_LMT_THD_1900 (0x02<<2)
+#define RTS5264_LDO1_LMT_THD_2000 (0x03<<2)
+
+#define RTS5264_LDO1_CFG1 0xFF73
+#define RTS5264_LDO1_TUNE_MASK (0x07<<1)
+#define RTS5264_LDO1_18 (0x05<<1)
+#define RTS5264_LDO1_33 (0x07<<1)
+#define RTS5264_LDO1_PWD_MASK (0x01<<0)
+
+#define RTS5264_LDO2_CFG0 0xFF74
+#define RTS5264_LDO2_OCP_THD_MASK (0x07<<5)
+#define RTS5264_LDO2_OCP_EN (0x01<<4)
+#define RTS5264_LDO2_OCP_LMT_THD_MASK (0x03<<2)
+#define RTS5264_LDO2_OCP_LMT_EN (0x01<<1)
+
+#define RTS5264_LDO2_OCP_THD_750 (0x00<<5)
+#define RTS5264_LDO2_OCP_THD_850 (0x01<<5)
+#define RTS5264_LDO2_OCP_THD_900 (0x02<<5)
+#define RTS5264_LDO2_OCP_THD_950 (0x03<<5)
+#define RTS5264_LDO2_OCP_THD_1050 (0x04<<5)
+#define RTS5264_LDO2_OCP_THD_1100 (0x05<<5)
+#define RTS5264_LDO2_OCP_THD_1150 (0x06<<5)
+#define RTS5264_LDO2_OCP_THD_1200 (0x07<<5)
+
+#define RTS5264_LDO2_LMT_THD_1700 (0x00<<2)
+#define RTS5264_LDO2_LMT_THD_1800 (0x01<<2)
+#define RTS5264_LDO2_LMT_THD_1900 (0x02<<2)
+#define RTS5264_LDO2_LMT_THD_2000 (0x03<<2)
+
+#define RTS5264_LDO2_CFG1 0xFF75
+#define RTS5264_LDO2_TUNE_MASK (0x07<<1)
+#define RTS5264_LDO2_18 (0x02<<1)
+#define RTS5264_LDO2_185 (0x03<<1)
+#define RTS5264_LDO2_19 (0x04<<1)
+#define RTS5264_LDO2_195 (0x05<<1)
+#define RTS5264_LDO2_33 (0x07<<1)
+#define RTS5264_LDO2_PWD_MASK (0x01<<0)
+
+#define RTS5264_LDO3_CFG0 0xFF76
+#define RTS5264_LDO3_OCP_THD_MASK (0x07<<5)
+#define RTS5264_LDO3_OCP_EN (0x01<<4)
+#define RTS5264_LDO3_OCP_LMT_THD_MASK (0x03<<2)
+#define RTS5264_LDO3_OCP_LMT_EN (0x01<<1)
+
+#define RTS5264_LDO3_OCP_THD_610 (0x00<<5)
+#define RTS5264_LDO3_OCP_THD_630 (0x01<<5)
+#define RTS5264_LDO3_OCP_THD_670 (0x02<<5)
+#define RTS5264_LDO3_OCP_THD_710 (0x03<<5)
+#define RTS5264_LDO3_OCP_THD_750 (0x04<<5)
+#define RTS5264_LDO3_OCP_THD_770 (0x05<<5)
+#define RTS5264_LDO3_OCP_THD_810 (0x06<<5)
+#define RTS5264_LDO3_OCP_THD_850 (0x07<<5)
+
+#define RTS5264_LDO3_LMT_THD_1200 (0x00<<2)
+#define RTS5264_LDO3_LMT_THD_1300 (0x01<<2)
+#define RTS5264_LDO3_LMT_THD_1400 (0x02<<2)
+#define RTS5264_LDO3_LMT_THD_1500 (0x03<<2)
+
+#define RTS5264_LDO3_CFG1 0xFF77
+#define RTS5264_LDO3_TUNE_MASK (0x07<<1)
+#define RTS5264_LDO3_12 (0x02<<1)
+#define RTS5264_LDO3_125 (0x03<<1)
+#define RTS5264_LDO3_13 (0x04<<1)
+#define RTS5264_LDO3_135 (0x05<<1)
+#define RTS5264_LDO3_33 (0x07<<1)
+#define RTS5264_LDO3_PWD_MASK (0x01<<0)
+
+#define RTS5264_REG_PME_FORCE_CTL 0xFF78
+#define FORCE_PM_CONTROL 0x20
+#define FORCE_PM_VALUE 0x10
+#define REG_EFUSE_BYPASS 0x08
+#define REG_EFUSE_POR 0x04
+#define REG_EFUSE_POWER_MASK 0x03
+#define REG_EFUSE_POWERON 0x03
+#define REG_EFUSE_POWEROFF 0x00
+
+#define RTS5264_PWR_CUT 0xFF81
+#define RTS5264_CFG_MEM_PD 0xF0
+
+#define RTS5264_OVP_DET 0xFF8A
+#define RTS5264_POW_VDET 0x04
+#define RTS5264_TUNE_VROV_MASK 0x03
+#define RTS5264_TUNE_VROV_2V 0x03
+#define RTS5264_TUNE_VROV_1V8 0x02
+#define RTS5264_TUNE_VROV_1V6 0x01
+#define RTS5264_TUNE_VROV_1V4 0x00
+
+#define RTS5264_CKMUX_MBIAS_PWR 0xFF8B
+#define RTS5264_NON_XTAL_SEL 0x80
+#define RTS5264_POW_CKMUX 0x40
+#define RTS5264_LVD_MASK 0x04
+#define RTS5264_POW_PSW_MASK 0x03
+#define RTS5264_POW_PSW_DFT 0x03
+
+/* Single LUN, support SD/SD EXPRESS */
+#define DEFAULT_SINGLE 0
+#define SD_LUN 1
+#define SD_EXPRESS_LUN 2
+
+int rts5264_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
+ u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk);
+
+#endif /* RTS5264_H */
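
The decode macros at the top of this header unpack a single 32-bit vendor-setting dword read from PCI config space. A small self-contained check (the sample dword is made up for illustration):

	#include <stdio.h>

	#define rts5264_vendor_setting_valid(reg)	((reg) & 0x010000)
	#define rts5264_reg_to_aspm(reg) \
		(((~(reg) >> 28) & 0x02) | (((reg) >> 28) & 0x01))
	#define rts5264_reg_to_sd30_drive_sel_1v8(reg)	(((reg) >> 22) & 0x03)
	#define rts5264_reg_to_sd30_drive_sel_3v3(reg)	(((reg) >> 16) & 0x03)

	int main(void)
	{
		unsigned int reg = 0x10410000;	/* made-up sample value */

		printf("valid=%u aspm=%u 1v8=%u 3v3=%u\n",
		       !!rts5264_vendor_setting_valid(reg),
		       rts5264_reg_to_aspm(reg),
		       rts5264_reg_to_sd30_drive_sel_1v8(reg),
		       rts5264_reg_to_sd30_drive_sel_3v3(reg));
		return 0;
	}

For 0x10410000 this prints valid=1 aspm=3 1v8=1 3v3=1; note how the aspm macro inverts bit 29 while passing bit 28 through unchanged.
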
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index a30751ad3733..1a64364700eb 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -26,6 +26,7 @@
#include "rtsx_pcr.h"
#include "rts5261.h"
#include "rts5228.h"
+#include "rts5264.h"
static bool msi_en = true;
module_param(msi_en, bool, S_IRUGO | S_IWUSR);
@@ -54,6 +55,7 @@ static const struct pci_device_id rtsx_pci_ids[] = {
{ PCI_DEVICE(0x10EC, 0x5260), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x5261), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x5228), PCI_CLASS_OTHERS << 16, 0xFF0000 },
+ { PCI_DEVICE(0x10EC, 0x5264), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ 0, }
};
@@ -714,6 +716,9 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
if (PCI_PID(pcr) == PID_5228)
return rts5228_pci_switch_clock(pcr, card_clock,
ssc_depth, initial_mode, double_clk, vpclk);
+ if (PCI_PID(pcr) == PID_5264)
+ return rts5264_pci_switch_clock(pcr, card_clock,
+ ssc_depth, initial_mode, double_clk, vpclk);
if (initial_mode) {
/* We use 250k(around) here, in initial stage */
@@ -987,7 +992,8 @@ static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
int_reg &= (pcr->bier | 0x7FFFFF);
- if (int_reg & SD_OC_INT)
+ if ((int_reg & SD_OC_INT) ||
+ ((int_reg & SD_OVP_INT) && (PCI_PID(pcr) == PID_5264)))
rtsx_pci_process_ocp_interrupt(pcr);
if (int_reg & SD_INT) {
@@ -1159,7 +1165,9 @@ void rtsx_pci_enable_oobs_polling(struct rtsx_pcr *pcr)
{
u16 val;
- if ((PCI_PID(pcr) != PID_525A) && (PCI_PID(pcr) != PID_5260)) {
+ if ((PCI_PID(pcr) != PID_525A) &&
+ (PCI_PID(pcr) != PID_5260) &&
+ (PCI_PID(pcr) != PID_5264)) {
rtsx_pci_read_phy_register(pcr, 0x01, &val);
val |= 1<<9;
rtsx_pci_write_phy_register(pcr, 0x01, val);
@@ -1175,7 +1183,9 @@ void rtsx_pci_disable_oobs_polling(struct rtsx_pcr *pcr)
{
u16 val;
- if ((PCI_PID(pcr) != PID_525A) && (PCI_PID(pcr) != PID_5260)) {
+ if ((PCI_PID(pcr) != PID_525A) &&
+ (PCI_PID(pcr) != PID_5260) &&
+ (PCI_PID(pcr) != PID_5264)) {
rtsx_pci_read_phy_register(pcr, 0x01, &val);
val &= ~(1<<9);
rtsx_pci_write_phy_register(pcr, 0x01, val);
@@ -1226,7 +1236,7 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
rtsx_pci_enable_bus_int(pcr);
/* Power on SSC */
- if (PCI_PID(pcr) == PID_5261) {
+ if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5264)) {
/* Gating real mcu clock */
err = rtsx_pci_write_register(pcr, RTS5261_FW_CFG1,
RTS5261_MCU_CLOCK_GATING, 0);
@@ -1270,6 +1280,11 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
else if (PCI_PID(pcr) == PID_5228)
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
RTS5228_SSC_DEPTH_2M);
+ else if (is_version(pcr, 0x5264, IC_VER_A))
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
+ else if (PCI_PID(pcr) == PID_5264)
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
+ RTS5264_SSC_DEPTH_2M);
else
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12);
@@ -1305,6 +1320,7 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
case PID_5260:
case PID_5261:
case PID_5228:
+ case PID_5264:
rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1);
break;
default:
@@ -1404,6 +1420,10 @@ static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
case 0x5228:
rts5228_init_params(pcr);
break;
+
+ case 0x5264:
+ rts5264_init_params(pcr);
+ break;
}
pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n",
@@ -1544,7 +1564,7 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
pcr->pci = pcidev;
dev_set_drvdata(&pcidev->dev, handle);
- if (CHK_PCI_PID(pcr, 0x525A))
+ if ((CHK_PCI_PID(pcr, 0x525A)) || (CHK_PCI_PID(pcr, 0x5264)))
bar = 1;
len = pci_resource_len(pcidev, bar);
base = pci_resource_start(pcidev, bar);
diff --git a/drivers/misc/cardreader/rtsx_pcr.h b/drivers/misc/cardreader/rtsx_pcr.h
index 37d1f316ae17..9215d66de00c 100644
--- a/drivers/misc/cardreader/rtsx_pcr.h
+++ b/drivers/misc/cardreader/rtsx_pcr.h
@@ -74,6 +74,7 @@ void rtl8411b_init_params(struct rtsx_pcr *pcr);
void rts5260_init_params(struct rtsx_pcr *pcr);
void rts5261_init_params(struct rtsx_pcr *pcr);
void rts5228_init_params(struct rtsx_pcr *pcr);
+void rts5264_init_params(struct rtsx_pcr *pcr);
static inline u8 map_sd_drive(int idx)
{
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index 0562071cdd4a..6ad0ab892675 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -836,7 +836,8 @@ static inline bool cxl_is_power8(void)
{
if ((pvr_version_is(PVR_POWER8E)) ||
(pvr_version_is(PVR_POWER8NVL)) ||
- (pvr_version_is(PVR_POWER8)))
+ (pvr_version_is(PVR_POWER8)) ||
+ (pvr_version_is(PVR_HX_C2000)))
return true;
return false;
}
diff --git a/drivers/misc/dw-xdata-pcie.c b/drivers/misc/dw-xdata-pcie.c
index 257c25da5199..efd0ca8cc925 100644
--- a/drivers/misc/dw-xdata-pcie.c
+++ b/drivers/misc/dw-xdata-pcie.c
@@ -333,7 +333,7 @@ static int dw_xdata_pcie_probe(struct pci_dev *pdev,
dw->pdev = pdev;
- id = ida_simple_get(&xdata_ida, 0, 0, GFP_KERNEL);
+ id = ida_alloc(&xdata_ida, GFP_KERNEL);
if (id < 0) {
dev_err(dev, "xData: unable to get id\n");
return id;
@@ -377,7 +377,7 @@ err_kfree_name:
kfree(dw->misc_dev.name);
err_ida_remove:
- ida_simple_remove(&xdata_ida, id);
+ ida_free(&xdata_ida, id);
return err;
}
@@ -396,7 +396,7 @@ static void dw_xdata_pcie_remove(struct pci_dev *pdev)
dw_xdata_stop(dw);
misc_deregister(&dw->misc_dev);
kfree(dw->misc_dev.name);
- ida_simple_remove(&xdata_ida, id);
+ ida_free(&xdata_ida, id);
}
static const struct pci_device_id dw_xdata_pcie_id_table[] = {
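
These hunks are one small piece of the tree-wide migration off the deprecated ida_simple_*() wrappers: ida_simple_get(&ida, 0, 0, gfp), where an upper bound of 0 means "no limit", is exactly ida_alloc(&ida, gfp), and ida_simple_remove() is ida_free(). A kernel-style sketch of the converged pattern (the example_* names are made up):

	#include <linux/idr.h>

	static DEFINE_IDA(example_ida);

	static int example_get_id(void)
	{
		/* Was: ida_simple_get(&example_ida, 0, 0, GFP_KERNEL) */
		return ida_alloc(&example_ida, GFP_KERNEL);	/* >= 0 or -errno */
	}

	static void example_put_id(int id)
	{
		/* Was: ida_simple_remove(&example_ida, id) */
		ida_free(&example_ida, id);
	}
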
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 8279adade07e..572333ead5fb 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -440,12 +440,9 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count)
if (off + count > at24->byte_len)
return -EINVAL;
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- pm_runtime_put_noidle(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
return ret;
- }
-
/*
* Read data from chip, protecting against concurrent updates
* from this host, but not from other I2C masters.
@@ -487,12 +484,9 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count)
if (off + count > at24->byte_len)
return -EINVAL;
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- pm_runtime_put_noidle(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
return ret;
- }
-
/*
* Write data to chip, protecting against concurrent updates
* from this host, but not from other I2C masters.
@@ -564,6 +558,31 @@ static unsigned int at24_get_offset_adj(u8 flags, unsigned int byte_len)
}
}
+static void at24_probe_temp_sensor(struct i2c_client *client)
+{
+ struct at24_data *at24 = i2c_get_clientdata(client);
+ struct i2c_board_info info = { .type = "jc42" };
+ int ret;
+ u8 val;
+
+ /*
+ * Byte 2 has value 11 for DDR3; earlier versions don't
+ * support the thermal sensor present flag
+ */
+ ret = at24_read(at24, 2, &val, 1);
+ if (ret || val != 11)
+ return;
+
+ /* Byte 32, bit 7 is set if temp sensor is present */
+ ret = at24_read(at24, 32, &val, 1);
+ if (ret || !(val & BIT(7)))
+ return;
+
+ info.addr = 0x18 | (client->addr & 7);
+
+ i2c_new_client_device(client->adapter, &info);
+}
+
static int at24_probe(struct i2c_client *client)
{
struct regmap_config regmap_config = { };
@@ -763,6 +782,10 @@ static int at24_probe(struct i2c_client *client)
}
}
+ /* If this is an SPD EEPROM, probe for DDR3 thermal sensor */
+ if (cdata == &at24_data_spd)
+ at24_probe_temp_sensor(client);
+
pm_runtime_idle(dev);
if (writable)
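
The pm_runtime hunks above work because pm_runtime_resume_and_get() already drops the usage count on failure and returns 0 on success, which is why the error check tightens from "if (ret < 0)" plus pm_runtime_put_noidle() to a bare "if (ret)". A kernel-style sketch of the resulting pattern (example_io() is a made-up name):

	#include <linux/device.h>
	#include <linux/pm_runtime.h>

	static int example_io(struct device *dev)
	{
		int ret;

		/* Resumes the device; on failure the usage count is
		 * dropped internally, so no put_noidle() is needed. */
		ret = pm_runtime_resume_and_get(dev);
		if (ret)
			return ret;

		/* ... talk to the hardware ... */

		pm_runtime_put(dev);
		return 0;
	}
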
diff --git a/drivers/misc/eeprom/ee1004.c b/drivers/misc/eeprom/ee1004.c
index a1acd77130f2..21feebc3044c 100644
--- a/drivers/misc/eeprom/ee1004.c
+++ b/drivers/misc/eeprom/ee1004.c
@@ -31,6 +31,7 @@
* over performance.
*/
+#define EE1004_MAX_BUSSES 8
#define EE1004_ADDR_SET_PAGE 0x36
#define EE1004_NUM_PAGES 2
#define EE1004_PAGE_SIZE 256
@@ -42,9 +43,13 @@
* from page selection to end of read.
*/
static DEFINE_MUTEX(ee1004_bus_lock);
-static struct i2c_client *ee1004_set_page[EE1004_NUM_PAGES];
-static unsigned int ee1004_dev_count;
-static int ee1004_current_page;
+
+static struct ee1004_bus_data {
+ struct i2c_adapter *adap;
+ struct i2c_client *set_page[EE1004_NUM_PAGES];
+ unsigned int dev_count;
+ int current_page;
+} ee1004_bus_data[EE1004_MAX_BUSSES];
static const struct i2c_device_id ee1004_ids[] = {
{ "ee1004", 0 },
@@ -54,11 +59,29 @@ MODULE_DEVICE_TABLE(i2c, ee1004_ids);
/*-------------------------------------------------------------------------*/
-static int ee1004_get_current_page(void)
+static struct ee1004_bus_data *ee1004_get_bus_data(struct i2c_adapter *adap)
+{
+ int i;
+
+ for (i = 0; i < EE1004_MAX_BUSSES; i++)
+ if (ee1004_bus_data[i].adap == adap)
+ return ee1004_bus_data + i;
+
+ /* If there is no entry yet, create a new one */
+ for (i = 0; i < EE1004_MAX_BUSSES; i++)
+ if (!ee1004_bus_data[i].adap) {
+ ee1004_bus_data[i].adap = adap;
+ return ee1004_bus_data + i;
+ }
+
+ return NULL;
+}
+
+static int ee1004_get_current_page(struct ee1004_bus_data *bd)
{
int err;
- err = i2c_smbus_read_byte(ee1004_set_page[0]);
+ err = i2c_smbus_read_byte(bd->set_page[0]);
if (err == -ENXIO) {
/* Nack means page 1 is selected */
return 1;
@@ -72,28 +95,29 @@ static int ee1004_get_current_page(void)
return 0;
}
-static int ee1004_set_current_page(struct device *dev, int page)
+static int ee1004_set_current_page(struct i2c_client *client, int page)
{
+ struct ee1004_bus_data *bd = i2c_get_clientdata(client);
int ret;
- if (page == ee1004_current_page)
+ if (page == bd->current_page)
return 0;
/* Data is ignored */
- ret = i2c_smbus_write_byte(ee1004_set_page[page], 0x00);
+ ret = i2c_smbus_write_byte(bd->set_page[page], 0x00);
/*
* Don't give up just yet. Some memory modules will select the page
* but not ack the command. Check which page is selected now.
*/
- if (ret == -ENXIO && ee1004_get_current_page() == page)
+ if (ret == -ENXIO && ee1004_get_current_page(bd) == page)
ret = 0;
if (ret < 0) {
- dev_err(dev, "Failed to select page %d (%d)\n", page, ret);
+ dev_err(&client->dev, "Failed to select page %d (%d)\n", page, ret);
return ret;
}
- dev_dbg(dev, "Selected page %d\n", page);
- ee1004_current_page = page;
+ dev_dbg(&client->dev, "Selected page %d\n", page);
+ bd->current_page = page;
return 0;
}
@@ -106,7 +130,7 @@ static ssize_t ee1004_eeprom_read(struct i2c_client *client, char *buf,
page = offset >> EE1004_PAGE_SHIFT;
offset &= (1 << EE1004_PAGE_SHIFT) - 1;
- status = ee1004_set_current_page(&client->dev, page);
+ status = ee1004_set_current_page(client, page);
if (status)
return status;
@@ -158,17 +182,34 @@ static struct bin_attribute *ee1004_attrs[] = {
BIN_ATTRIBUTE_GROUPS(ee1004);
-static void ee1004_cleanup(int idx)
+static void ee1004_probe_temp_sensor(struct i2c_client *client)
{
- if (--ee1004_dev_count == 0)
- while (--idx >= 0) {
- i2c_unregister_device(ee1004_set_page[idx]);
- ee1004_set_page[idx] = NULL;
- }
+ struct i2c_board_info info = { .type = "jc42" };
+ u8 byte14;
+ int ret;
+
+ /* byte 14, bit 7 is set if temp sensor is present */
+ ret = ee1004_eeprom_read(client, &byte14, 14, 1);
+ if (ret != 1 || !(byte14 & BIT(7)))
+ return;
+
+ info.addr = 0x18 | (client->addr & 7);
+
+ i2c_new_client_device(client->adapter, &info);
+}
+
+static void ee1004_cleanup(int idx, struct ee1004_bus_data *bd)
+{
+ if (--bd->dev_count == 0) {
+ while (--idx >= 0)
+ i2c_unregister_device(bd->set_page[idx]);
+ memset(bd, 0, sizeof(struct ee1004_bus_data));
+ }
}
static int ee1004_probe(struct i2c_client *client)
{
+ struct ee1004_bus_data *bd;
int err, cnr = 0;
/* Make sure we can operate on this adapter */
@@ -178,9 +219,19 @@ static int ee1004_probe(struct i2c_client *client)
I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_READ_BYTE_DATA))
return -EPFNOSUPPORT;
- /* Use 2 dummy devices for page select command */
mutex_lock(&ee1004_bus_lock);
- if (++ee1004_dev_count == 1) {
+
+ bd = ee1004_get_bus_data(client->adapter);
+ if (!bd) {
+ mutex_unlock(&ee1004_bus_lock);
+ return dev_err_probe(&client->dev, -ENOSPC,
+ "Only %d busses supported", EE1004_MAX_BUSSES);
+ }
+
+ i2c_set_clientdata(client, bd);
+
+ if (++bd->dev_count == 1) {
+ /* Use 2 dummy devices for page select command */
for (cnr = 0; cnr < EE1004_NUM_PAGES; cnr++) {
struct i2c_client *cl;
@@ -189,21 +240,19 @@ static int ee1004_probe(struct i2c_client *client)
err = PTR_ERR(cl);
goto err_clients;
}
- ee1004_set_page[cnr] = cl;
+ bd->set_page[cnr] = cl;
}
/* Remember current page to avoid unneeded page select */
- err = ee1004_get_current_page();
+ err = ee1004_get_current_page(bd);
if (err < 0)
goto err_clients;
dev_dbg(&client->dev, "Currently selected page: %d\n", err);
- ee1004_current_page = err;
- } else if (client->adapter != ee1004_set_page[0]->adapter) {
- dev_err(&client->dev,
- "Driver only supports devices on a single I2C bus\n");
- err = -EOPNOTSUPP;
- goto err_clients;
+ bd->current_page = err;
}
+
+ ee1004_probe_temp_sensor(client);
+
mutex_unlock(&ee1004_bus_lock);
dev_info(&client->dev,
@@ -213,7 +262,7 @@ static int ee1004_probe(struct i2c_client *client)
return 0;
err_clients:
- ee1004_cleanup(cnr);
+ ee1004_cleanup(cnr, bd);
mutex_unlock(&ee1004_bus_lock);
return err;
@@ -221,9 +270,11 @@ static int ee1004_probe(struct i2c_client *client)
static void ee1004_remove(struct i2c_client *client)
{
+ struct ee1004_bus_data *bd = i2c_get_clientdata(client);
+
/* Remove page select clients if this is the last device */
mutex_lock(&ee1004_bus_lock);
- ee1004_cleanup(EE1004_NUM_PAGES);
+ ee1004_cleanup(EE1004_NUM_PAGES, bd);
mutex_unlock(&ee1004_bus_lock);
}
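
Both at24_probe_temp_sensor() and ee1004_probe_temp_sensor() rely on the same slot-preserving address convention: DDR SPD EEPROMs answer at 0x50-0x57 and the matching JC42.4 thermal sensor at 0x18-0x1f, so 0x18 | (client->addr & 7) carries the DIMM slot bits straight across. A quick standalone illustration:

	#include <stdio.h>

	int main(void)
	{
		unsigned int spd;

		/* SPD EEPROMs: 0x50..0x57; JC42.4 sensors: 0x18..0x1f */
		for (spd = 0x50; spd <= 0x57; spd++)
			printf("SPD 0x%02x -> sensor 0x%02x\n",
			       spd, 0x18 | (spd & 7));
		return 0;
	}
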
diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c
index 55fc5b80e649..4441aca2280a 100644
--- a/drivers/misc/genwqe/card_dev.c
+++ b/drivers/misc/genwqe/card_dev.c
@@ -443,7 +443,7 @@ static int genwqe_mmap(struct file *filp, struct vm_area_struct *vma)
if (vsize == 0)
return -EINVAL;
- if (get_order(vsize) > MAX_ORDER)
+ if (get_order(vsize) > MAX_PAGE_ORDER)
return -ENOMEM;
dma_map = kzalloc(sizeof(struct dma_mapping), GFP_KERNEL);
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 1c798d6b2dfb..a2c4a9b4f871 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -210,7 +210,7 @@ u32 genwqe_crc32(u8 *buff, size_t len, u32 init)
void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
dma_addr_t *dma_handle)
{
- if (get_order(size) > MAX_ORDER)
+ if (get_order(size) > MAX_PAGE_ORDER)
return NULL;
return dma_alloc_coherent(&cd->pci_dev->dev, size, dma_handle,
@@ -308,7 +308,7 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
sgl->write = write;
sgl->sgl_size = genwqe_sgl_size(sgl->nr_pages);
- if (get_order(sgl->sgl_size) > MAX_ORDER) {
+ if (get_order(sgl->sgl_size) > MAX_PAGE_ORDER) {
dev_err(&pci_dev->dev,
"[%s] err: too much memory requested!\n", __func__);
return ret;
diff --git a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
index 3882e97e96a7..c6eb27d46cb0 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
@@ -150,6 +150,7 @@ static int lis3lv02d_i2c_probe(struct i2c_client *client)
lis3_dev.init = lis3_i2c_init;
lis3_dev.read = lis3_i2c_read;
lis3_dev.write = lis3_i2c_write;
+ lis3_dev.reg_ctrl = lis3_reg_ctrl;
lis3_dev.irq = client->irq;
lis3_dev.ac = lis3lv02d_axis_map;
lis3_dev.pm_dev = &client->dev;
diff --git a/drivers/misc/lkdtm/heap.c b/drivers/misc/lkdtm/heap.c
index 0ce4cbf6abda..4f467d3972a6 100644
--- a/drivers/misc/lkdtm/heap.c
+++ b/drivers/misc/lkdtm/heap.c
@@ -4,6 +4,7 @@
* page allocation and slab allocations.
*/
#include "lkdtm.h"
+#include <linux/kfence.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
@@ -132,6 +133,64 @@ static void lkdtm_READ_AFTER_FREE(void)
kfree(val);
}
+static void lkdtm_KFENCE_READ_AFTER_FREE(void)
+{
+ int *base, val, saw;
+ unsigned long timeout, resched_after;
+ size_t len = 1024;
+ /*
+ * The slub allocator will use either the first word or
+ * the middle of the allocation to store the free pointer,
+ * depending on configuration. Store in the second word to
+ * avoid running into the freelist.
+ */
+ size_t offset = sizeof(*base);
+
+ /*
+ * 100x the sample interval should be more than enough to ensure we get
+ * a KFENCE allocation eventually.
+ */
+ timeout = jiffies + msecs_to_jiffies(100 * kfence_sample_interval);
+ /*
+ * Especially on non-preemptible kernels, give the allocation-gate
+ * timer a chance to catch up: after @resched_after, every failed
+ * allocation attempt yields so the timer can actually be scheduled.
+ */
+ resched_after = jiffies + msecs_to_jiffies(kfence_sample_interval);
+ do {
+ base = kmalloc(len, GFP_KERNEL);
+ if (!base) {
+ pr_err("FAIL: Unable to allocate kfence memory!\n");
+ return;
+ }
+
+ if (is_kfence_address(base)) {
+ val = 0x12345678;
+ base[offset] = val;
+ pr_info("Value in memory before free: %x\n", base[offset]);
+
+ kfree(base);
+
+ pr_info("Attempting bad read from freed memory\n");
+ saw = base[offset];
+ if (saw != val) {
+ /* Good! Poisoning happened, so declare a win. */
+ pr_info("Memory correctly poisoned (%x)\n", saw);
+ } else {
+ pr_err("FAIL: Memory was not poisoned!\n");
+ pr_expected_config_param(CONFIG_INIT_ON_FREE_DEFAULT_ON, "init_on_free");
+ }
+ return;
+ }
+
+ kfree(base);
+ if (time_after(jiffies, resched_after))
+ cond_resched();
+ } while (time_before(jiffies, timeout));
+
+ pr_err("FAIL: kfence memory never allocated!\n");
+}
+
static void lkdtm_WRITE_BUDDY_AFTER_FREE(void)
{
unsigned long p = __get_free_page(GFP_KERNEL);
@@ -327,6 +386,7 @@ static struct crashtype crashtypes[] = {
CRASHTYPE(VMALLOC_LINEAR_OVERFLOW),
CRASHTYPE(WRITE_AFTER_FREE),
CRASHTYPE(READ_AFTER_FREE),
+ CRASHTYPE(KFENCE_READ_AFTER_FREE),
CRASHTYPE(WRITE_BUDDY_AFTER_FREE),
CRASHTYPE(READ_BUDDY_AFTER_FREE),
CRASHTYPE(SLAB_INIT_ON_ALLOC),
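
KFENCE samples only a small fraction of allocations, which is why the new test has to loop until is_kfence_address() fires. The retry skeleton, reduced to its essentials (the helper name is ours):

	#include <linux/kfence.h>
	#include <linux/sched.h>
	#include <linux/slab.h>

	static void *kmalloc_until_kfence(size_t len, unsigned long budget_ms)
	{
		unsigned long timeout = jiffies + msecs_to_jiffies(budget_ms);

		do {
			void *p = kmalloc(len, GFP_KERNEL);

			if (!p)
				return NULL;	/* allocation failure, not a sampling miss */
			if (is_kfence_address(p))
				return p;	/* this one was sampled by KFENCE */
			kfree(p);		/* ordinary slab object, try again */
			cond_resched();		/* let the allocation gate reopen */
		} while (time_before(jiffies, timeout));

		return NULL;			/* sampling never hit within the budget */
	}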
diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig
index 37db142de413..67d9391f1855 100644
--- a/drivers/misc/mei/Kconfig
+++ b/drivers/misc/mei/Kconfig
@@ -3,6 +3,7 @@
config INTEL_MEI
tristate "Intel Management Engine Interface"
depends on X86 && PCI
+ default GENERIC_CPU || MCORE2 || MATOM || X86_GENERIC
help
The Intel Management Engine (Intel ME) provides Manageability,
Security and Media services for system containing Intel chipsets.
@@ -11,10 +12,11 @@ config INTEL_MEI
For more information see
<https://software.intel.com/en-us/manageability/>
+if INTEL_MEI
+
config INTEL_MEI_ME
tristate "ME Enabled Intel Chipsets"
- select INTEL_MEI
- depends on X86 && PCI
+ default y
help
MEI support for ME Enabled Intel chipsets.
@@ -38,8 +40,6 @@ config INTEL_MEI_ME
config INTEL_MEI_TXE
tristate "Intel Trusted Execution Environment with ME Interface"
- select INTEL_MEI
- depends on X86 && PCI
help
MEI Support for Trusted Execution Environment device on Intel SoCs
@@ -48,9 +48,7 @@ config INTEL_MEI_TXE
config INTEL_MEI_GSC
tristate "Intel MEI GSC embedded device"
- depends on INTEL_MEI
depends on INTEL_MEI_ME
- depends on X86 && PCI
depends on DRM_I915
help
Intel auxiliary driver for GSC devices embedded in Intel graphics devices.
@@ -60,6 +58,31 @@ config INTEL_MEI_GSC
tasks such as graphics card firmware update and security
tasks.
+config INTEL_MEI_VSC_HW
+ tristate "Intel visual sensing controller device transport driver"
+ depends on ACPI && SPI
+ depends on GPIOLIB || COMPILE_TEST
+ help
+ Intel SPI transport driver between host and Intel visual sensing
+ controller (IVSC) device.
+
+ This driver can also be built as a module. If so, the module
+ will be called mei-vsc-hw.
+
+config INTEL_MEI_VSC
+ tristate "Intel visual sensing controller device with ME interface"
+ depends on INTEL_MEI_VSC_HW
+ help
+ Intel MEI over SPI driver for Intel visual sensing controller
+ (IVSC) device embedded in IA platform. It supports camera sharing
+ between IVSC for context sensing and IPU for typical media usage.
+ Selecting this option enables the transport layer for the IVSC device.
+
+ This driver can also be built as a module. If so, the module
+ will be called mei-vsc.
+
source "drivers/misc/mei/hdcp/Kconfig"
source "drivers/misc/mei/pxp/Kconfig"
source "drivers/misc/mei/gsc_proxy/Kconfig"
+
+endif
diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile
index 14aee253ae48..6f9fdbf1a495 100644
--- a/drivers/misc/mei/Makefile
+++ b/drivers/misc/mei/Makefile
@@ -31,3 +31,10 @@ CFLAGS_mei-trace.o = -I$(src)
obj-$(CONFIG_INTEL_MEI_HDCP) += hdcp/
obj-$(CONFIG_INTEL_MEI_PXP) += pxp/
obj-$(CONFIG_INTEL_MEI_GSC_PROXY) += gsc_proxy/
+
+obj-$(CONFIG_INTEL_MEI_VSC_HW) += mei-vsc-hw.o
+mei-vsc-hw-y := vsc-tp.o
+mei-vsc-hw-y += vsc-fw-loader.o
+
+obj-$(CONFIG_INTEL_MEI_VSC) += mei-vsc.o
+mei-vsc-y := platform-vsc.o
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 9c8fc87938a7..9d090fa07516 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -2011,7 +2011,7 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, unsigned long time
mei_hdr = mei_msg_hdr_init(cb);
if (IS_ERR(mei_hdr)) {
- rets = -PTR_ERR(mei_hdr);
+ rets = PTR_ERR(mei_hdr);
mei_hdr = NULL;
goto err;
}
@@ -2032,7 +2032,7 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, unsigned long time
hbuf_slots = mei_hbuf_empty_slots(dev);
if (hbuf_slots < 0) {
- rets = -EOVERFLOW;
+ buf_len = -EOVERFLOW;
goto out;
}
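
The first client.c hunk above is a sign fix: PTR_ERR() already yields the negative errno stored in an error pointer, so the old `rets = -PTR_ERR(mei_hdr)` turned, say, -ENOMEM into +12, and the failure could propagate as a bogus positive byte count. (The second hunk stores -EOVERFLOW in buf_len before jumping to the common exit path.) A two-line illustration:

	#include <linux/err.h>

	static int ptr_err_demo(void)
	{
		void *hdr = ERR_PTR(-ENOMEM);	/* an error pointer, as mei_msg_hdr_init() can return */

		return PTR_ERR(hdr);		/* -ENOMEM; "-PTR_ERR(hdr)" would be +12 */
	}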
diff --git a/drivers/misc/mei/gsc_proxy/Kconfig b/drivers/misc/mei/gsc_proxy/Kconfig
index 5f68d9f3d691..ac78b9d1eccd 100644
--- a/drivers/misc/mei/gsc_proxy/Kconfig
+++ b/drivers/misc/mei/gsc_proxy/Kconfig
@@ -3,7 +3,7 @@
#
config INTEL_MEI_GSC_PROXY
tristate "Intel GSC Proxy services of ME Interface"
- select INTEL_MEI_ME
+ depends on INTEL_MEI_ME
depends on DRM_I915
help
MEI Support for GSC Proxy Services on Intel platforms.
diff --git a/drivers/misc/mei/hdcp/Kconfig b/drivers/misc/mei/hdcp/Kconfig
index 54e1c9526909..9be312ec798d 100644
--- a/drivers/misc/mei/hdcp/Kconfig
+++ b/drivers/misc/mei/hdcp/Kconfig
@@ -3,7 +3,7 @@
#
config INTEL_MEI_HDCP
tristate "Intel HDCP2.2 services of ME Interface"
- select INTEL_MEI_ME
+ depends on INTEL_MEI_ME
depends on DRM_I915
help
MEI Support for HDCP2.2 Services on Intel platforms.
diff --git a/drivers/misc/mei/platform-vsc.c b/drivers/misc/mei/platform-vsc.c
new file mode 100644
index 000000000000..8d303c6c0000
--- /dev/null
+++ b/drivers/misc/mei/platform-vsc.c
@@ -0,0 +1,450 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, Intel Corporation.
+ * Intel Visual Sensing Controller Interface Linux driver
+ */
+
+#include <linux/align.h>
+#include <linux/cache.h>
+#include <linux/cleanup.h>
+#include <linux/iopoll.h>
+#include <linux/list.h>
+#include <linux/mei.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/overflow.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/timekeeping.h>
+#include <linux/types.h>
+
+#include <asm-generic/bug.h>
+#include <asm-generic/unaligned.h>
+
+#include "mei_dev.h"
+#include "vsc-tp.h"
+
+#define MEI_VSC_DRV_NAME "intel_vsc"
+
+#define MEI_VSC_MAX_MSG_SIZE 512
+
+#define MEI_VSC_POLL_DELAY_US (50 * USEC_PER_MSEC)
+#define MEI_VSC_POLL_TIMEOUT_US (200 * USEC_PER_MSEC)
+
+#define mei_dev_to_vsc_hw(dev) ((struct mei_vsc_hw *)((dev)->hw))
+
+struct mei_vsc_host_timestamp {
+ u64 realtime;
+ u64 boottime;
+};
+
+struct mei_vsc_hw {
+ struct vsc_tp *tp;
+
+ bool fw_ready;
+ bool host_ready;
+
+ atomic_t write_lock_cnt;
+
+ u32 rx_len;
+ u32 rx_hdr;
+
+ /* buffer for tx */
+ char tx_buf[MEI_VSC_MAX_MSG_SIZE + sizeof(struct mei_msg_hdr)] ____cacheline_aligned;
+ /* buffer for rx */
+ char rx_buf[MEI_VSC_MAX_MSG_SIZE + sizeof(struct mei_msg_hdr)] ____cacheline_aligned;
+};
+
+static int mei_vsc_read_helper(struct mei_vsc_hw *hw, u8 *buf,
+ u32 max_len)
+{
+ struct mei_vsc_host_timestamp ts = {
+ .realtime = ktime_to_ns(ktime_get_real()),
+ .boottime = ktime_to_ns(ktime_get_boottime()),
+ };
+
+ return vsc_tp_xfer(hw->tp, VSC_TP_CMD_READ, &ts, sizeof(ts),
+ buf, max_len);
+}
+
+static int mei_vsc_write_helper(struct mei_vsc_hw *hw, u8 *buf, u32 len)
+{
+ u8 status;
+
+ return vsc_tp_xfer(hw->tp, VSC_TP_CMD_WRITE, buf, len, &status,
+ sizeof(status));
+}
+
+static int mei_vsc_fw_status(struct mei_device *mei_dev,
+ struct mei_fw_status *fw_status)
+{
+ if (!fw_status)
+ return -EINVAL;
+
+ fw_status->count = 0;
+
+ return 0;
+}
+
+static inline enum mei_pg_state mei_vsc_pg_state(struct mei_device *mei_dev)
+{
+ return MEI_PG_OFF;
+}
+
+static void mei_vsc_intr_enable(struct mei_device *mei_dev)
+{
+ struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
+
+ vsc_tp_intr_enable(hw->tp);
+}
+
+static void mei_vsc_intr_disable(struct mei_device *mei_dev)
+{
+ struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
+
+ vsc_tp_intr_disable(hw->tp);
+}
+
+/* the mei framework requires this op */
+static void mei_vsc_intr_clear(struct mei_device *mei_dev)
+{
+}
+
+/* wait for pending irq handler */
+static void mei_vsc_synchronize_irq(struct mei_device *mei_dev)
+{
+ struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
+
+ vsc_tp_intr_synchronize(hw->tp);
+}
+
+static int mei_vsc_hw_config(struct mei_device *mei_dev)
+{
+ return 0;
+}
+
+static bool mei_vsc_host_is_ready(struct mei_device *mei_dev)
+{
+ struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
+
+ return hw->host_ready;
+}
+
+static bool mei_vsc_hw_is_ready(struct mei_device *mei_dev)
+{
+ struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
+
+ return hw->fw_ready;
+}
+
+static int mei_vsc_hw_start(struct mei_device *mei_dev)
+{
+ struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
+ int ret, rlen;
+ u8 buf;
+
+ hw->host_ready = true;
+
+ vsc_tp_intr_enable(hw->tp);
+
+ ret = read_poll_timeout(mei_vsc_read_helper, rlen,
+ rlen >= 0, MEI_VSC_POLL_DELAY_US,
+ MEI_VSC_POLL_TIMEOUT_US, true,
+ hw, &buf, sizeof(buf));
+ if (ret) {
+ dev_err(mei_dev->dev, "wait fw ready failed: %d\n", ret);
+ return ret;
+ }
+
+ hw->fw_ready = true;
+
+ return 0;
+}
+
+static bool mei_vsc_hbuf_is_ready(struct mei_device *mei_dev)
+{
+ struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
+
+ return atomic_read(&hw->write_lock_cnt) == 0;
+}
+
+static int mei_vsc_hbuf_empty_slots(struct mei_device *mei_dev)
+{
+ return MEI_VSC_MAX_MSG_SIZE / MEI_SLOT_SIZE;
+}
+
+static u32 mei_vsc_hbuf_depth(const struct mei_device *mei_dev)
+{
+ return MEI_VSC_MAX_MSG_SIZE / MEI_SLOT_SIZE;
+}
+
+static int mei_vsc_write(struct mei_device *mei_dev,
+ const void *hdr, size_t hdr_len,
+ const void *data, size_t data_len)
+{
+ struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
+ char *buf = hw->tx_buf;
+ int ret;
+
+ if (WARN_ON(!hdr || !IS_ALIGNED(hdr_len, 4)))
+ return -EINVAL;
+
+ if (!data || data_len > MEI_VSC_MAX_MSG_SIZE)
+ return -EINVAL;
+
+ atomic_inc(&hw->write_lock_cnt);
+
+ memcpy(buf, hdr, hdr_len);
+ memcpy(buf + hdr_len, data, data_len);
+
+ ret = mei_vsc_write_helper(hw, buf, hdr_len + data_len);
+
+ atomic_dec_if_positive(&hw->write_lock_cnt);
+
+ return ret < 0 ? ret : 0;
+}
+
+static inline u32 mei_vsc_read(const struct mei_device *mei_dev)
+{
+ struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
+ int ret;
+
+ ret = mei_vsc_read_helper(hw, hw->rx_buf, sizeof(hw->rx_buf));
+ if (ret < 0 || ret < sizeof(u32))
+ return 0;
+ hw->rx_len = ret;
+
+ hw->rx_hdr = get_unaligned_le32(hw->rx_buf);
+
+ return hw->rx_hdr;
+}
+
+static int mei_vsc_count_full_read_slots(struct mei_device *mei_dev)
+{
+ return MEI_VSC_MAX_MSG_SIZE / MEI_SLOT_SIZE;
+}
+
+static int mei_vsc_read_slots(struct mei_device *mei_dev, unsigned char *buf,
+ unsigned long len)
+{
+ struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
+ struct mei_msg_hdr *hdr;
+
+ hdr = (struct mei_msg_hdr *)&hw->rx_hdr;
+ if (len != hdr->length || hdr->length + sizeof(*hdr) != hw->rx_len)
+ return -EINVAL;
+
+ memcpy(buf, hw->rx_buf + sizeof(*hdr), len);
+
+ return 0;
+}
+
+static bool mei_vsc_pg_in_transition(struct mei_device *mei_dev)
+{
+ return mei_dev->pg_event >= MEI_PG_EVENT_WAIT &&
+ mei_dev->pg_event <= MEI_PG_EVENT_INTR_WAIT;
+}
+
+static bool mei_vsc_pg_is_enabled(struct mei_device *mei_dev)
+{
+ return false;
+}
+
+static int mei_vsc_hw_reset(struct mei_device *mei_dev, bool intr_enable)
+{
+ struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
+
+ vsc_tp_reset(hw->tp);
+
+ vsc_tp_intr_disable(hw->tp);
+
+ return vsc_tp_init(hw->tp, mei_dev->dev);
+}
+
+static const struct mei_hw_ops mei_vsc_hw_ops = {
+ .fw_status = mei_vsc_fw_status,
+ .pg_state = mei_vsc_pg_state,
+
+ .host_is_ready = mei_vsc_host_is_ready,
+ .hw_is_ready = mei_vsc_hw_is_ready,
+ .hw_reset = mei_vsc_hw_reset,
+ .hw_config = mei_vsc_hw_config,
+ .hw_start = mei_vsc_hw_start,
+
+ .pg_in_transition = mei_vsc_pg_in_transition,
+ .pg_is_enabled = mei_vsc_pg_is_enabled,
+
+ .intr_clear = mei_vsc_intr_clear,
+ .intr_enable = mei_vsc_intr_enable,
+ .intr_disable = mei_vsc_intr_disable,
+ .synchronize_irq = mei_vsc_synchronize_irq,
+
+ .hbuf_free_slots = mei_vsc_hbuf_empty_slots,
+ .hbuf_is_ready = mei_vsc_hbuf_is_ready,
+ .hbuf_depth = mei_vsc_hbuf_depth,
+ .write = mei_vsc_write,
+
+ .rdbuf_full_slots = mei_vsc_count_full_read_slots,
+ .read_hdr = mei_vsc_read,
+ .read = mei_vsc_read_slots,
+};
+
+static void mei_vsc_event_cb(void *context)
+{
+ struct mei_device *mei_dev = context;
+ struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
+ struct list_head cmpl_list;
+ s32 slots;
+ int ret;
+
+ if (mei_dev->dev_state == MEI_DEV_RESETTING ||
+ mei_dev->dev_state == MEI_DEV_INITIALIZING)
+ return;
+
+ INIT_LIST_HEAD(&cmpl_list);
+
+ guard(mutex)(&mei_dev->device_lock);
+
+ while (vsc_tp_need_read(hw->tp)) {
+ /* check slots available for reading */
+ slots = mei_count_full_read_slots(mei_dev);
+
+ ret = mei_irq_read_handler(mei_dev, &cmpl_list, &slots);
+ if (ret) {
+ if (ret != -ENODATA) {
+ if (mei_dev->dev_state != MEI_DEV_RESETTING &&
+ mei_dev->dev_state != MEI_DEV_POWER_DOWN)
+ schedule_work(&mei_dev->reset_work);
+ }
+
+ return;
+ }
+ }
+
+ mei_dev->hbuf_is_ready = mei_hbuf_is_ready(mei_dev);
+ ret = mei_irq_write_handler(mei_dev, &cmpl_list);
+ if (ret)
+ dev_err(mei_dev->dev, "dispatch write request failed: %d\n", ret);
+
+ mei_dev->hbuf_is_ready = mei_hbuf_is_ready(mei_dev);
+ mei_irq_compl_handler(mei_dev, &cmpl_list);
+}
+
+static int mei_vsc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mei_device *mei_dev;
+ struct mei_vsc_hw *hw;
+ struct vsc_tp *tp;
+ int ret;
+
+ tp = *(struct vsc_tp **)dev_get_platdata(dev);
+ if (!tp)
+ return dev_err_probe(dev, -ENODEV, "no platform data\n");
+
+ mei_dev = devm_kzalloc(dev, size_add(sizeof(*mei_dev), sizeof(*hw)),
+ GFP_KERNEL);
+ if (!mei_dev)
+ return -ENOMEM;
+
+ mei_device_init(mei_dev, dev, false, &mei_vsc_hw_ops);
+ mei_dev->fw_f_fw_ver_supported = 0;
+ mei_dev->kind = "ivsc";
+
+ hw = mei_dev_to_vsc_hw(mei_dev);
+ atomic_set(&hw->write_lock_cnt, 0);
+ hw->tp = tp;
+
+ platform_set_drvdata(pdev, mei_dev);
+
+ vsc_tp_register_event_cb(tp, mei_vsc_event_cb, mei_dev);
+
+ ret = mei_start(mei_dev);
+ if (ret) {
+ dev_err_probe(dev, ret, "init hw failed\n");
+ goto err_cancel;
+ }
+
+ ret = mei_register(mei_dev, dev);
+ if (ret)
+ goto err_stop;
+
+ pm_runtime_enable(mei_dev->dev);
+
+ return 0;
+
+err_stop:
+ mei_stop(mei_dev);
+
+err_cancel:
+ mei_cancel_work(mei_dev);
+
+ mei_disable_interrupts(mei_dev);
+
+ return ret;
+}
+
+static int mei_vsc_remove(struct platform_device *pdev)
+{
+ struct mei_device *mei_dev = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(mei_dev->dev);
+
+ mei_stop(mei_dev);
+
+ mei_disable_interrupts(mei_dev);
+
+ mei_deregister(mei_dev);
+
+ return 0;
+}
+
+static int mei_vsc_suspend(struct device *dev)
+{
+ struct mei_device *mei_dev = dev_get_drvdata(dev);
+
+ mei_stop(mei_dev);
+
+ return 0;
+}
+
+static int mei_vsc_resume(struct device *dev)
+{
+ struct mei_device *mei_dev = dev_get_drvdata(dev);
+ int ret;
+
+ ret = mei_restart(mei_dev);
+ if (ret)
+ return ret;
+
+ /* start timer if stopped in suspend */
+ schedule_delayed_work(&mei_dev->timer_work, HZ);
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(mei_vsc_pm_ops, mei_vsc_suspend, mei_vsc_resume);
+
+static const struct platform_device_id mei_vsc_id_table[] = {
+ { MEI_VSC_DRV_NAME },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, mei_vsc_id_table);
+
+static struct platform_driver mei_vsc_drv = {
+ .probe = mei_vsc_probe,
+ .remove = mei_vsc_remove,
+ .id_table = mei_vsc_id_table,
+ .driver = {
+ .name = MEI_VSC_DRV_NAME,
+ .pm = &mei_vsc_pm_ops,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+module_platform_driver(mei_vsc_drv);
+
+MODULE_AUTHOR("Wentong Wu <wentong.wu@intel.com>");
+MODULE_AUTHOR("Zhifeng Wang <zhifeng.wang@intel.com>");
+MODULE_DESCRIPTION("Intel Visual Sensing Controller Interface");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(VSC_TP);
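
mei_vsc_hw_start() above leans on read_poll_timeout() from <linux/iopoll.h> to wait for the firmware's first byte, and the macro's argument order is easy to misread. The same call again, annotated (a restatement, not new behaviour):

	/* read_poll_timeout(op, val, cond, sleep_us, timeout_us,
	 *                   sleep_before_read, args...) keeps evaluating
	 * val = op(args...) until cond is true or timeout_us elapses,
	 * sleeping sleep_us between attempts; it returns 0 on success
	 * and -ETIMEDOUT otherwise. */
	ret = read_poll_timeout(mei_vsc_read_helper,	/* op: what to call */
				rlen,			/* val: op's result */
				rlen >= 0,		/* cond: stop condition */
				MEI_VSC_POLL_DELAY_US,	/* sleep between attempts */
				MEI_VSC_POLL_TIMEOUT_US,/* give up after this */
				true,			/* sleep before first read */
				hw, &buf, sizeof(buf));	/* args... forwarded to op */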
diff --git a/drivers/misc/mei/pxp/Kconfig b/drivers/misc/mei/pxp/Kconfig
index 4029b96afc04..e9219b61cd92 100644
--- a/drivers/misc/mei/pxp/Kconfig
+++ b/drivers/misc/mei/pxp/Kconfig
@@ -1,10 +1,9 @@
-
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2020, Intel Corporation. All rights reserved.
#
config INTEL_MEI_PXP
tristate "Intel PXP services of ME Interface"
- select INTEL_MEI_ME
+ depends on INTEL_MEI_ME
depends on DRM_I915
help
MEI Support for PXP Services on Intel platforms.
diff --git a/drivers/misc/mei/pxp/mei_pxp.c b/drivers/misc/mei/pxp/mei_pxp.c
index f77d78fa5054..787c6a27a4be 100644
--- a/drivers/misc/mei/pxp/mei_pxp.c
+++ b/drivers/misc/mei/pxp/mei_pxp.c
@@ -84,9 +84,10 @@ mei_pxp_send_message(struct device *dev, const void *message, size_t size, unsig
byte = ret;
break;
}
+ return byte;
}
- return byte;
+ return 0;
}
/**
diff --git a/drivers/misc/mei/vsc-fw-loader.c b/drivers/misc/mei/vsc-fw-loader.c
new file mode 100644
index 000000000000..ffa4ccd96a10
--- /dev/null
+++ b/drivers/misc/mei/vsc-fw-loader.c
@@ -0,0 +1,770 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, Intel Corporation.
+ * Intel Visual Sensing Controller Transport Layer Linux driver
+ */
+
+#include <linux/acpi.h>
+#include <linux/align.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/firmware.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/string_helpers.h>
+#include <linux/types.h>
+
+#include <asm-generic/unaligned.h>
+
+#include "vsc-tp.h"
+
+#define VSC_MAGIC_NUM 0x49505343 /* IPSC */
+#define VSC_MAGIC_FW 0x49574653 /* IWFS */
+#define VSC_MAGIC_FILE 0x46564353 /* FVCS */
+
+#define VSC_ADDR_BASE 0xE0030000
+#define VSC_EFUSE_ADDR (VSC_ADDR_BASE + 0x038)
+#define VSC_STRAP_ADDR (VSC_ADDR_BASE + 0x100)
+
+#define VSC_MAINSTEPPING_VERSION_MASK GENMASK(7, 4)
+#define VSC_MAINSTEPPING_VERSION_A 0
+
+#define VSC_SUBSTEPPING_VERSION_MASK GENMASK(3, 0)
+#define VSC_SUBSTEPPING_VERSION_0 0
+#define VSC_SUBSTEPPING_VERSION_1 2
+
+#define VSC_BOOT_IMG_OPTION_MASK GENMASK(15, 0)
+
+#define VSC_SKU_CFG_LOCATION 0x5001A000
+#define VSC_SKU_MAX_SIZE 4100u
+
+#define VSC_ACE_IMG_CNT 2
+#define VSC_CSI_IMG_CNT 4
+#define VSC_IMG_CNT_MAX 6
+
+#define VSC_ROM_PKG_SIZE 256u
+#define VSC_FW_PKG_SIZE 512u
+
+#define VSC_IMAGE_DIR "intel/vsc/"
+
+#define VSC_CSI_IMAGE_NAME VSC_IMAGE_DIR "ivsc_fw.bin"
+#define VSC_ACE_IMAGE_NAME_FMT VSC_IMAGE_DIR "ivsc_pkg_%s_0.bin"
+#define VSC_CFG_IMAGE_NAME_FMT VSC_IMAGE_DIR "ivsc_skucfg_%s_0_1.bin"
+
+#define VSC_IMAGE_PATH_MAX_LEN 64
+
+#define VSC_SENSOR_NAME_MAX_LEN 16
+
+/* command id */
+enum {
+ VSC_CMD_QUERY = 0,
+ VSC_CMD_DL_SET = 1,
+ VSC_CMD_DL_START = 2,
+ VSC_CMD_DL_CONT = 3,
+ VSC_CMD_DUMP_MEM = 4,
+ VSC_CMD_GET_CONT = 8,
+ VSC_CMD_CAM_BOOT = 10,
+};
+
+/* command ack token */
+enum {
+ VSC_TOKEN_BOOTLOADER_REQ = 1,
+ VSC_TOKEN_DUMP_RESP = 4,
+ VSC_TOKEN_ERROR = 7,
+};
+
+/* image type */
+enum {
+ VSC_IMG_BOOTLOADER_TYPE = 1,
+ VSC_IMG_CSI_EM7D_TYPE,
+ VSC_IMG_CSI_SEM_TYPE,
+ VSC_IMG_CSI_RUNTIME_TYPE,
+ VSC_IMG_ACE_VISION_TYPE,
+ VSC_IMG_ACE_CFG_TYPE,
+ VSC_IMG_SKU_CFG_TYPE,
+};
+
+/* image fragments */
+enum {
+ VSC_IMG_BOOTLOADER_FRAG,
+ VSC_IMG_CSI_SEM_FRAG,
+ VSC_IMG_CSI_RUNTIME_FRAG,
+ VSC_IMG_ACE_VISION_FRAG,
+ VSC_IMG_ACE_CFG_FRAG,
+ VSC_IMG_CSI_EM7D_FRAG,
+ VSC_IMG_SKU_CFG_FRAG,
+ VSC_IMG_FRAG_MAX
+};
+
+struct vsc_rom_cmd {
+ __le32 magic;
+ __u8 cmd_id;
+ union {
+ /* download start */
+ struct {
+ __u8 img_type;
+ __le16 option;
+ __le32 img_len;
+ __le32 img_loc;
+ __le32 crc;
+ DECLARE_FLEX_ARRAY(__u8, res);
+ } __packed dl_start;
+ /* download set */
+ struct {
+ __u8 option;
+ __le16 img_cnt;
+ DECLARE_FLEX_ARRAY(__le32, payload);
+ } __packed dl_set;
+ /* download continue */
+ struct {
+ __u8 end_flag;
+ __le16 len;
+ /* 8 is the offset of payload */
+ __u8 payload[VSC_ROM_PKG_SIZE - 8];
+ } __packed dl_cont;
+ /* dump memory */
+ struct {
+ __u8 res;
+ __le16 len;
+ __le32 addr;
+ DECLARE_FLEX_ARRAY(__u8, payload);
+ } __packed dump_mem;
+ /* 5 is the offset of padding */
+ __u8 padding[VSC_ROM_PKG_SIZE - 5];
+ } data;
+};
+
+struct vsc_rom_cmd_ack {
+ __le32 magic;
+ __u8 token;
+ __u8 type;
+ __u8 res[2];
+ __u8 payload[];
+};
+
+struct vsc_fw_cmd {
+ __le32 magic;
+ __u8 cmd_id;
+ union {
+ struct {
+ __le16 option;
+ __u8 img_type;
+ __le32 img_len;
+ __le32 img_loc;
+ __le32 crc;
+ DECLARE_FLEX_ARRAY(__u8, res);
+ } __packed dl_start;
+ struct {
+ __le16 option;
+ __u8 img_cnt;
+ DECLARE_FLEX_ARRAY(__le32, payload);
+ } __packed dl_set;
+ struct {
+ __le32 addr;
+ __u8 len;
+ DECLARE_FLEX_ARRAY(__u8, payload);
+ } __packed dump_mem;
+ struct {
+ __u8 resv[3];
+ __le32 crc;
+ DECLARE_FLEX_ARRAY(__u8, payload);
+ } __packed boot;
+ /* 5 is the offset of padding */
+ __u8 padding[VSC_FW_PKG_SIZE - 5];
+ } data;
+};
+
+struct vsc_img {
+ __le32 magic;
+ __le32 option;
+ __le32 image_count;
+ __le32 image_location[VSC_IMG_CNT_MAX];
+};
+
+struct vsc_fw_sign {
+ __le32 magic;
+ __le32 image_size;
+ __u8 image[];
+};
+
+struct vsc_image_code_data {
+ /* fragment index */
+ u8 frag_index;
+ /* image type */
+ u8 image_type;
+};
+
+struct vsc_img_frag {
+ u8 type;
+ u32 location;
+ const u8 *data;
+ u32 size;
+};
+
+/**
+ * struct vsc_fw_loader - represent vsc firmware loader
+ * @dev: device used to request firmware
+ * @tp: transport layer used with the firmware loader
+ * @csi: CSI image
+ * @ace: ACE image
+ * @cfg: config image
+ * @tx_buf: tx buffer
+ * @rx_buf: rx buffer
+ * @option: command option
+ * @count: total image count
+ * @sensor_name: camera sensor name
+ * @frags: image fragments
+ */
+struct vsc_fw_loader {
+ struct device *dev;
+ struct vsc_tp *tp;
+
+ const struct firmware *csi;
+ const struct firmware *ace;
+ const struct firmware *cfg;
+
+ void *tx_buf;
+ void *rx_buf;
+
+ u16 option;
+ u16 count;
+
+ char sensor_name[VSC_SENSOR_NAME_MAX_LEN];
+
+ struct vsc_img_frag frags[VSC_IMG_FRAG_MAX];
+};
+
+static inline u32 vsc_sum_crc(void *data, size_t size)
+{
+ u32 crc = 0;
+ size_t i;
+
+ for (i = 0; i < size; i++)
+ crc += *((u8 *)data + i);
+
+ return crc;
+}
+
+/* get sensor name to construct image name */
+static int vsc_get_sensor_name(struct vsc_fw_loader *fw_loader,
+ struct device *dev)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER };
+ union acpi_object obj = {
+ .type = ACPI_TYPE_INTEGER,
+ .integer.value = 1,
+ };
+ struct acpi_object_list arg_list = {
+ .count = 1,
+ .pointer = &obj,
+ };
+ union acpi_object *ret_obj;
+ acpi_handle handle;
+ acpi_status status;
+ int ret = 0;
+
+ handle = ACPI_HANDLE(dev);
+ if (!handle)
+ return -EINVAL;
+
+ status = acpi_evaluate_object(handle, "SID", &arg_list, &buffer);
+ if (ACPI_FAILURE(status)) {
+ dev_err(dev, "can't evaluate SID method: %d\n", status);
+ return -ENODEV;
+ }
+
+ ret_obj = buffer.pointer;
+ if (!ret_obj) {
+ dev_err(dev, "can't locate ACPI buffer\n");
+ return -ENODEV;
+ }
+
+ if (ret_obj->type != ACPI_TYPE_STRING) {
+ dev_err(dev, "found non-string entry\n");
+ ret = -ENODEV;
+ goto out_free_buff;
+ }
+
+ /* string length excludes trailing NUL */
+ if (ret_obj->string.length >= sizeof(fw_loader->sensor_name)) {
+ dev_err(dev, "sensor name buffer too small\n");
+ ret = -EINVAL;
+ goto out_free_buff;
+ }
+
+ memcpy(fw_loader->sensor_name, ret_obj->string.pointer,
+ ret_obj->string.length);
+
+ string_lower(fw_loader->sensor_name, fw_loader->sensor_name);
+
+out_free_buff:
+ ACPI_FREE(buffer.pointer);
+
+ return ret;
+}
+
+static int vsc_identify_silicon(struct vsc_fw_loader *fw_loader)
+{
+ struct vsc_rom_cmd_ack *ack = fw_loader->rx_buf;
+ struct vsc_rom_cmd *cmd = fw_loader->tx_buf;
+ u8 version, sub_version;
+ int ret;
+
+ /* identify stepping information */
+ cmd->magic = cpu_to_le32(VSC_MAGIC_NUM);
+ cmd->cmd_id = VSC_CMD_DUMP_MEM;
+ cmd->data.dump_mem.addr = cpu_to_le32(VSC_EFUSE_ADDR);
+ cmd->data.dump_mem.len = cpu_to_le16(sizeof(__le32));
+ ret = vsc_tp_rom_xfer(fw_loader->tp, cmd, ack, VSC_ROM_PKG_SIZE);
+ if (ret)
+ return ret;
+ if (ack->token == VSC_TOKEN_ERROR)
+ return -EINVAL;
+
+ cmd->magic = cpu_to_le32(VSC_MAGIC_NUM);
+ cmd->cmd_id = VSC_CMD_GET_CONT;
+ ret = vsc_tp_rom_xfer(fw_loader->tp, cmd, ack, VSC_ROM_PKG_SIZE);
+ if (ret)
+ return ret;
+ if (ack->token != VSC_TOKEN_DUMP_RESP)
+ return -EINVAL;
+
+ version = FIELD_GET(VSC_MAINSTEPPING_VERSION_MASK, ack->payload[0]);
+ sub_version = FIELD_GET(VSC_SUBSTEPPING_VERSION_MASK, ack->payload[0]);
+
+ if (version != VSC_MAINSTEPPING_VERSION_A)
+ return -EINVAL;
+
+ if (sub_version != VSC_SUBSTEPPING_VERSION_0 &&
+ sub_version != VSC_SUBSTEPPING_VERSION_1)
+ return -EINVAL;
+
+ dev_info(fw_loader->dev, "silicon stepping version is %u:%u\n",
+ version, sub_version);
+
+ /* identify strap information */
+ cmd->magic = cpu_to_le32(VSC_MAGIC_NUM);
+ cmd->cmd_id = VSC_CMD_DUMP_MEM;
+ cmd->data.dump_mem.addr = cpu_to_le32(VSC_STRAP_ADDR);
+ cmd->data.dump_mem.len = cpu_to_le16(sizeof(__le32));
+ ret = vsc_tp_rom_xfer(fw_loader->tp, cmd, ack, VSC_ROM_PKG_SIZE);
+ if (ret)
+ return ret;
+ if (ack->token == VSC_TOKEN_ERROR)
+ return -EINVAL;
+
+ cmd->magic = cpu_to_le32(VSC_MAGIC_NUM);
+ cmd->cmd_id = VSC_CMD_GET_CONT;
+ ret = vsc_tp_rom_xfer(fw_loader->tp, cmd, ack, VSC_ROM_PKG_SIZE);
+ if (ret)
+ return ret;
+ if (ack->token != VSC_TOKEN_DUMP_RESP)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int vsc_identify_csi_image(struct vsc_fw_loader *fw_loader)
+{
+ const struct firmware *image;
+ struct vsc_fw_sign *sign;
+ struct vsc_img *img;
+ unsigned int i;
+ int ret;
+
+ ret = request_firmware(&image, VSC_CSI_IMAGE_NAME, fw_loader->dev);
+ if (ret)
+ return ret;
+
+ img = (struct vsc_img *)image->data;
+ if (!img) {
+ ret = -ENOENT;
+ goto err_release_image;
+ }
+
+ if (le32_to_cpu(img->magic) != VSC_MAGIC_FILE) {
+ ret = -EINVAL;
+ goto err_release_image;
+ }
+
+ if (le32_to_cpu(img->image_count) != VSC_CSI_IMG_CNT) {
+ ret = -EINVAL;
+ goto err_release_image;
+ }
+ fw_loader->count += le32_to_cpu(img->image_count) - 1;
+
+ fw_loader->option =
+ FIELD_GET(VSC_BOOT_IMG_OPTION_MASK, le32_to_cpu(img->option));
+
+ sign = (struct vsc_fw_sign *)
+ (img->image_location + le32_to_cpu(img->image_count));
+
+ for (i = 0; i < VSC_CSI_IMG_CNT; i++) {
+ /* mapping from CSI image index to image code data */
+ static const struct vsc_image_code_data csi_image_map[] = {
+ { VSC_IMG_BOOTLOADER_FRAG, VSC_IMG_BOOTLOADER_TYPE },
+ { VSC_IMG_CSI_SEM_FRAG, VSC_IMG_CSI_SEM_TYPE },
+ { VSC_IMG_CSI_RUNTIME_FRAG, VSC_IMG_CSI_RUNTIME_TYPE },
+ { VSC_IMG_CSI_EM7D_FRAG, VSC_IMG_CSI_EM7D_TYPE },
+ };
+ struct vsc_img_frag *frag;
+
+ if ((u8 *)sign + sizeof(*sign) > image->data + image->size) {
+ ret = -EINVAL;
+ goto err_release_image;
+ }
+
+ if (le32_to_cpu(sign->magic) != VSC_MAGIC_FW) {
+ ret = -EINVAL;
+ goto err_release_image;
+ }
+
+ if (!le32_to_cpu(img->image_location[i])) {
+ ret = -EINVAL;
+ goto err_release_image;
+ }
+
+ frag = &fw_loader->frags[csi_image_map[i].frag_index];
+
+ frag->data = sign->image;
+ frag->size = le32_to_cpu(sign->image_size);
+ frag->location = le32_to_cpu(img->image_location[i]);
+ frag->type = csi_image_map[i].image_type;
+
+ sign = (struct vsc_fw_sign *)
+ (sign->image + le32_to_cpu(sign->image_size));
+ }
+
+ fw_loader->csi = image;
+
+ return 0;
+
+err_release_image:
+ release_firmware(image);
+
+ return ret;
+}
+
+static int vsc_identify_ace_image(struct vsc_fw_loader *fw_loader)
+{
+ char path[VSC_IMAGE_PATH_MAX_LEN];
+ const struct firmware *image;
+ struct vsc_fw_sign *sign;
+ struct vsc_img *img;
+ unsigned int i;
+ int ret;
+
+ snprintf(path, sizeof(path), VSC_ACE_IMAGE_NAME_FMT,
+ fw_loader->sensor_name);
+
+ ret = request_firmware(&image, path, fw_loader->dev);
+ if (ret)
+ return ret;
+
+ img = (struct vsc_img *)image->data;
+ if (!img) {
+ ret = -ENOENT;
+ goto err_release_image;
+ }
+
+ if (le32_to_cpu(img->magic) != VSC_MAGIC_FILE) {
+ ret = -EINVAL;
+ goto err_release_image;
+ }
+
+ if (le32_to_cpu(img->image_count) != VSC_ACE_IMG_CNT) {
+ ret = -EINVAL;
+ goto err_release_image;
+ }
+ fw_loader->count += le32_to_cpu(img->image_count);
+
+ sign = (struct vsc_fw_sign *)
+ (img->image_location + le32_to_cpu(img->image_count));
+
+ for (i = 0; i < VSC_ACE_IMG_CNT; i++) {
+ /* mapping from ACE image index to image code data */
+ static const struct vsc_image_code_data ace_image_map[] = {
+ { VSC_IMG_ACE_VISION_FRAG, VSC_IMG_ACE_VISION_TYPE },
+ { VSC_IMG_ACE_CFG_FRAG, VSC_IMG_ACE_CFG_TYPE },
+ };
+ struct vsc_img_frag *frag, *last_frag;
+ u8 frag_index;
+
+ if ((u8 *)sign + sizeof(*sign) > image->data + image->size) {
+ ret = -EINVAL;
+ goto err_release_image;
+ }
+
+ if (le32_to_cpu(sign->magic) != VSC_MAGIC_FW) {
+ ret = -EINVAL;
+ goto err_release_image;
+ }
+
+ frag_index = ace_image_map[i].frag_index;
+ frag = &fw_loader->frags[frag_index];
+
+ frag->data = sign->image;
+ frag->size = le32_to_cpu(sign->image_size);
+ frag->location = le32_to_cpu(img->image_location[i]);
+ frag->type = ace_image_map[i].image_type;
+
+ if (!frag->location) {
+ last_frag = &fw_loader->frags[frag_index - 1];
+ frag->location =
+ ALIGN(last_frag->location + last_frag->size, SZ_4K);
+ }
+
+ sign = (struct vsc_fw_sign *)
+ (sign->image + le32_to_cpu(sign->image_size));
+ }
+
+ fw_loader->ace = image;
+
+ return 0;
+
+err_release_image:
+ release_firmware(image);
+
+ return ret;
+}
+
+static int vsc_identify_cfg_image(struct vsc_fw_loader *fw_loader)
+{
+ struct vsc_img_frag *frag = &fw_loader->frags[VSC_IMG_SKU_CFG_FRAG];
+ char path[VSC_IMAGE_PATH_MAX_LEN];
+ const struct firmware *image;
+ u32 size;
+ int ret;
+
+ snprintf(path, sizeof(path), VSC_CFG_IMAGE_NAME_FMT,
+ fw_loader->sensor_name);
+
+ ret = request_firmware(&image, path, fw_loader->dev);
+ if (ret)
+ return ret;
+
+ /* identify image size */
+ if (image->size <= sizeof(u32) || image->size > VSC_SKU_MAX_SIZE) {
+ ret = -EINVAL;
+ goto err_release_image;
+ }
+
+ size = le32_to_cpu(*((__le32 *)image->data)) + sizeof(u32);
+ if (image->size != size) {
+ ret = -EINVAL;
+ goto err_release_image;
+ }
+
+ frag->data = image->data;
+ frag->size = image->size;
+ frag->type = VSC_IMG_SKU_CFG_TYPE;
+ frag->location = VSC_SKU_CFG_LOCATION;
+
+ fw_loader->cfg = image;
+
+ return 0;
+
+err_release_image:
+ release_firmware(image);
+
+ return ret;
+}
+
+static int vsc_download_bootloader(struct vsc_fw_loader *fw_loader)
+{
+ struct vsc_img_frag *frag = &fw_loader->frags[VSC_IMG_BOOTLOADER_FRAG];
+ struct vsc_rom_cmd_ack *ack = fw_loader->rx_buf;
+ struct vsc_rom_cmd *cmd = fw_loader->tx_buf;
+ u32 len, c_len;
+ size_t remain;
+ const u8 *p;
+ int ret;
+
+ cmd->magic = cpu_to_le32(VSC_MAGIC_NUM);
+ cmd->cmd_id = VSC_CMD_QUERY;
+ ret = vsc_tp_rom_xfer(fw_loader->tp, cmd, ack, VSC_ROM_PKG_SIZE);
+ if (ret)
+ return ret;
+ if (ack->token != VSC_TOKEN_DUMP_RESP &&
+ ack->token != VSC_TOKEN_BOOTLOADER_REQ)
+ return -EINVAL;
+
+ cmd->magic = cpu_to_le32(VSC_MAGIC_NUM);
+ cmd->cmd_id = VSC_CMD_DL_START;
+ cmd->data.dl_start.option = cpu_to_le16(fw_loader->option);
+ cmd->data.dl_start.img_type = frag->type;
+ cmd->data.dl_start.img_len = cpu_to_le32(frag->size);
+ cmd->data.dl_start.img_loc = cpu_to_le32(frag->location);
+
+ c_len = offsetof(struct vsc_rom_cmd, data.dl_start.crc);
+ cmd->data.dl_start.crc = cpu_to_le32(vsc_sum_crc(cmd, c_len));
+
+ ret = vsc_tp_rom_xfer(fw_loader->tp, cmd, NULL, VSC_ROM_PKG_SIZE);
+ if (ret)
+ return ret;
+
+ p = frag->data;
+ remain = frag->size;
+
+ /* download image data */
+ while (remain > 0) {
+ len = min(remain, sizeof(cmd->data.dl_cont.payload));
+
+ cmd->magic = cpu_to_le32(VSC_MAGIC_NUM);
+ cmd->cmd_id = VSC_CMD_DL_CONT;
+ cmd->data.dl_cont.len = cpu_to_le16(len);
+ cmd->data.dl_cont.end_flag = remain == len;
+ memcpy(cmd->data.dl_cont.payload, p, len);
+
+ ret = vsc_tp_rom_xfer(fw_loader->tp, cmd, NULL, VSC_ROM_PKG_SIZE);
+ if (ret)
+ return ret;
+
+ p += len;
+ remain -= len;
+ }
+
+ return 0;
+}
+
+static int vsc_download_firmware(struct vsc_fw_loader *fw_loader)
+{
+ struct vsc_fw_cmd *cmd = fw_loader->tx_buf;
+ unsigned int i, index = 0;
+ u32 c_len;
+ int ret;
+
+ cmd->magic = cpu_to_le32(VSC_MAGIC_NUM);
+ cmd->cmd_id = VSC_CMD_DL_SET;
+ cmd->data.dl_set.img_cnt = cpu_to_le16(fw_loader->count);
+ put_unaligned_le16(fw_loader->option, &cmd->data.dl_set.option);
+
+ for (i = VSC_IMG_CSI_SEM_FRAG; i <= VSC_IMG_CSI_EM7D_FRAG; i++) {
+ struct vsc_img_frag *frag = &fw_loader->frags[i];
+
+ cmd->data.dl_set.payload[index++] = cpu_to_le32(frag->location);
+ cmd->data.dl_set.payload[index++] = cpu_to_le32(frag->size);
+ }
+
+ c_len = offsetof(struct vsc_fw_cmd, data.dl_set.payload[index]);
+ cmd->data.dl_set.payload[index] = cpu_to_le32(vsc_sum_crc(cmd, c_len));
+
+ ret = vsc_tp_rom_xfer(fw_loader->tp, cmd, NULL, VSC_FW_PKG_SIZE);
+ if (ret)
+ return ret;
+
+ for (i = VSC_IMG_CSI_SEM_FRAG; i < VSC_IMG_FRAG_MAX; i++) {
+ struct vsc_img_frag *frag = &fw_loader->frags[i];
+ const u8 *p;
+ u32 remain;
+
+ cmd->magic = cpu_to_le32(VSC_MAGIC_NUM);
+ cmd->cmd_id = VSC_CMD_DL_START;
+ cmd->data.dl_start.img_type = frag->type;
+ cmd->data.dl_start.img_len = cpu_to_le32(frag->size);
+ cmd->data.dl_start.img_loc = cpu_to_le32(frag->location);
+ put_unaligned_le16(fw_loader->option, &cmd->data.dl_start.option);
+
+ c_len = offsetof(struct vsc_fw_cmd, data.dl_start.crc);
+ cmd->data.dl_start.crc = cpu_to_le32(vsc_sum_crc(cmd, c_len));
+
+ ret = vsc_tp_rom_xfer(fw_loader->tp, cmd, NULL, VSC_FW_PKG_SIZE);
+ if (ret)
+ return ret;
+
+ p = frag->data;
+ remain = frag->size;
+
+ /* download image data */
+ while (remain > 0) {
+ u32 len = min(remain, VSC_FW_PKG_SIZE);
+
+ memcpy(fw_loader->tx_buf, p, len);
+ memset(fw_loader->tx_buf + len, 0, VSC_FW_PKG_SIZE - len);
+
+ ret = vsc_tp_rom_xfer(fw_loader->tp, fw_loader->tx_buf,
+ NULL, VSC_FW_PKG_SIZE);
+ if (ret)
+ break;
+
+ p += len;
+ remain -= len;
+ }
+ }
+
+ cmd->magic = cpu_to_le32(VSC_MAGIC_NUM);
+ cmd->cmd_id = VSC_CMD_CAM_BOOT;
+
+ c_len = offsetof(struct vsc_fw_cmd, data.dl_start.crc);
+ cmd->data.boot.crc = cpu_to_le32(vsc_sum_crc(cmd, c_len));
+
+ return vsc_tp_rom_xfer(fw_loader->tp, cmd, NULL, VSC_FW_PKG_SIZE);
+}
+
+/**
+ * vsc_tp_init - init vsc_tp
+ * @tp: vsc_tp device handle
+ * @dev: device node for mei vsc device
+ * Return: 0 in case of success, negative value in case of error
+ */
+int vsc_tp_init(struct vsc_tp *tp, struct device *dev)
+{
+ struct vsc_fw_loader *fw_loader __free(kfree) = NULL;
+ void *tx_buf __free(kfree) = NULL;
+ void *rx_buf __free(kfree) = NULL;
+ int ret;
+
+ fw_loader = kzalloc(sizeof(*fw_loader), GFP_KERNEL);
+ if (!fw_loader)
+ return -ENOMEM;
+
+ tx_buf = kzalloc(VSC_FW_PKG_SIZE, GFP_KERNEL);
+ if (!tx_buf)
+ return -ENOMEM;
+
+ rx_buf = kzalloc(VSC_FW_PKG_SIZE, GFP_KERNEL);
+ if (!rx_buf)
+ return -ENOMEM;
+
+ fw_loader->tx_buf = tx_buf;
+ fw_loader->rx_buf = rx_buf;
+
+ fw_loader->tp = tp;
+ fw_loader->dev = dev;
+
+ ret = vsc_get_sensor_name(fw_loader, dev);
+ if (ret)
+ return ret;
+
+ ret = vsc_identify_silicon(fw_loader);
+ if (ret)
+ return ret;
+
+ ret = vsc_identify_csi_image(fw_loader);
+ if (ret)
+ return ret;
+
+ ret = vsc_identify_ace_image(fw_loader);
+ if (ret)
+ goto err_release_csi;
+
+ ret = vsc_identify_cfg_image(fw_loader);
+ if (ret)
+ goto err_release_ace;
+
+ ret = vsc_download_bootloader(fw_loader);
+ if (!ret)
+ ret = vsc_download_firmware(fw_loader);
+
+ release_firmware(fw_loader->cfg);
+
+err_release_ace:
+ release_firmware(fw_loader->ace);
+
+err_release_csi:
+ release_firmware(fw_loader->csi);
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(vsc_tp_init, VSC_TP);
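
vsc_tp_init() above relies on the scope-based cleanup helpers from <linux/cleanup.h>: each pointer declared with __free(kfree) is handed to kfree() automatically when it goes out of scope, which is why none of the early returns needs an explicit free. A self-contained sketch of the idiom:

	#include <linux/cleanup.h>
	#include <linux/slab.h>

	static int cleanup_demo(size_t n)
	{
		u8 *buf __free(kfree) = kzalloc(n, GFP_KERNEL);

		if (!buf)
			return -ENOMEM;

		buf[0] = 0xa5;	/* use the buffer ... */

		return 0;	/* kfree(buf) runs here automatically */
	}

Had ownership needed to outlive the function, the pointer would be disarmed with no_free_ptr(); vsc_tp_init() never transfers ownership, so every buffer is released on return.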
diff --git a/drivers/misc/mei/vsc-tp.c b/drivers/misc/mei/vsc-tp.c
new file mode 100644
index 000000000000..6f4a4be6ccb5
--- /dev/null
+++ b/drivers/misc/mei/vsc-tp.c
@@ -0,0 +1,555 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, Intel Corporation.
+ * Intel Visual Sensing Controller Transport Layer Linux driver
+ */
+
+#include <linux/acpi.h>
+#include <linux/cleanup.h>
+#include <linux/crc32.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/irqreturn.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+
+#include "vsc-tp.h"
+
+#define VSC_TP_RESET_PIN_TOGGLE_INTERVAL_MS 20
+#define VSC_TP_ROM_BOOTUP_DELAY_MS 10
+#define VSC_TP_ROM_XFER_POLL_TIMEOUT_US (500 * USEC_PER_MSEC)
+#define VSC_TP_ROM_XFER_POLL_DELAY_US (20 * USEC_PER_MSEC)
+#define VSC_TP_WAIT_FW_ASSERTED_TIMEOUT (2 * HZ)
+#define VSC_TP_MAX_XFER_COUNT 5
+
+#define VSC_TP_PACKET_SYNC 0x31
+#define VSC_TP_CRC_SIZE sizeof(u32)
+#define VSC_TP_MAX_MSG_SIZE 2048
+/* SPI xfer timeout size */
+#define VSC_TP_XFER_TIMEOUT_BYTES 700
+#define VSC_TP_PACKET_PADDING_SIZE 1
+#define VSC_TP_PACKET_SIZE(pkt) \
+ (sizeof(struct vsc_tp_packet) + le16_to_cpu((pkt)->len) + VSC_TP_CRC_SIZE)
+#define VSC_TP_MAX_PACKET_SIZE \
+ (sizeof(struct vsc_tp_packet) + VSC_TP_MAX_MSG_SIZE + VSC_TP_CRC_SIZE)
+#define VSC_TP_MAX_XFER_SIZE \
+ (VSC_TP_MAX_PACKET_SIZE + VSC_TP_XFER_TIMEOUT_BYTES)
+#define VSC_TP_NEXT_XFER_LEN(len, offset) \
+ (len + sizeof(struct vsc_tp_packet) + VSC_TP_CRC_SIZE - offset + VSC_TP_PACKET_PADDING_SIZE)
+
+struct vsc_tp_packet {
+ __u8 sync;
+ __u8 cmd;
+ __le16 len;
+ __le32 seq;
+ __u8 buf[] __counted_by(len);
+};
+
+struct vsc_tp {
+ /* do the actual data transfer */
+ struct spi_device *spi;
+
+ /* bind with mei framework */
+ struct platform_device *pdev;
+
+ struct gpio_desc *wakeuphost;
+ struct gpio_desc *resetfw;
+ struct gpio_desc *wakeupfw;
+
+ /* command sequence number */
+ u32 seq;
+
+ /* command buffer */
+ void *tx_buf;
+ void *rx_buf;
+
+ atomic_t assert_cnt;
+ wait_queue_head_t xfer_wait;
+
+ vsc_tp_event_cb_t event_notify;
+ void *event_notify_context;
+
+ /* used to protect command download */
+ struct mutex mutex;
+};
+
+/* GPIO resources */
+static const struct acpi_gpio_params wakeuphost_gpio = { 0, 0, false };
+static const struct acpi_gpio_params wakeuphostint_gpio = { 1, 0, false };
+static const struct acpi_gpio_params resetfw_gpio = { 2, 0, false };
+static const struct acpi_gpio_params wakeupfw = { 3, 0, false };
+
+static const struct acpi_gpio_mapping vsc_tp_acpi_gpios[] = {
+ { "wakeuphost-gpios", &wakeuphost_gpio, 1 },
+ { "wakeuphostint-gpios", &wakeuphostint_gpio, 1 },
+ { "resetfw-gpios", &resetfw_gpio, 1 },
+ { "wakeupfw-gpios", &wakeupfw, 1 },
+ {}
+};
+
+/* wakeup firmware and wait for response */
+static int vsc_tp_wakeup_request(struct vsc_tp *tp)
+{
+ int ret;
+
+ gpiod_set_value_cansleep(tp->wakeupfw, 0);
+
+ ret = wait_event_timeout(tp->xfer_wait,
+ atomic_read(&tp->assert_cnt) &&
+ gpiod_get_value_cansleep(tp->wakeuphost),
+ VSC_TP_WAIT_FW_ASSERTED_TIMEOUT);
+ if (!ret)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static void vsc_tp_wakeup_release(struct vsc_tp *tp)
+{
+ atomic_dec_if_positive(&tp->assert_cnt);
+
+ gpiod_set_value_cansleep(tp->wakeupfw, 1);
+}
+
+static int vsc_tp_dev_xfer(struct vsc_tp *tp, void *obuf, void *ibuf, size_t len)
+{
+ struct spi_message msg = { 0 };
+ struct spi_transfer xfer = {
+ .tx_buf = obuf,
+ .rx_buf = ibuf,
+ .len = len,
+ };
+
+ spi_message_init_with_transfers(&msg, &xfer, 1);
+
+ return spi_sync_locked(tp->spi, &msg);
+}
+
+static int vsc_tp_xfer_helper(struct vsc_tp *tp, struct vsc_tp_packet *pkt,
+ void *ibuf, u16 ilen)
+{
+ int ret, offset = 0, cpy_len, src_len, dst_len = sizeof(struct vsc_tp_packet);
+ int next_xfer_len = VSC_TP_PACKET_SIZE(pkt) + VSC_TP_XFER_TIMEOUT_BYTES;
+ u8 *src, *crc_src, *rx_buf = tp->rx_buf;
+ int count_down = VSC_TP_MAX_XFER_COUNT;
+ u32 recv_crc = 0, crc = ~0;
+ struct vsc_tp_packet ack;
+ u8 *dst = (u8 *)&ack;
+ bool synced = false;
+
+ do {
+ ret = vsc_tp_dev_xfer(tp, pkt, rx_buf, next_xfer_len);
+ if (ret)
+ return ret;
+ memset(pkt, 0, VSC_TP_MAX_XFER_SIZE);
+
+ if (synced) {
+ src = rx_buf;
+ src_len = next_xfer_len;
+ } else {
+ src = memchr(rx_buf, VSC_TP_PACKET_SYNC, next_xfer_len);
+ if (!src)
+ continue;
+ synced = true;
+ src_len = next_xfer_len - (src - rx_buf);
+ }
+
+ /* traverse received data */
+ while (src_len > 0) {
+ cpy_len = min(src_len, dst_len);
+ memcpy(dst, src, cpy_len);
+ crc_src = src;
+ src += cpy_len;
+ src_len -= cpy_len;
+ dst += cpy_len;
+ dst_len -= cpy_len;
+
+ if (offset < sizeof(ack)) {
+ offset += cpy_len;
+ crc = crc32(crc, crc_src, cpy_len);
+
+ if (!src_len)
+ continue;
+
+ if (le16_to_cpu(ack.len)) {
+ dst = ibuf;
+ dst_len = min(ilen, le16_to_cpu(ack.len));
+ } else {
+ dst = (u8 *)&recv_crc;
+ dst_len = sizeof(recv_crc);
+ }
+ } else if (offset < sizeof(ack) + le16_to_cpu(ack.len)) {
+ offset += cpy_len;
+ crc = crc32(crc, crc_src, cpy_len);
+
+ if (src_len) {
+ int remain = sizeof(ack) + le16_to_cpu(ack.len) - offset;
+
+ cpy_len = min(src_len, remain);
+ offset += cpy_len;
+ crc = crc32(crc, src, cpy_len);
+ src += cpy_len;
+ src_len -= cpy_len;
+ if (src_len) {
+ dst = (u8 *)&recv_crc;
+ dst_len = sizeof(recv_crc);
+ continue;
+ }
+ }
+ next_xfer_len = VSC_TP_NEXT_XFER_LEN(le16_to_cpu(ack.len), offset);
+ } else if (offset < sizeof(ack) + le16_to_cpu(ack.len) + VSC_TP_CRC_SIZE) {
+ offset += cpy_len;
+
+ if (src_len) {
+ /* terminate the traverse */
+ next_xfer_len = 0;
+ break;
+ }
+ next_xfer_len = VSC_TP_NEXT_XFER_LEN(le16_to_cpu(ack.len), offset);
+ }
+ }
+ } while (next_xfer_len > 0 && --count_down);
+
+ if (next_xfer_len > 0)
+ return -EAGAIN;
+
+ if (~recv_crc != crc || le32_to_cpu(ack.seq) != tp->seq) {
+ dev_err(&tp->spi->dev, "recv crc or seq error\n");
+ return -EINVAL;
+ }
+
+ if (ack.cmd == VSC_TP_CMD_ACK || ack.cmd == VSC_TP_CMD_NACK ||
+ ack.cmd == VSC_TP_CMD_BUSY) {
+ dev_err(&tp->spi->dev, "recv cmd ack error\n");
+ return -EAGAIN;
+ }
+
+ return min(le16_to_cpu(ack.len), ilen);
+}
+
+/**
+ * vsc_tp_xfer - transfer data to firmware
+ * @tp: vsc_tp device handle
+ * @cmd: the command to be sent to the device
+ * @obuf: the tx buffer to be sent to the device
+ * @olen: the length of tx buffer
+ * @ibuf: the rx buffer to receive from the device
+ * @ilen: the length of rx buffer
+ * Return: the length of received data in case of success,
+ * otherwise negative value
+ */
+int vsc_tp_xfer(struct vsc_tp *tp, u8 cmd, const void *obuf, size_t olen,
+ void *ibuf, size_t ilen)
+{
+ struct vsc_tp_packet *pkt = tp->tx_buf;
+ u32 crc;
+ int ret;
+
+ if (!obuf || !ibuf || olen > VSC_TP_MAX_MSG_SIZE)
+ return -EINVAL;
+
+ guard(mutex)(&tp->mutex);
+
+ pkt->sync = VSC_TP_PACKET_SYNC;
+ pkt->cmd = cmd;
+ pkt->len = cpu_to_le16(olen);
+ pkt->seq = cpu_to_le32(++tp->seq);
+ memcpy(pkt->buf, obuf, olen);
+
+ crc = ~crc32(~0, (u8 *)pkt, sizeof(*pkt) + olen);
+ memcpy(pkt->buf + olen, &crc, sizeof(crc));
+
+ ret = vsc_tp_wakeup_request(tp);
+ if (unlikely(ret))
+ dev_err(&tp->spi->dev, "wakeup firmware failed ret: %d\n", ret);
+ else
+ ret = vsc_tp_xfer_helper(tp, pkt, ibuf, ilen);
+
+ vsc_tp_wakeup_release(tp);
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(vsc_tp_xfer, VSC_TP);
+
+/**
+ * vsc_tp_rom_xfer - transfer data to rom code
+ * @tp: vsc_tp device handle
+ * @obuf: the data buffer to be sent to the device
+ * @ibuf: the buffer to receive data from the device
+ * @len: the length of tx buffer and rx buffer
+ * Return: 0 in case of success, negative value in case of error
+ */
+int vsc_tp_rom_xfer(struct vsc_tp *tp, const void *obuf, void *ibuf, size_t len)
+{
+ size_t words = len / sizeof(__be32);
+ int ret;
+
+ if (len % sizeof(__be32) || len > VSC_TP_MAX_MSG_SIZE)
+ return -EINVAL;
+
+ guard(mutex)(&tp->mutex);
+
+ /* rom xfer is big endian */
+ cpu_to_be32_array(tp->tx_buf, obuf, words);
+
+ ret = read_poll_timeout(gpiod_get_value_cansleep, ret,
+ !ret, VSC_TP_ROM_XFER_POLL_DELAY_US,
+ VSC_TP_ROM_XFER_POLL_TIMEOUT_US, false,
+ tp->wakeuphost);
+ if (ret) {
+ dev_err(&tp->spi->dev, "wait rom failed ret: %d\n", ret);
+ return ret;
+ }
+
+ ret = vsc_tp_dev_xfer(tp, tp->tx_buf, tp->rx_buf, len);
+ if (ret)
+ return ret;
+
+ if (ibuf)
+ cpu_to_be32_array(ibuf, tp->rx_buf, words);
+
+ return ret;
+}
+
+/**
+ * vsc_tp_reset - reset vsc transport layer
+ * @tp: vsc_tp device handle
+ */
+void vsc_tp_reset(struct vsc_tp *tp)
+{
+ disable_irq(tp->spi->irq);
+
+ /* toggle reset pin */
+ gpiod_set_value_cansleep(tp->resetfw, 0);
+ msleep(VSC_TP_RESET_PIN_TOGGLE_INTERVAL_MS);
+ gpiod_set_value_cansleep(tp->resetfw, 1);
+
+ /* wait for ROM */
+ msleep(VSC_TP_ROM_BOOTUP_DELAY_MS);
+
+ /*
+ * Set default host wakeup pin to non-active
+ * to avoid unexpected host irq interrupt.
+ */
+ gpiod_set_value_cansleep(tp->wakeupfw, 1);
+
+ atomic_set(&tp->assert_cnt, 0);
+
+ enable_irq(tp->spi->irq);
+}
+EXPORT_SYMBOL_NS_GPL(vsc_tp_reset, VSC_TP);
+
+/**
+ * vsc_tp_need_read - check if the device has data to send
+ * @tp: vsc_tp device handle
+ * Return: true if the device has data to send, otherwise false
+ */
+bool vsc_tp_need_read(struct vsc_tp *tp)
+{
+ if (!atomic_read(&tp->assert_cnt))
+ return false;
+ if (!gpiod_get_value_cansleep(tp->wakeuphost))
+ return false;
+ if (!gpiod_get_value_cansleep(tp->wakeupfw))
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL_NS_GPL(vsc_tp_need_read, VSC_TP);
+
+/**
+ * vsc_tp_register_event_cb - register a callback function to receive event
+ * @tp: vsc_tp device handle
+ * @event_cb: callback function
+ * @context: execution context of event callback
+ * Return: 0 in case of success, negative value in case of error
+ */
+int vsc_tp_register_event_cb(struct vsc_tp *tp, vsc_tp_event_cb_t event_cb,
+ void *context)
+{
+ tp->event_notify = event_cb;
+ tp->event_notify_context = context;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(vsc_tp_register_event_cb, VSC_TP);
+
+/**
+ * vsc_tp_intr_synchronize - synchronize vsc_tp interrupt
+ * @tp: vsc_tp device handle
+ */
+void vsc_tp_intr_synchronize(struct vsc_tp *tp)
+{
+ synchronize_irq(tp->spi->irq);
+}
+EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_synchronize, VSC_TP);
+
+/**
+ * vsc_tp_intr_enable - enable vsc_tp interrupt
+ * @tp: vsc_tp device handle
+ */
+void vsc_tp_intr_enable(struct vsc_tp *tp)
+{
+ enable_irq(tp->spi->irq);
+}
+EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_enable, VSC_TP);
+
+/**
+ * vsc_tp_intr_disable - disable vsc_tp interrupt
+ * @tp: vsc_tp device handle
+ */
+void vsc_tp_intr_disable(struct vsc_tp *tp)
+{
+ disable_irq(tp->spi->irq);
+}
+EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_disable, VSC_TP);
+
+static irqreturn_t vsc_tp_isr(int irq, void *data)
+{
+ struct vsc_tp *tp = data;
+
+ atomic_inc(&tp->assert_cnt);
+
+ wake_up(&tp->xfer_wait);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t vsc_tp_thread_isr(int irq, void *data)
+{
+ struct vsc_tp *tp = data;
+
+ if (tp->event_notify)
+ tp->event_notify(tp->event_notify_context);
+
+ return IRQ_HANDLED;
+}
+
+static int vsc_tp_match_any(struct acpi_device *adev, void *data)
+{
+ struct acpi_device **__adev = data;
+
+ *__adev = adev;
+
+ return 1;
+}
+
+static int vsc_tp_probe(struct spi_device *spi)
+{
+ struct platform_device_info pinfo = { 0 };
+ struct device *dev = &spi->dev;
+ struct platform_device *pdev;
+ struct acpi_device *adev;
+ struct vsc_tp *tp;
+ int ret;
+
+ tp = devm_kzalloc(dev, sizeof(*tp), GFP_KERNEL);
+ if (!tp)
+ return -ENOMEM;
+
+ tp->tx_buf = devm_kzalloc(dev, VSC_TP_MAX_XFER_SIZE, GFP_KERNEL);
+ if (!tp->tx_buf)
+ return -ENOMEM;
+
+ tp->rx_buf = devm_kzalloc(dev, VSC_TP_MAX_XFER_SIZE, GFP_KERNEL);
+ if (!tp->rx_buf)
+ return -ENOMEM;
+
+ ret = devm_acpi_dev_add_driver_gpios(dev, vsc_tp_acpi_gpios);
+ if (ret)
+ return ret;
+
+ tp->wakeuphost = devm_gpiod_get(dev, "wakeuphost", GPIOD_IN);
+ if (IS_ERR(tp->wakeuphost))
+ return PTR_ERR(tp->wakeuphost);
+
+ tp->resetfw = devm_gpiod_get(dev, "resetfw", GPIOD_OUT_HIGH);
+ if (IS_ERR(tp->resetfw))
+ return PTR_ERR(tp->resetfw);
+
+ tp->wakeupfw = devm_gpiod_get(dev, "wakeupfw", GPIOD_OUT_HIGH);
+ if (IS_ERR(tp->wakeupfw))
+ return PTR_ERR(tp->wakeupfw);
+
+ atomic_set(&tp->assert_cnt, 0);
+ init_waitqueue_head(&tp->xfer_wait);
+ tp->spi = spi;
+
+ irq_set_status_flags(spi->irq, IRQ_DISABLE_UNLAZY);
+ ret = devm_request_threaded_irq(dev, spi->irq, vsc_tp_isr,
+ vsc_tp_thread_isr,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ dev_name(dev), tp);
+ if (ret)
+ return ret;
+
+ mutex_init(&tp->mutex);
+
+ /* only one child acpi device */
+ ret = acpi_dev_for_each_child(ACPI_COMPANION(dev),
+ vsc_tp_match_any, &adev);
+ if (!ret) {
+ ret = -ENODEV;
+ goto err_destroy_lock;
+ }
+ pinfo.fwnode = acpi_fwnode_handle(adev);
+
+ pinfo.name = "intel_vsc";
+ pinfo.data = &tp;
+ pinfo.size_data = sizeof(tp);
+ pinfo.id = PLATFORM_DEVID_NONE;
+
+ pdev = platform_device_register_full(&pinfo);
+ if (IS_ERR(pdev)) {
+ ret = PTR_ERR(pdev);
+ goto err_destroy_lock;
+ }
+
+ tp->pdev = pdev;
+ spi_set_drvdata(spi, tp);
+
+ return 0;
+
+err_destroy_lock:
+ mutex_destroy(&tp->mutex);
+
+ return ret;
+}
+
+static void vsc_tp_remove(struct spi_device *spi)
+{
+ struct vsc_tp *tp = spi_get_drvdata(spi);
+
+ platform_device_unregister(tp->pdev);
+
+ mutex_destroy(&tp->mutex);
+}
+
+static const struct acpi_device_id vsc_tp_acpi_ids[] = {
+ { "INTC1009" }, /* Raptor Lake */
+ { "INTC1058" }, /* Tiger Lake */
+ { "INTC1094" }, /* Alder Lake */
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, vsc_tp_acpi_ids);
+
+static struct spi_driver vsc_tp_driver = {
+ .probe = vsc_tp_probe,
+ .remove = vsc_tp_remove,
+ .driver = {
+ .name = "vsc-tp",
+ .acpi_match_table = vsc_tp_acpi_ids,
+ },
+};
+module_spi_driver(vsc_tp_driver);
+
+MODULE_AUTHOR("Wentong Wu <wentong.wu@intel.com>");
+MODULE_AUTHOR("Zhifeng Wang <zhifeng.wang@intel.com>");
+MODULE_DESCRIPTION("Intel Visual Sensing Controller Transport Layer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/mei/vsc-tp.h b/drivers/misc/mei/vsc-tp.h
new file mode 100644
index 000000000000..f9513ddc3e40
--- /dev/null
+++ b/drivers/misc/mei/vsc-tp.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023, Intel Corporation.
+ * Intel Visual Sensing Controller Transport Layer Linux driver
+ */
+
+#ifndef _VSC_TP_H_
+#define _VSC_TP_H_
+
+#include <linux/types.h>
+
+#define VSC_TP_CMD_WRITE 0x01
+#define VSC_TP_CMD_READ 0x02
+
+#define VSC_TP_CMD_ACK 0x10
+#define VSC_TP_CMD_NACK 0x11
+#define VSC_TP_CMD_BUSY 0x12
+
+struct vsc_tp;
+
+/**
+ * typedef vsc_tp_event_cb_t - event callback function signature
+ * @context: the execution context of whoever registered this callback
+ *
+ * The callback function is called in interrupt context and the data
+ * payload is only valid during the call. If the user needs to access
+ * the data payload later, it must copy the payload.
+ */
+typedef void (*vsc_tp_event_cb_t)(void *context);
+
+int vsc_tp_rom_xfer(struct vsc_tp *tp, const void *obuf, void *ibuf,
+ size_t len);
+
+int vsc_tp_xfer(struct vsc_tp *tp, u8 cmd, const void *obuf, size_t olen,
+ void *ibuf, size_t ilen);
+
+int vsc_tp_register_event_cb(struct vsc_tp *tp, vsc_tp_event_cb_t event_cb,
+ void *context);
+
+void vsc_tp_intr_enable(struct vsc_tp *tp);
+void vsc_tp_intr_disable(struct vsc_tp *tp);
+void vsc_tp_intr_synchronize(struct vsc_tp *tp);
+
+void vsc_tp_reset(struct vsc_tp *tp);
+
+bool vsc_tp_need_read(struct vsc_tp *tp);
+
+int vsc_tp_init(struct vsc_tp *tp, struct device *dev);
+
+#endif
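
For context, this is how a consumer of the header above drives a transfer; it mirrors the read helper in platform-vsc.c. Only the command code and the return convention (received byte count, or a negative errno) come from the API; the payload layout shown is a caller-side assumption:

	#include <linux/types.h>

	#include "vsc-tp.h"

	static int vsc_tp_read_sketch(struct vsc_tp *tp, void *buf, size_t buflen)
	{
		u64 ts[2] = { 0, 0 };	/* opaque payload, defined by the caller */

		return vsc_tp_xfer(tp, VSC_TP_CMD_READ, ts, sizeof(ts),
				   buf, buflen);
	}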
diff --git a/drivers/misc/nsm.c b/drivers/misc/nsm.c
new file mode 100644
index 000000000000..0eaa3b4484bd
--- /dev/null
+++ b/drivers/misc/nsm.c
@@ -0,0 +1,506 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Amazon Nitro Secure Module driver.
+ *
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * The Nitro Secure Module implements commands via CBOR over virtio.
+ * This driver exposes raw message ioctls on /dev/nsm that user
+ * space can use to issue these commands.
+ */
+
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/hw_random.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/uio.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio.h>
+#include <linux/wait.h>
+#include <uapi/linux/nsm.h>
+
+/* Timeout for NSM virtqueue response in milliseconds. */
+#define NSM_DEFAULT_TIMEOUT_MSECS (120000) /* 2 minutes */
+
+/* Maximum length input data */
+struct nsm_data_req {
+ u32 len;
+ u8 data[NSM_REQUEST_MAX_SIZE];
+};
+
+/* Maximum length output data */
+struct nsm_data_resp {
+ u32 len;
+ u8 data[NSM_RESPONSE_MAX_SIZE];
+};
+
+/* Full NSM request/response message */
+struct nsm_msg {
+ struct nsm_data_req req;
+ struct nsm_data_resp resp;
+};
+
+struct nsm {
+ struct virtio_device *vdev;
+ struct virtqueue *vq;
+ struct mutex lock;
+ struct completion cmd_done;
+ struct miscdevice misc;
+ struct hwrng hwrng;
+ struct work_struct misc_init;
+ struct nsm_msg msg;
+};
+
+/* NSM device ID */
+static const struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_NITRO_SEC_MOD, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+
+static struct nsm *file_to_nsm(struct file *file)
+{
+ return container_of(file->private_data, struct nsm, misc);
+}
+
+static struct nsm *hwrng_to_nsm(struct hwrng *rng)
+{
+ return container_of(rng, struct nsm, hwrng);
+}
+
+#define CBOR_TYPE_MASK 0xE0
+#define CBOR_TYPE_MAP 0xA0
+#define CBOR_TYPE_TEXT 0x60
+#define CBOR_TYPE_ARRAY 0x40
+#define CBOR_HEADER_SIZE_SHORT 1
+
+#define CBOR_SHORT_SIZE_MAX_VALUE 23
+#define CBOR_LONG_SIZE_U8 24
+#define CBOR_LONG_SIZE_U16 25
+#define CBOR_LONG_SIZE_U32 26
+#define CBOR_LONG_SIZE_U64 27
+
+static bool cbor_object_is_array(const u8 *cbor_object, size_t cbor_object_size)
+{
+ if (cbor_object_size == 0 || cbor_object == NULL)
+ return false;
+
+ return (cbor_object[0] & CBOR_TYPE_MASK) == CBOR_TYPE_ARRAY;
+}
+
+static int cbor_object_get_array(u8 *cbor_object, size_t cbor_object_size, u8 **cbor_array)
+{
+ u8 cbor_short_size;
+ void *array_len_p;
+ u64 array_len;
+ u64 array_offset;
+
+ if (!cbor_object_is_array(cbor_object, cbor_object_size))
+ return -EFAULT;
+
+ cbor_short_size = (cbor_object[0] & 0x1F);
+
+ /* Decoding byte array length */
+ array_offset = CBOR_HEADER_SIZE_SHORT;
+ if (cbor_short_size >= CBOR_LONG_SIZE_U8)
+ array_offset += BIT(cbor_short_size - CBOR_LONG_SIZE_U8);
+
+ if (cbor_object_size < array_offset)
+ return -EFAULT;
+
+ array_len_p = &cbor_object[1];
+
+ switch (cbor_short_size) {
+ case CBOR_SHORT_SIZE_MAX_VALUE: /* short encoding */
+ array_len = cbor_short_size;
+ break;
+ case CBOR_LONG_SIZE_U8:
+ array_len = *(u8 *)array_len_p;
+ break;
+ case CBOR_LONG_SIZE_U16:
+ array_len = be16_to_cpup((__be16 *)array_len_p);
+ break;
+ case CBOR_LONG_SIZE_U32:
+ array_len = be32_to_cpup((__be32 *)array_len_p);
+ break;
+ case CBOR_LONG_SIZE_U64:
+ array_len = be64_to_cpup((__be64 *)array_len_p);
+ break;
+ default: /* plain short encoding: the header byte holds the length */
+ if (cbor_short_size > CBOR_SHORT_SIZE_MAX_VALUE)
+ return -EFAULT; /* 28-31 are reserved/indefinite */
+ array_len = cbor_short_size;
+ break;
+ }
+
+ if (cbor_object_size < array_offset)
+ return -EFAULT;
+
+ if (cbor_object_size - array_offset < array_len)
+ return -EFAULT;
+
+ if (array_len > INT_MAX)
+ return -EFAULT;
+
+ *cbor_array = cbor_object + array_offset;
+ return array_len;
+}
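+
+/*
+ * Worked example (illustrative only): for the byte string
+ * 0x58 0x20 <32 bytes>, cbor_object_get_array() computes
+ *   cbor_object[0] & CBOR_TYPE_MASK = 0x40 -> CBOR_TYPE_ARRAY
+ *   cbor_short_size = 0x58 & 0x1F = 24 -> CBOR_LONG_SIZE_U8
+ *   array_offset = 1 + BIT(24 - 24) = 2
+ *   array_len = cbor_object[1] = 0x20 = 32
+ * so *cbor_array points at cbor_object + 2 and 32 is returned.
+ */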
+
+/* Copy the request of a raw message to kernel space */
+static int fill_req_raw(struct nsm *nsm, struct nsm_data_req *req,
+ struct nsm_raw *raw)
+{
+ /* Verify the user input size. */
+ if (raw->request.len > sizeof(req->data))
+ return -EMSGSIZE;
+
+ /* Copy the request payload */
+ if (copy_from_user(req->data, u64_to_user_ptr(raw->request.addr),
+ raw->request.len))
+ return -EFAULT;
+
+ req->len = raw->request.len;
+
+ return 0;
+}
+
+/* Copy the response of a raw message back to user-space */
+static int parse_resp_raw(struct nsm *nsm, struct nsm_data_resp *resp,
+ struct nsm_raw *raw)
+{
+ /* Truncate any message that does not fit. */
+ raw->response.len = min_t(u64, raw->response.len, resp->len);
+
+ /* Copy the response content to user space */
+ if (copy_to_user(u64_to_user_ptr(raw->response.addr),
+ resp->data, raw->response.len))
+ return -EFAULT;
+
+ return 0;
+}
+
+/* Virtqueue interrupt handler */
+static void nsm_vq_callback(struct virtqueue *vq)
+{
+ struct nsm *nsm = vq->vdev->priv;
+
+ complete(&nsm->cmd_done);
+}
+
+/* Forward a message to the NSM device and wait for the response from it */
+static int nsm_sendrecv_msg_locked(struct nsm *nsm)
+{
+ struct device *dev = &nsm->vdev->dev;
+ struct scatterlist sg_in, sg_out;
+ struct nsm_msg *msg = &nsm->msg;
+ struct virtqueue *vq = nsm->vq;
+ unsigned int len;
+ void *queue_buf;
+ bool kicked;
+ int rc;
+
+ /* Initialize scatter-gather lists with request and response buffers. */
+ sg_init_one(&sg_out, msg->req.data, msg->req.len);
+ sg_init_one(&sg_in, msg->resp.data, sizeof(msg->resp.data));
+
+ init_completion(&nsm->cmd_done);
+ /* Add the request buffer (read by the device). */
+ rc = virtqueue_add_outbuf(vq, &sg_out, 1, msg->req.data, GFP_KERNEL);
+ if (rc)
+ return rc;
+
+ /* Add the response buffer (written by the device). */
+ rc = virtqueue_add_inbuf(vq, &sg_in, 1, msg->resp.data, GFP_KERNEL);
+ if (rc)
+ goto cleanup;
+
+ kicked = virtqueue_kick(vq);
+ if (!kicked) {
+ /* Cannot kick the virtqueue. */
+ rc = -EIO;
+ goto cleanup;
+ }
+
+ /* If the kick succeeded, wait for the device's response. */
+ if (!wait_for_completion_io_timeout(&nsm->cmd_done,
+ msecs_to_jiffies(NSM_DEFAULT_TIMEOUT_MSECS))) {
+ rc = -ETIMEDOUT;
+ goto cleanup;
+ }
+
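+ /* The device is expected to complete the request buffer first, then the response */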
+ queue_buf = virtqueue_get_buf(vq, &len);
+ if (!queue_buf || (queue_buf != msg->req.data)) {
+ dev_err(dev, "wrong request buffer.");
+ rc = -ENODATA;
+ goto cleanup;
+ }
+
+ queue_buf = virtqueue_get_buf(vq, &len);
+ if (!queue_buf || (queue_buf != msg->resp.data)) {
+ dev_err(dev, "wrong response buffer.");
+ rc = -ENODATA;
+ goto cleanup;
+ }
+
+ msg->resp.len = len;
+
+ rc = 0;
+
+cleanup:
+ if (rc) {
+ /* Clean the virtqueue. */
+ while (virtqueue_get_buf(vq, &len) != NULL)
+ ;
+ }
+
+ return rc;
+}
+
+static int fill_req_get_random(struct nsm *nsm, struct nsm_data_req *req)
+{
+ /*
+ * 69 # text(9)
+ * 47657452616E646F6D # "GetRandom"
+ */
+ const u8 request[] = { CBOR_TYPE_TEXT + strlen("GetRandom"),
+ 'G', 'e', 't', 'R', 'a', 'n', 'd', 'o', 'm' };
+
+ memcpy(req->data, request, sizeof(request));
+ req->len = sizeof(request);
+
+ return 0;
+}
+
+static int parse_resp_get_random(struct nsm *nsm, struct nsm_data_resp *resp,
+ void *out, size_t max)
+{
+ /*
+ * A1 # map(1)
+ * 69 # text(9) - Name of field
+ * 47657452616E646F6D # "GetRandom"
+ * A1 # map(1) - The field itself
+ * 66 # text(6)
+ * 72616E646F6D # "random"
+ * # The rest of the response is random data
+ */
+ const u8 response[] = { CBOR_TYPE_MAP + 1,
+ CBOR_TYPE_TEXT + strlen("GetRandom"),
+ 'G', 'e', 't', 'R', 'a', 'n', 'd', 'o', 'm',
+ CBOR_TYPE_MAP + 1,
+ CBOR_TYPE_TEXT + strlen("random"),
+ 'r', 'a', 'n', 'd', 'o', 'm' };
+ struct device *dev = &nsm->vdev->dev;
+ u8 *rand_data = NULL;
+ u8 *resp_ptr = resp->data;
+ u64 resp_len = resp->len;
+ int rc;
+
+ if ((resp->len < sizeof(response) + 1) ||
+ (memcmp(resp_ptr, response, sizeof(response)) != 0)) {
+ dev_err(dev, "Invalid response for GetRandom");
+ return -EFAULT;
+ }
+
+ resp_ptr += sizeof(response);
+ resp_len -= sizeof(response);
+
+ rc = cbor_object_get_array(resp_ptr, resp_len, &rand_data);
+ if (rc < 0) {
+ dev_err(dev, "GetRandom: Invalid CBOR encoding\n");
+ return rc;
+ }
+
+ rc = min_t(size_t, rc, max);
+ memcpy(out, rand_data, rc);
+
+ return rc;
+}
+
+/*
+ * HwRNG implementation
+ */
+static int nsm_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ struct nsm *nsm = hwrng_to_nsm(rng);
+ struct device *dev = &nsm->vdev->dev;
+ int rc = 0;
+
+ /* NSM always needs to wait for a response */
+ if (!wait)
+ return 0;
+
+ mutex_lock(&nsm->lock);
+
+ rc = fill_req_get_random(nsm, &nsm->msg.req);
+ if (rc != 0)
+ goto out;
+
+ rc = nsm_sendrecv_msg_locked(nsm);
+ if (rc != 0)
+ goto out;
+
+ rc = parse_resp_get_random(nsm, &nsm->msg.resp, data, max);
+ if (rc < 0)
+ goto out;
+
+ dev_dbg(dev, "RNG: returning %d random bytes\n", rc);
+out:
+ mutex_unlock(&nsm->lock);
+ return rc;
+}
+
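+/*
+ * Illustrative usage once this rng is selected (paths assume the
+ * standard hwrng sysfs layout):
+ *
+ * echo nsm-hwrng > /sys/class/misc/hw_random/rng_current
+ * dd if=/dev/hwrng bs=16 count=1 | xxd
+ */
+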
+static long nsm_dev_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ void __user *argp = u64_to_user_ptr((u64)arg);
+ struct nsm *nsm = file_to_nsm(file);
+ struct nsm_raw raw;
+ int r = 0;
+
+ if (cmd != NSM_IOCTL_RAW)
+ return -EINVAL;
+
+ if (_IOC_SIZE(cmd) != sizeof(raw))
+ return -EINVAL;
+
+ /* Copy user argument struct to kernel argument struct */
+ if (copy_from_user(&raw, argp, _IOC_SIZE(cmd)))
+ return -EFAULT;
+
+ mutex_lock(&nsm->lock);
+
+ /* Convert kernel argument struct to device request */
+ r = fill_req_raw(nsm, &nsm->msg.req, &raw);
+ if (r)
+ goto out;
+
+ /* Send message to NSM and read reply */
+ r = nsm_sendrecv_msg_locked(nsm);
+ if (r)
+ goto out;
+
+ /* Parse device response into kernel argument struct */
+ r = parse_resp_raw(nsm, &nsm->msg.resp, &raw);
+ if (r)
+ goto out;
+
+ /* Copy kernel argument struct back to user argument struct */
+ r = -EFAULT;
+ if (copy_to_user(argp, &raw, sizeof(raw)))
+ goto out;
+
+ r = 0;
+
+out:
+ mutex_unlock(&nsm->lock);
+ return r;
+}
+
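+/*
+ * Illustrative user-space call (struct nsm_raw and NSM_IOCTL_RAW come
+ * from the uapi header; error handling omitted):
+ *
+ * struct nsm_raw raw = {
+ * .request.addr = (__u64)(uintptr_t)req, .request.len = req_len,
+ * .response.addr = (__u64)(uintptr_t)rsp, .response.len = sizeof(rsp),
+ * };
+ * int fd = open("/dev/nsm", O_RDWR);
+ * ioctl(fd, NSM_IOCTL_RAW, &raw);
+ *
+ * On return, raw.response.len holds the actual (possibly truncated)
+ * reply length.
+ */
+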
+static int nsm_device_init_vq(struct virtio_device *vdev)
+{
+ struct virtqueue *vq = virtio_find_single_vq(vdev,
+ nsm_vq_callback, "nsm.vq.0");
+ struct nsm *nsm = vdev->priv;
+
+ if (IS_ERR(vq))
+ return PTR_ERR(vq);
+
+ nsm->vq = vq;
+
+ return 0;
+}
+
+static const struct file_operations nsm_dev_fops = {
+ .unlocked_ioctl = nsm_dev_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+};
+
+/* Handler for probing the NSM device */
+static int nsm_device_probe(struct virtio_device *vdev)
+{
+ struct device *dev = &vdev->dev;
+ struct nsm *nsm;
+ int rc;
+
+ nsm = devm_kzalloc(dev, sizeof(*nsm), GFP_KERNEL);
+ if (!nsm)
+ return -ENOMEM;
+
+ vdev->priv = nsm;
+ nsm->vdev = vdev;
+
+ rc = nsm_device_init_vq(vdev);
+ if (rc) {
+ dev_err(dev, "queue failed to initialize: %d.\n", rc);
+ goto err_init_vq;
+ }
+
+ mutex_init(&nsm->lock);
+
+ /* Register as hwrng provider */
+ nsm->hwrng = (struct hwrng) {
+ .read = nsm_rng_read,
+ .name = "nsm-hwrng",
+ .quality = 1000,
+ };
+
+ rc = hwrng_register(&nsm->hwrng);
+ if (rc) {
+ dev_err(dev, "RNG initialization error: %d.\n", rc);
+ goto err_hwrng;
+ }
+
+ /* Register /dev/nsm device node */
+ nsm->misc = (struct miscdevice) {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "nsm",
+ .fops = &nsm_dev_fops,
+ .mode = 0666,
+ };
+
+ rc = misc_register(&nsm->misc);
+ if (rc) {
+ dev_err(dev, "misc device registration error: %d.\n", rc);
+ goto err_misc;
+ }
+
+ return 0;
+
+err_misc:
+ hwrng_unregister(&nsm->hwrng);
+err_hwrng:
+ vdev->config->del_vqs(vdev);
+err_init_vq:
+ return rc;
+}
+
+/* Handler for removing the NSM device */
+static void nsm_device_remove(struct virtio_device *vdev)
+{
+ struct nsm *nsm = vdev->priv;
+
+ /* Tear down in the reverse order of registration */
+ misc_deregister(&nsm->misc);
+ hwrng_unregister(&nsm->hwrng);
+ vdev->config->del_vqs(vdev);
+}
+
+/* Virtio driver structure for the NSM device */
+static struct virtio_driver virtio_nsm_driver = {
+ .feature_table = 0,
+ .feature_table_size = 0,
+ .feature_table_legacy = 0,
+ .feature_table_size_legacy = 0,
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .id_table = id_table,
+ .probe = nsm_device_probe,
+ .remove = nsm_device_remove,
+};
+
+module_virtio_driver(virtio_nsm_driver);
+MODULE_DEVICE_TABLE(virtio, id_table);
+MODULE_DESCRIPTION("Virtio NSM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/ocxl/afu_irq.c b/drivers/misc/ocxl/afu_irq.c
index a06920b7e049..36f7379b8e2d 100644
--- a/drivers/misc/ocxl/afu_irq.c
+++ b/drivers/misc/ocxl/afu_irq.c
@@ -57,7 +57,7 @@ EXPORT_SYMBOL_GPL(ocxl_irq_set_handler);
static irqreturn_t afu_irq_handler(int virq, void *data)
{
- struct afu_irq *irq = (struct afu_irq *) data;
+ struct afu_irq *irq = data;
trace_ocxl_afu_irq_receive(virq);
diff --git a/drivers/misc/ocxl/context.c b/drivers/misc/ocxl/context.c
index 7f83116ae11a..cded7d1caf32 100644
--- a/drivers/misc/ocxl/context.c
+++ b/drivers/misc/ocxl/context.c
@@ -55,7 +55,7 @@ EXPORT_SYMBOL_GPL(ocxl_context_alloc);
*/
static void xsl_fault_error(void *data, u64 addr, u64 dsisr)
{
- struct ocxl_context *ctx = (struct ocxl_context *) data;
+ struct ocxl_context *ctx = data;
mutex_lock(&ctx->xsl_error_lock);
ctx->xsl_error.addr = addr;
diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c
index ac69b7f361f5..7eb74711ac96 100644
--- a/drivers/misc/ocxl/file.c
+++ b/drivers/misc/ocxl/file.c
@@ -184,7 +184,7 @@ static irqreturn_t irq_handler(void *private)
{
struct eventfd_ctx *ev_ctx = private;
- eventfd_signal(ev_ctx, 1);
+ eventfd_signal(ev_ctx);
return IRQ_HANDLED;
}
diff --git a/drivers/misc/ocxl/link.c b/drivers/misc/ocxl/link.c
index c06c699c0e7b..03402203cacd 100644
--- a/drivers/misc/ocxl/link.c
+++ b/drivers/misc/ocxl/link.c
@@ -188,7 +188,7 @@ ack:
static irqreturn_t xsl_fault_handler(int irq, void *data)
{
- struct ocxl_link *link = (struct ocxl_link *) data;
+ struct ocxl_link *link = data;
struct spa *spa = link->spa;
u64 dsisr, dar, pe_handle;
struct pe_data *pe_data;
@@ -483,7 +483,7 @@ static void release_xsl(struct kref *ref)
void ocxl_link_release(struct pci_dev *dev, void *link_handle)
{
- struct ocxl_link *link = (struct ocxl_link *) link_handle;
+ struct ocxl_link *link = link_handle;
mutex_lock(&links_list_lock);
kref_put(&link->ref, release_xsl);
@@ -540,7 +540,7 @@ int ocxl_link_add_pe(void *link_handle, int pasid, u32 pidr, u32 tidr,
void (*xsl_err_cb)(void *data, u64 addr, u64 dsisr),
void *xsl_err_data)
{
- struct ocxl_link *link = (struct ocxl_link *) link_handle;
+ struct ocxl_link *link = link_handle;
struct spa *spa = link->spa;
struct ocxl_process_element *pe;
int pe_handle, rc = 0;
@@ -630,7 +630,7 @@ EXPORT_SYMBOL_GPL(ocxl_link_add_pe);
int ocxl_link_update_pe(void *link_handle, int pasid, __u16 tid)
{
- struct ocxl_link *link = (struct ocxl_link *) link_handle;
+ struct ocxl_link *link = link_handle;
struct spa *spa = link->spa;
struct ocxl_process_element *pe;
int pe_handle, rc;
@@ -666,7 +666,7 @@ int ocxl_link_update_pe(void *link_handle, int pasid, __u16 tid)
int ocxl_link_remove_pe(void *link_handle, int pasid)
{
- struct ocxl_link *link = (struct ocxl_link *) link_handle;
+ struct ocxl_link *link = link_handle;
struct spa *spa = link->spa;
struct ocxl_process_element *pe;
struct pe_data *pe_data;
@@ -752,7 +752,7 @@ EXPORT_SYMBOL_GPL(ocxl_link_remove_pe);
int ocxl_link_irq_alloc(void *link_handle, int *hw_irq)
{
- struct ocxl_link *link = (struct ocxl_link *) link_handle;
+ struct ocxl_link *link = link_handle;
int irq;
if (atomic_dec_if_positive(&link->irq_available) < 0)
@@ -771,7 +771,7 @@ EXPORT_SYMBOL_GPL(ocxl_link_irq_alloc);
void ocxl_link_free_irq(void *link_handle, int hw_irq)
{
- struct ocxl_link *link = (struct ocxl_link *) link_handle;
+ struct ocxl_link *link = link_handle;
xive_native_free_irq(hw_irq);
atomic_inc(&link->irq_available);
diff --git a/drivers/misc/ocxl/main.c b/drivers/misc/ocxl/main.c
index ef73cf35dda2..658974143c3c 100644
--- a/drivers/misc/ocxl/main.c
+++ b/drivers/misc/ocxl/main.c
@@ -7,7 +7,7 @@
static int __init init_ocxl(void)
{
- int rc = 0;
+ int rc;
if (!tlbie_capable)
return -EINVAL;
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index af519088732d..c38a6083f0a7 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -28,14 +28,14 @@
#define DRV_MODULE_NAME "pci-endpoint-test"
#define IRQ_TYPE_UNDEFINED -1
-#define IRQ_TYPE_LEGACY 0
+#define IRQ_TYPE_INTX 0
#define IRQ_TYPE_MSI 1
#define IRQ_TYPE_MSIX 2
#define PCI_ENDPOINT_TEST_MAGIC 0x0
#define PCI_ENDPOINT_TEST_COMMAND 0x4
-#define COMMAND_RAISE_LEGACY_IRQ BIT(0)
+#define COMMAND_RAISE_INTX_IRQ BIT(0)
#define COMMAND_RAISE_MSI_IRQ BIT(1)
#define COMMAND_RAISE_MSIX_IRQ BIT(2)
#define COMMAND_READ BIT(3)
@@ -183,8 +183,8 @@ static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
bool res = true;
switch (type) {
- case IRQ_TYPE_LEGACY:
- irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
+ case IRQ_TYPE_INTX:
+ irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_INTX);
if (irq < 0)
dev_err(dev, "Failed to get Legacy interrupt\n");
break;
@@ -244,7 +244,7 @@ static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
fail:
switch (irq_type) {
- case IRQ_TYPE_LEGACY:
+ case IRQ_TYPE_INTX:
dev_err(dev, "Failed to request IRQ %d for Legacy\n",
pci_irq_vector(pdev, i));
break;
@@ -263,6 +263,15 @@ fail:
return false;
}
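+/* A distinct test pattern per BAR helps catch BARs aliased to the same memory */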
+static const u32 bar_test_pattern[] = {
+ 0xA0A0A0A0,
+ 0xA1A1A1A1,
+ 0xA2A2A2A2,
+ 0xA3A3A3A3,
+ 0xA4A4A4A4,
+ 0xA5A5A5A5,
+};
+
static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
enum pci_barno barno)
{
@@ -280,26 +289,27 @@ static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
size = 0x4;
for (j = 0; j < size; j += 4)
- pci_endpoint_test_bar_writel(test, barno, j, 0xA0A0A0A0);
+ pci_endpoint_test_bar_writel(test, barno, j,
+ bar_test_pattern[barno]);
for (j = 0; j < size; j += 4) {
val = pci_endpoint_test_bar_readl(test, barno, j);
- if (val != 0xA0A0A0A0)
+ if (val != bar_test_pattern[barno])
return false;
}
return true;
}
-static bool pci_endpoint_test_legacy_irq(struct pci_endpoint_test *test)
+static bool pci_endpoint_test_intx_irq(struct pci_endpoint_test *test)
{
u32 val;
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
- IRQ_TYPE_LEGACY);
+ IRQ_TYPE_INTX);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
- COMMAND_RAISE_LEGACY_IRQ);
+ COMMAND_RAISE_INTX_IRQ);
val = wait_for_completion_timeout(&test->irq_raised,
msecs_to_jiffies(1000));
if (!val)
@@ -385,7 +395,7 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
if (use_dma)
flags |= FLAG_USE_DMA;
- if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
+ if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
dev_err(dev, "Invalid IRQ type option\n");
goto err;
}
@@ -521,7 +531,7 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test,
if (use_dma)
flags |= FLAG_USE_DMA;
- if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
+ if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
dev_err(dev, "Invalid IRQ type option\n");
goto err;
}
@@ -621,7 +631,7 @@ static bool pci_endpoint_test_read(struct pci_endpoint_test *test,
if (use_dma)
flags |= FLAG_USE_DMA;
- if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
+ if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
dev_err(dev, "Invalid IRQ type option\n");
goto err;
}
@@ -691,7 +701,7 @@ static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
struct pci_dev *pdev = test->pdev;
struct device *dev = &pdev->dev;
- if (req_irq_type < IRQ_TYPE_LEGACY || req_irq_type > IRQ_TYPE_MSIX) {
+ if (req_irq_type < IRQ_TYPE_INTX || req_irq_type > IRQ_TYPE_MSIX) {
dev_err(dev, "Invalid IRQ type option\n");
return false;
}
@@ -737,8 +747,8 @@ static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
goto ret;
ret = pci_endpoint_test_bar(test, bar);
break;
- case PCITEST_LEGACY_IRQ:
- ret = pci_endpoint_test_legacy_irq(test);
+ case PCITEST_INTX_IRQ:
+ ret = pci_endpoint_test_intx_irq(test);
break;
case PCITEST_MSI:
case PCITEST_MSIX:
@@ -801,7 +811,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
test->irq_type = IRQ_TYPE_UNDEFINED;
if (no_msi)
- irq_type = IRQ_TYPE_LEGACY;
+ irq_type = IRQ_TYPE_INTX;
data = (struct pci_endpoint_test_data *)ent->driver_data;
if (data) {
@@ -860,7 +870,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, test);
- id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL);
+ id = ida_alloc(&pci_endpoint_test_ida, GFP_KERNEL);
if (id < 0) {
err = id;
dev_err(dev, "Unable to get id\n");
@@ -907,7 +917,7 @@ err_kfree_test_name:
kfree(test->name);
err_ida_remove:
- ida_simple_remove(&pci_endpoint_test_ida, id);
+ ida_free(&pci_endpoint_test_ida, id);
err_iounmap:
for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
@@ -943,7 +953,7 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev)
misc_deregister(&test->miscdev);
kfree(misc_device->name);
kfree(test->name);
- ida_simple_remove(&pci_endpoint_test_ida, id);
+ ida_free(&pci_endpoint_test_ida, id);
for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
if (test->bar[bar])
pci_iounmap(pdev, test->bar[bar]);
diff --git a/drivers/misc/pvpanic/pvpanic-mmio.c b/drivers/misc/pvpanic/pvpanic-mmio.c
index 9715798acce3..f3f2113a54a7 100644
--- a/drivers/misc/pvpanic/pvpanic-mmio.c
+++ b/drivers/misc/pvpanic/pvpanic-mmio.c
@@ -7,16 +7,15 @@
* Copyright (C) 2021 Oracle.
*/
+#include <linux/device.h>
+#include <linux/err.h>
#include <linux/io.h>
-#include <linux/kernel.h>
+#include <linux/ioport.h>
#include <linux/kexec.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/types.h>
-#include <linux/slab.h>
-
-#include <uapi/misc/pvpanic.h>
#include "pvpanic.h"
diff --git a/drivers/misc/pvpanic/pvpanic-pci.c b/drivers/misc/pvpanic/pvpanic-pci.c
index 689af4c28c2a..9ad20e82785b 100644
--- a/drivers/misc/pvpanic/pvpanic-pci.c
+++ b/drivers/misc/pvpanic/pvpanic-pci.c
@@ -5,17 +5,13 @@
* Copyright (C) 2021 Oracle.
*/
-#include <linux/kernel.h>
+#include <linux/errno.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/types.h>
-#include <linux/slab.h>
-
-#include <uapi/misc/pvpanic.h>
#include "pvpanic.h"
-#define PCI_VENDOR_ID_REDHAT 0x1b36
#define PCI_DEVICE_ID_REDHAT_PVPANIC 0x0011
MODULE_AUTHOR("Mihai Carabas <mihai.carabas@oracle.com>");
diff --git a/drivers/misc/pvpanic/pvpanic.c b/drivers/misc/pvpanic/pvpanic.c
index 305b367e0ce3..df3457ce1cb1 100644
--- a/drivers/misc/pvpanic/pvpanic.c
+++ b/drivers/misc/pvpanic/pvpanic.c
@@ -8,16 +8,20 @@
*/
#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/gfp_types.h>
#include <linux/io.h>
-#include <linux/kernel.h>
#include <linux/kexec.h>
+#include <linux/kstrtox.h>
+#include <linux/limits.h>
+#include <linux/list.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/platform_device.h>
#include <linux/panic_notifier.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/sysfs.h>
#include <linux/types.h>
-#include <linux/cdev.h>
-#include <linux/list.h>
#include <uapi/misc/pvpanic.h>
diff --git a/drivers/misc/pvpanic/pvpanic.h b/drivers/misc/pvpanic/pvpanic.h
index 46ffb10438ad..a42fa760eed5 100644
--- a/drivers/misc/pvpanic/pvpanic.h
+++ b/drivers/misc/pvpanic/pvpanic.h
@@ -8,6 +8,11 @@
#ifndef PVPANIC_H_
#define PVPANIC_H_
+#include <linux/compiler_types.h>
+
+struct attribute_group;
+struct device;
+
int devm_pvpanic_probe(struct device *dev, void __iomem *base);
extern const struct attribute_group *pvpanic_dev_groups[];
diff --git a/drivers/misc/vmw_vmci/vmci_handle_array.c b/drivers/misc/vmw_vmci/vmci_handle_array.c
index de7fee7ead1b..681b3500125a 100644
--- a/drivers/misc/vmw_vmci/vmci_handle_array.c
+++ b/drivers/misc/vmw_vmci/vmci_handle_array.c
@@ -8,12 +8,6 @@
#include <linux/slab.h>
#include "vmci_handle_array.h"
-static size_t handle_arr_calc_size(u32 capacity)
-{
- return VMCI_HANDLE_ARRAY_HEADER_SIZE +
- capacity * sizeof(struct vmci_handle);
-}
-
struct vmci_handle_arr *vmci_handle_arr_create(u32 capacity, u32 max_capacity)
{
struct vmci_handle_arr *array;
@@ -25,7 +19,7 @@ struct vmci_handle_arr *vmci_handle_arr_create(u32 capacity, u32 max_capacity)
capacity = min((u32)VMCI_HANDLE_ARRAY_DEFAULT_CAPACITY,
max_capacity);
- array = kmalloc(handle_arr_calc_size(capacity), GFP_ATOMIC);
+ array = kmalloc(struct_size(array, entries, capacity), GFP_ATOMIC);
if (!array)
return NULL;
@@ -51,8 +45,8 @@ int vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr,
struct vmci_handle_arr *new_array;
u32 capacity_bump = min(array->max_capacity - array->capacity,
array->capacity);
- size_t new_size = handle_arr_calc_size(array->capacity +
- capacity_bump);
+ size_t new_size = struct_size(array, entries,
+ size_add(array->capacity, capacity_bump));
if (array->size >= array->max_capacity)
return VMCI_ERROR_NO_MEM;
diff --git a/drivers/misc/vmw_vmci/vmci_handle_array.h b/drivers/misc/vmw_vmci/vmci_handle_array.h
index 96193f85be5b..27a38b97e8a8 100644
--- a/drivers/misc/vmw_vmci/vmci_handle_array.h
+++ b/drivers/misc/vmw_vmci/vmci_handle_array.h
@@ -17,17 +17,11 @@ struct vmci_handle_arr {
u32 max_capacity;
u32 size;
u32 pad;
- struct vmci_handle entries[];
+ struct vmci_handle entries[] __counted_by(capacity);
};
-#define VMCI_HANDLE_ARRAY_HEADER_SIZE \
- offsetof(struct vmci_handle_arr, entries)
/* Select a default capacity that results in a 64 byte sized array */
#define VMCI_HANDLE_ARRAY_DEFAULT_CAPACITY 6
-/* Make sure that the max array size can be expressed by a u32 */
-#define VMCI_HANDLE_ARRAY_MAX_CAPACITY \
- ((U32_MAX - VMCI_HANDLE_ARRAY_HEADER_SIZE - 1) / \
- sizeof(struct vmci_handle))
struct vmci_handle_arr *vmci_handle_arr_create(u32 capacity, u32 max_capacity);
void vmci_handle_arr_destroy(struct vmci_handle_arr *array);