Release develop 251225

This commit is contained in:
hongyi
2025-12-25 15:40:45 +08:00
parent 876ebfd3c5
commit e31acb1b2d
19 changed files with 574 additions and 475 deletions

View File

@@ -478,30 +478,9 @@
slew-rate = <0>;
};
};
can0_pins: can0-0 {
tx-pins {
pins = "GPIO0_25";
function = "can0";
bias-disable;
drive-strength = <5>;
input-disable;
input-schmitt-disable;
slew-rate = <1>;
};
rx-pins {
pins = "GPIO0_24";
function = "can0";
bias-disable;
drive-strength = <1>;
input-enable;
input-schmitt-enable;
slew-rate = <0>;
};
};
pcie_x1_pins: pcie_x1-1 {
pcie_x1-pins {
pins = "GPIO0_18", "GPIO0_19", "GPIO0_20", "GPIO0_21";
pins = "GPIO0_25";
function = "pcie_x1";
bias-disable;
drive-strength = <7>;
@@ -512,7 +491,7 @@
};
pcie_x4_pins: pcie_x4-1 {
pcie_x4-pins {
pins = "GPIO0_28", "GPIO0_29", "GPIO0_31";
pins = "GPIO0_29";
function = "pcie_x4";
bias-disable;
drive-strength = <7>;
@@ -1046,11 +1025,6 @@
pinctrl-0 = <&pwm1_pins>;
};
&can0 {
pinctrl-names = "default";
pinctrl-0 = <&can0_pins>;
};
&hdmi_tx {
pinctrl-names = "default";
pinctrl-0 = <&hdmi_pins>;

View File

@@ -1366,58 +1366,34 @@
resolution = <12>;
};
&vp_dfmu_iommu {
status = "disabled";
&iommu {
status = "okay";
};
&vp_dfmu_mt {
status = "disabled";
};
&npu_dfmu_iommu {
status = "disabled";
};
&npu_dfmu_mt {
status = "disabled";
};
&vi_dfmu_iommu {
status = "disabled";
};
&vi_dfmu_mt {
status = "disabled";
};
&vo_dfmu_iommu {
status = "disabled";
};
&vo_dfmu_mt {
status = "disabled";
};
&peri1_dfmu_iommu {
status = "okay";
};
&peri1_dfmu_mt {
status = "disabled";
};
&pcie_dfmu_iommu {
status = "disabled";
};
&pcie_dfmu_mt {
status = "disabled";
};
&usb_dfmu_iommu {
status = "okay";
};
&usb_dfmu_mt {
status = "disabled";
};

View File

@@ -282,58 +282,30 @@
status = "disabled";
};
&vp_dfmu_iommu {
status = "disabled";
};
&vp_dfmu_mt {
status = "disabled";
};
&npu_dfmu_iommu {
status = "disabled";
};
&npu_dfmu_mt {
status = "disabled";
};
&vi_dfmu_iommu {
status = "disabled";
};
&vi_dfmu_mt {
status = "disabled";
};
&vo_dfmu_iommu {
status = "disabled";
};
&vo_dfmu_mt {
status = "disabled";
};
&peri1_dfmu_iommu {
status = "disabled";
};
&peri1_dfmu_mt {
status = "disabled";
};
&pcie_dfmu_iommu {
status = "disabled";
};
&pcie_dfmu_mt {
status = "disabled";
};
&usb_dfmu_iommu {
status = "disabled";
};
&usb_dfmu_mt {
status = "disabled";
};

View File

@@ -54,6 +54,10 @@
status = "disabled";
};
&can0 {
status = "disabled";
};
&can1 {
status = "disabled";
};
@@ -175,58 +179,34 @@
};
/* dfmu/iommu */
&vp_dfmu_iommu {
status = "disabled";
&iommu {
status = "okay";
};
&vp_dfmu_mt {
status = "disabled";
};
&npu_dfmu_iommu {
status = "disabled";
};
&npu_dfmu_mt {
status = "disabled";
};
&vi_dfmu_iommu {
status = "disabled";
};
&vi_dfmu_mt {
status = "disabled";
};
&vo_dfmu_iommu {
status = "disabled";
};
&vo_dfmu_mt {
status = "disabled";
};
&peri1_dfmu_iommu {
status = "okay";
};
&peri1_dfmu_mt {
status = "disabled";
};
&pcie_dfmu_iommu {
status = "disabled";
};
&pcie_dfmu_mt {
status = "disabled";
};
&usb_dfmu_iommu {
status = "okay";
};
&usb_dfmu_mt {
status = "disabled";
};

View File

@@ -869,39 +869,6 @@
power-domains = <&power_top>;
#power-domain-cells = <0>;
id = <A210_PD_PERI1>;
clocks = <&clk_peri PERI1_GMAC0_ACLK_EN>, <&clk_peri PERI1_GMAC0_HCLK_EN>, <&clk_peri PERI1_GMAC1_ACLK_EN>,
<&clk_peri PERI1_GMAC1_HCLK_EN>, <&clk_peri PERI1_I2C0_IC_CLK_EN>, <&clk_peri PERI1_I2C0_PCLK_EN>,
<&clk_peri PERI1_I2C1_IC_CLK_EN>, <&clk_peri PERI1_I2C1_PCLK_EN>, <&clk_peri PERI1_I2C2_IC_CLK_EN>,
<&clk_peri PERI1_I2C2_PCLK_EN>, <&clk_peri PERI1_I2S0_PCLK_EN>, <&clk_peri PERI1_I2S0_SRC_CLK_EN>,
<&clk_peri PERI1_PWM0_CCLK_EN>, <&clk_peri PERI1_PWM0_PCLK_EN>, <&clk_peri PERI1_SPI0_PCLK_EN>,
<&clk_peri PERI1_SPI0_SSI_CLK_EN>, <&clk_peri PERI1_UART1_PCLK_EN>, <&clk_peri PERI1_UART1_SCLK_EN>,
<&clk_peri PERI1_UART2_PCLK_EN>, <&clk_peri PERI1_UART2_SCLK_EN>, <&clk_peri PERI1_UART3_PCLK_EN>,
<&clk_peri PERI1_UART3_SCLK_EN>, <&clk_peri PERI1_X2H_GMAC0_ACLK_EN>, <&clk_peri PERI1_X2H_GMAC0_HCLK_EN>,
<&clk_peri PERI1_X2H_GMAC1_ACLK_EN>, <&clk_peri PERI1_X2H_GMAC1_HCLK_EN>, <&clk_peri PERI1_CAN0_HIRES_CLK_EN>,
<&clk_peri PERI1_CAN0_OSC_CLK_EN>, <&clk_peri PERI1_CAN0_PCLK_EN>, <&clk_peri PERI1_CAN1_HIRES_CLK_EN>,
<&clk_peri PERI1_CAN1_OSC_CLK_EN>, <&clk_peri PERI1_CAN1_PCLK_EN>, <&clk_peri PERI1_PDM0_MCLK_EN>,
<&clk_peri PERI1_PDM0_PCLK_EN>, <&clk_peri PERI1_TDM0_PCLK_EN>, <&clk_peri PERI1_TDM0_SCLK_EN>,
<&clk_peri PERI1_X2H_GMAC2_ACLK_EN>, <&clk_peri PERI1_X2H_GMAC2_HCLK_EN>, <&clk_peri PERI1_GMAC2_ACLK_EN>,
<&clk_peri PERI1_GMAC2_HCLK_EN>, <&clk_peri PERI1_ZGMAC_X2X_ACLK_EN>;
resets = <&rst PERI1_GMAC0_ARST>, <&rst PERI1_GMAC0_HRST>, <&rst PERI1_GMAC1_ARST>,
<&rst PERI1_GMAC1_HRST>, <&rst PERI1_GPIO0_DBRST>, <&rst PERI1_GPIO0_PRST>,
<&rst PERI1_GPIO1_DBRST>, <&rst PERI1_GPIO1_PRST>, <&rst PERI1_I2C0_IC_RST>,
<&rst PERI1_I2C0_PRST>, <&rst PERI1_I2C1_IC_RST>, <&rst PERI1_I2C1_PRST>,
<&rst PERI1_I2C2_IC_RST>, <&rst PERI1_I2C2_PRST>, <&rst PERI1_I2S0_PRST>,
<&rst PERI1_PWM0_CRST>,
<&rst PERI1_PWM0_PRST>, <&rst PERI1_QSPI0_PRST>, <&rst PERI1_QSPI0_SSI_RST>,
<&rst PERI1_SPI0_PRST>, <&rst PERI1_SPI0_SSI_RST>, <&rst PERI1_UART0_PRST>,
<&rst PERI1_UART0_S_RST>, <&rst PERI1_UART1_PRST>, <&rst PERI1_UART1_S_RST>,
<&rst PERI1_UART2_PRST>, <&rst PERI1_UART2_S_RST>, <&rst PERI1_UART3_PRST>,
<&rst PERI1_UART3_S_RST>, <&rst PERI1_X2H_GMAC0_ARST>, <&rst PERI1_X2H_GMAC0_HRST>,
<&rst PERI1_X2H_GMAC1_ARST>, <&rst PERI1_X2H_GMAC1_HRST>, <&rst PERI1_PDM0_MRST>,
<&rst PERI1_PDM0_PRST>, <&rst PERI1_TDM0_RST>, <&rst PERI1_CAN0_IPG_PE_RST>,
<&rst PERI1_CAN0_IPG_RST>, <&rst PERI1_CAN0_IPG_SOFT_RST>, <&rst PERI1_CAN0_IPG_TS_RST>,
<&rst PERI1_CAN0_PRST>, <&rst PERI1_CAN1_IPG_PE_RST>, <&rst PERI1_CAN1_IPG_RST>,
<&rst PERI1_CAN1_IPG_SOFT_RST>, <&rst PERI1_CAN1_IPG_TS_RST>, <&rst PERI1_CAN1_PRST>,
<&rst PERI1_CHIP_DBG_ARST>, <&rst PERI1_CHIP_DBG_CRST>, <&rst PERI1_CHIP_DBG_PRST>,
<&rst PERI1_GMAC_CRST>, <&rst PERI1_X2H_GMAC2_ARST>, <&rst PERI1_X2H_GMAC2_HRST>,
<&rst PERI1_ZGMAC_X2X_ARST>;
iopmps = <&device_aon_iopmp>, <&device_chip_dbg_iopmp> ,<&device_peri1_iommu_iopmp>,
<&device_gmac_0_iopmp>, <&device_gmac_1_iopmp>, <&device_gmac_2_iopmp>,
<&device_peri1_mt_iopmp>;

View File

@@ -388,7 +388,7 @@
clocks = <&clk_peri PERI3_DMAC_ACLK_EN>, <&clk_peri PERI3_DMAC_HCLK_EN>;
clock-names = "core-clk", "cfgr-clk";
power-domains = <&power_peri3>;
//iommus = <&pcie_dfmu_iommu DEVID_DIE0_DMAC_AP>;
//iommus = <&iommu DEVID_DIE0_DMAC_AP>;
#dma-cells = <1>;
dma-channels = <16>;
snps,block-size = <65536 65536 65536 65536 65536 65536 65536 65536
@@ -414,7 +414,7 @@
snps,dma-masters = <1>;
snps,data-width = <4>;
snps,axi-max-burst-len = <16>;
//iommus = <&pcie_dfmu_iommu DEVID_DIE0_TEE_DMAC>;
//iommus = <&iommu DEVID_DIE0_TEE_DMAC>;
status = "okay";
};
@@ -636,7 +636,7 @@
<&clk_peri PERI3_EMMC_HCLK_EN>;
clock-names = "core", "bus";
power-domains = <&power_peri3>;
//iommus = <&pcie_dfmu_iommu DEVID_DIE0_EMMC>;
//iommus = <&iommu DEVID_DIE0_EMMC>;
status = "okay";
};
@@ -650,7 +650,7 @@
<&clk_peri PERI3_EMMC_HCLK_EN>;
clock-names = "core", "bus";
power-domains = <&power_peri3>;
//iommus = <&pcie_dfmu_iommu DEVID_DIE0_SD>;
//iommus = <&iommu DEVID_DIE0_SD>;
status = "okay";
};
@@ -686,7 +686,7 @@
snps,axi-config = <&stmmac_axi_setup>;
snps,tso;
zhihe,gmacsys = <&gmac0_sys>;
// iommus = <&peri1_dfmu_iommu DEVID_DIE0_GMAC_0>;
// iommus = <&iommu DEVID_DIE0_GMAC_0>;
status = "okay";
mdio0: mdio {
@@ -712,7 +712,7 @@
snps,axi-config = <&stmmac_axi_setup>;
snps,tso;
zhihe,gmacsys = <&gmac1_sys>;
// iommus = <&peri1_dfmu_iommu DEVID_DIE0_GMAC_1>;
// iommus = <&iommu DEVID_DIE0_GMAC_1>;
status = "okay";
mdio1: mdio {
@@ -1483,7 +1483,7 @@
reg-io-width = <4>;
dr_mode = "host";
power-domains = <&power_usb>;
//iommus = <&usb_dfmu_iommu DEVID_DIE0_USB3_0>;
//iommus = <&iommu DEVID_DIE0_USB3_0>;
snps,usb3_lpm_capable;
snps,usb_sofitpsync;
status = "okay";
@@ -1517,7 +1517,7 @@
maximum-speed = "high-speed";
dr_mode = "host";
power-domains = <&power_usb>;
// iommus = <&usb_dfmu_iommu DEVID_DIE0_USB2_0>;
// iommus = <&iommu DEVID_DIE0_USB2_0>;
snps,need-phy-for-wake;
status = "okay";
};
@@ -1532,7 +1532,7 @@
maximum-speed = "high-speed";
dr_mode = "host";
power-domains = <&power_usb>;
// iommus = <&usb_dfmu_iommu DEVID_DIE0_USB2_1>;
// iommus = <&iommu DEVID_DIE0_USB2_1>;
snps,need-phy-for-wake;
status = "okay";
};
@@ -1755,7 +1755,7 @@
<&clk_pcie PCIE_RP_GEN3X1_PCLK_EN>;
clock-names = "e16phy_clk","gen3x1_aux_clk","gen3x1_slv_clk","gen3x1_mst_clk","gen3x1_pclk";
power-domains = <&power_pcie1>;
//iommus = <&pcie_dfmu_iommu DEVID_DIE0_PCIE_1>;
//iommus = <&iommu DEVID_DIE0_PCIE_1>;
status = "okay";
};
@@ -1803,7 +1803,7 @@
<&clk_pcie SATA_GEN3X2_ACLK_EN>;
clock-names = "pmalive", "rxoob", "aclk";
power-domains = <&power_sata>;
//iommus = <&pcie_dfmu_iommu DEVID_DIE0_SATA_0>;
//iommus = <&iommu DEVID_DIE0_SATA_0>;
ports-implemented = <3>;
status = "okay";
@@ -1822,125 +1822,76 @@
};
};
iommu: iommu@0 {
compatible = "zhihe,iommu";
reg = <0x0 0x06372000 0x0 0x400>,
<0x0 0x06B12000 0x0 0x400>,
<0x0 0x07102000 0x0 0x400>,
<0x0 0x06712000 0x0 0x400>,
<0x0 0x02032000 0x0 0x400>,
<0x0 0x0a012000 0x0 0x400>,
<0x0 0x08022000 0x0 0x400>;
reg-names = "vi", "vp", "npu", "vo", "peri1", "pcie", "usb";
clocks = <&clk SW_IOMMU_PTW_330_ACLK_EN>;
clock-names = "iommu_ptw_aclk";
interrupt-parent = <&intc>;
interrupts = <204>, <235>, <66>, <223>, <344>, <174>, <187>;
power-domains = <&power_vi_isp>, <&power_venc>, <&power_vdec>, <&power_npu_ip>,
<&power_vo>, <&power_pcie0>, <&power_pcie1>, <&power_usb>,
<&power_peri1>;
status = "disabled";
#iommu-cells = <1>;
};
vi_dfmu_mt: mt@0x06370000 {
compatible = "zhihe,memtester";
reg = <0x0 0x06370000 0x0 0x400>;
iommus = <&vi_dfmu_iommu DEVID_DIE0_VI_DFMU>;
iommus = <&iommu DEVID_DIE0_VI_DFMU>;
status = "disabled";
};
vi_dfmu_iommu: iommu@0x06372000 {
compatible = "riscv,iommu";
reg = <0x0 0x06372000 0x0 0x400>;
interrupt-parent = <&intc>;
interrupts = <204>;
status = "disabled";
#iommu-cells = <1>;
power-domains = <&power_vi_isp>;
};
vp_dfmu_mt: mt@0x06B10000 {
compatible = "zhihe,memtester";
reg = <0x0 0x06B10000 0x0 0x400>;
iommus = <&vp_dfmu_iommu DEVID_DIE0_VP_DFMU>;
iommus = <&iommu DEVID_DIE0_VP_DFMU>;
status = "disabled";
};
vp_dfmu_iommu: iommu@0x06B12000 {
compatible = "riscv,iommu";
reg = <0x0 0x06B12000 0x0 0x400>;
interrupt-parent = <&intc>;
interrupts = <235>;
status = "disabled";
#iommu-cells = <1>;
power-domains = <&power_vp_wrapper>;
};
npu_dfmu_mt: mt@0x07100000 {
compatible = "zhihe,memtester";
reg = <0x0 0x07100000 0x0 0x400>;
iommus = <&npu_dfmu_iommu DEVID_DIE0_NPU_DFMU>;
iommus = <&iommu DEVID_DIE0_NPU_DFMU>;
status = "disabled";
};
npu_dfmu_iommu: iommu@0x07102000 {
compatible = "riscv,iommu";
reg = <0x0 0x07102000 0x0 0x400>;
interrupt-parent = <&intc>;
interrupts = <66>;
status = "disabled";
#iommu-cells = <1>;
power-domains = <&power_npu_ip>;
};
vo_dfmu_mt: mt@0x06710000 {
compatible = "zhihe,memtester";
reg = <0x0 0x06710000 0x0 0x400>;
iommus = <&vo_dfmu_iommu DEVID_DIE0_VO_DFMU>;
iommus = <&iommu DEVID_DIE0_VO_DFMU>;
status = "disabled";
};
vo_dfmu_iommu: iommu@0x06712000 {
compatible = "riscv,iommu";
reg = <0x0 0x06712000 0x0 0x400>;
interrupt-parent = <&intc>;
interrupts = <223>;
status = "disabled";
#iommu-cells = <1>;
power-domains = <&power_vo>;
};
peri1_dfmu_mt: mt@0x02030000 {
compatible = "zhihe,memtester";
reg = <0x0 0x02030000 0x0 0x400>;
iommus = <&peri1_dfmu_iommu DEVID_DIE0_PERI1_DFMU>;
iommus = <&iommu DEVID_DIE0_PERI1_DFMU>;
status = "disabled";
};
peri1_dfmu_iommu: iommu@0x02032000 {
compatible = "riscv,iommu";
reg = <0x0 0x02032000 0x0 0x400>;
interrupt-parent = <&intc>;
interrupts = <344>;
status = "okay";
#iommu-cells = <1>;
power-domains = <&power_peri1>;
};
pcie_dfmu_mt: mt@0x0a010000 {
compatible = "zhihe,memtester";
reg = <0x0 0x0a010000 0x0 0x400>;
iommus = <&pcie_dfmu_iommu DEVID_DIE0_PCIE_DFMU>;
iommus = <&iommu DEVID_DIE0_PCIE_DFMU>;
status = "disabled";
};
pcie_dfmu_iommu: iommu@0x0a012000 {
compatible = "riscv,iommu";
reg = <0x0 0x0a012000 0x0 0x400>;
interrupt-parent = <&intc>;
interrupts = <174>;
status = "okay";
#iommu-cells = <1>;
power-domains = <&power_top>;
};
usb_dfmu_mt: mt@0x08020000 {
compatible = "zhihe,memtester";
reg = <0x0 0x08020000 0x0 0x400>;
iommus = <&usb_dfmu_iommu DEVID_DIE0_USB_DFMU>;
iommus = <&iommu DEVID_DIE0_USB_DFMU>;
status = "disabled";
};
usb_dfmu_iommu: iommu@0x08022000 {
compatible = "riscv,iommu";
reg = <0x0 0x08022000 0x0 0x400>;
interrupt-parent = <&intc>;
interrupts = <187>;
status = "okay";
#iommu-cells = <1>;
power-domains = <&power_usb>;
};
npu0: vipcore@0x07000000 {
compatible = "verisilicon,vipcore0";
reg = <0x00 0x7000000 0x00 0x10000>;
@@ -1978,9 +1929,9 @@
<&clk_peri TEE_EIP120SIII_CLKEN>,
<&clk_peri TEE_EIP150B_CLKEN>;
clock-names = "120si_clk","120sii_clk","120siii_clk","hclk";
//iommus = <&pcie_dfmu_iommu DEVID_DIE0_TEE_EIP120SI>,
// <&pcie_dfmu_iommu DEVID_DIE0_TEE_EIP120SII>,
// <&pcie_dfmu_iommu DEVID_DIE0_TEE_EIP120SIII>;
//iommus = <&iommu DEVID_DIE0_TEE_EIP120SI>,
// <&iommu DEVID_DIE0_TEE_EIP120SII>,
// <&iommu DEVID_DIE0_TEE_EIP120SIII>;
status = "okay";
};

View File

@@ -353,7 +353,7 @@ CONFIG_DRM_PANEL=y
CONFIG_DRM_PANEL_SIMPLE=m
CONFIG_DRM_PANEL_ILITEK_ILI9881C=m
CONFIG_DRM_PANEL_JADARD_JD9365DA_H3=m
CONFIG_DRM_PANEL_LT8911=m
CONFIG_DRM_PANEL_LT8911=y
CONFIG_DRM_BRIDGE=y
CONFIG_DRM_PANEL_BRIDGE=y
CONFIG_DRM_DW_HDMI=m
@@ -363,8 +363,8 @@ CONFIG_VERISILICON_DW_DP_P100=y
CONFIG_ZHIHE_AUXDISP=y
CONFIG_FB=y
CONFIG_FIRMWARE_EDID=y
CONFIG_BACKLIGHT_CLASS_DEVICE=m
CONFIG_BACKLIGHT_PWM=m
CONFIG_BACKLIGHT_CLASS_DEVICE=y
CONFIG_BACKLIGHT_PWM=y
# CONFIG_VGA_CONSOLE is not set
CONFIG_LOGO=y
CONFIG_SOUND=m
@@ -459,7 +459,6 @@ CONFIG_PM_DEVFREQ_EVENT=y
CONFIG_EXTCON=y
CONFIG_IIO=y
CONFIG_IIO_SW_DEVICE=y
CONFIG_THEAD_TH1520_ADC=m
CONFIG_PWM=y
CONFIG_PWM_THEAD=y
CONFIG_PHY_DW_DPHY=y

View File

@@ -926,7 +926,7 @@ static int lt8911_dsi_probe(struct mipi_dsi_device *dsi)
ctx->desc = &lt8911_panel_data;
ret = lt8911_parse_dt(ctx);
if (ret) {
DBG_FUNC("%s: failed to parse device tree\n", __func__);
dev_err(&dsi->dev, "%s: failed to parse device tree\n", __func__);
return ret;
}
@@ -939,9 +939,7 @@ static int lt8911_dsi_probe(struct mipi_dsi_device *dsi)
//ctx->panel_data->set_dsi(ctx->dsi);
drm_panel_init(&ctx->panel, &dsi->dev, &panel_funcs, DRM_MODE_CONNECTOR_DSI);
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
drm_panel_of_backlight(&ctx->panel);
drm_panel_add(&ctx->panel);
@@ -952,11 +950,10 @@ static int lt8911_dsi_probe(struct mipi_dsi_device *dsi)
ret = register_pm_notifier(&ctx->pm_notify);
if (ret)
DBG_FUNC("register_pm_notifier failed: %d\n", ret);
dev_err(&dsi->dev, "register_pm_notifier failed: %d\n", ret);
ret = mipi_dsi_attach(dsi);
if (ret < 0)
{
if (ret < 0) {
drm_panel_remove(&ctx->panel);
}

View File

@@ -1 +1 @@
obj-$(CONFIG_RISCV_IOMMU) += iommu.o iommu-platform.o iommu-sysfs.o io_pgtable.o
obj-$(CONFIG_RISCV_IOMMU) += iommu.o iommu-platform.o io_pgtable.o

View File

@@ -141,7 +141,7 @@ static int riscv_iommu_map_pages(struct io_pgtable_ops *ops,
pte_t pte_val;
pgprot_t pte_prot;
dev_dbg(domain->iommu->dev, "%s enter iova=0x%lx, phys=0x%llx pgsize=0x%lx pgcount=0x%lx\n", __func__, iova, phys, pgsize, pgcount);
dev_dbg(domain->iommus->dev, "%s enter iova=0x%lx, phys=0x%llx pgsize=0x%lx pgcount=0x%lx\n", __func__, iova, phys, pgsize, pgcount);
if (domain->domain.type == IOMMU_DOMAIN_BLOCKED)
return -ENODEV;

View File

@@ -11,6 +11,9 @@
#include <linux/kernel.h>
#include <linux/of_platform.h>
#include <linux/bitfield.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/clk.h>
#include "iommu-bits.h"
#include "iommu.h"
@@ -20,90 +23,148 @@
static int riscv_iommu_platform_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct zh_iommu_device *iommus = NULL;
struct riscv_iommu_device *iommu = NULL;
struct resource *res = NULL;
struct device_node *np = dev->of_node;
u32 fctl = 0;
int irq = 0;
int ret = 0;
int count;
iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
if (!iommu)
iommus = devm_kzalloc(dev, sizeof(*iommus), GFP_KERNEL);
if (!iommus)
return -ENOMEM;
iommus->dev = dev;
mutex_init(&iommus->lock);
INIT_LIST_HEAD(&iommus->iommus);
INIT_LIST_HEAD(&iommus->groups);
dev_set_drvdata(dev, iommus);
dev_set_name(dev, "%s", "zhihe,iommu");
iommu->dev = dev;
dev_set_drvdata(dev, iommu);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(dev, "could not find resource for register region\n");
iommus->iommu_ptw_aclk = devm_clk_get(dev, "iommu_ptw_aclk");
if (IS_ERR(iommus->iommu_ptw_aclk)) {
dev_err(dev, "failed to get iommu_ptw_aclk\n");
return -EINVAL;
}
ret = clk_prepare_enable(iommus->iommu_ptw_aclk);
if (ret < 0) {
dev_err(dev, "could not prepare or enable iommu_ptw_aclk\n");
clk_disable_unprepare(iommus->iommu_ptw_aclk);
return -EINVAL;
}
iommu->reg = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(iommu->reg)) {
ret = dev_err_probe(dev, PTR_ERR(iommu->reg),
"could not map register region\n");
goto fail;
count = of_count_phandle_with_args(np, "power-domains", "#power-domain-cells");
if (count > 0) {
iommus->pd_devs = devm_kcalloc(dev, count, sizeof(*iommus->pd_devs), GFP_KERNEL);
if (!iommus->pd_devs)
return -ENOMEM;
iommus->num_pds = count;
for (int i = 0; i < count; i++) {
iommus->pd_devs[i] = dev_pm_domain_attach_by_id(dev, i);
if (IS_ERR(iommus->pd_devs[i]))
return PTR_ERR(iommus->pd_devs[i]);
device_link_add(dev, iommus->pd_devs[i], DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
}
}
iommu->reg_phys = res->start;
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
ret = -ENODEV;
for (int i = 0; i < pdev->num_resources; i++) {
const char *name;
/* Sanity check: Did we get the whole register space ? */
if ((res->end - res->start + 1) < RISCV_IOMMU_REG_SIZE) {
dev_err(dev, "device region smaller than register file (0x%llx)\n",
res->end - res->start);
goto fail;
iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
if (!iommu)
return -ENOMEM;
iommu->dev = dev;
INIT_LIST_HEAD(&iommu->list);
res = platform_get_resource(pdev, IORESOURCE_MEM, i);
if (!res) {
dev_err(dev, "could not find resource for register region\n");
return -EINVAL;
}
iommu->reg = devm_platform_get_and_ioremap_resource(pdev, i, &res);
if (IS_ERR(iommu->reg)) {
ret = dev_err_probe(dev, PTR_ERR(iommu->reg),
"could not map register region\n");
goto fail;
}
of_property_read_string_index(np, "reg-names", i, &name);
iommu->name = kstrdup(name, GFP_KERNEL);
iommu->reg_phys = res->start;
ret = -ENODEV;
/* Sanity check: Did we get the whole register space ? */
if ((res->end - res->start + 1) < RISCV_IOMMU_REG_SIZE) {
dev_err(dev, "device region smaller than register file (0x%llx)\n",
res->end - res->start);
goto fail;
}
iommu->cap = riscv_iommu_readq(iommu, RISCV_IOMMU_REG_CAP);
if (iommu->cap != A210_IOMMU_CAP) {
dev_err_probe(dev, -ENODEV,
"IOMMU:%s Capacity Reg=0x%llx Error.Check clock power reset!\n", iommu->name, iommu->cap);
goto fail;
}
/* For now we only support WSIs until we have AIA support */
ret = FIELD_GET(RISCV_IOMMU_CAP_IGS, iommu->cap);
if (ret == RISCV_IOMMU_CAP_IGS_MSI) {
dev_err(dev, "IOMMU only supports MSIs\n");
goto fail;
}
/* Parse IRQ assignment */
irq = platform_get_irq(pdev, i);
if (irq > 0)
iommu->irq = irq;
else {
dev_err(dev, "no IRQ provided for iommu\n");
goto fail;
}
/* Make sure fctl.WSI is set */
fctl = riscv_iommu_readl(iommu, RISCV_IOMMU_REG_FCTL);
fctl |= RISCV_IOMMU_FCTL_WSI;
riscv_iommu_writel(iommu, RISCV_IOMMU_REG_FCTL, fctl);
/* Parse queue lengths */
ret = of_property_read_u32(pdev->dev.of_node, "cmdq_len", &iommu->cmdq_len);
if (!ret)
dev_info(dev, "command queue length set to %i\n", iommu->cmdq_len);
ret = of_property_read_u32(pdev->dev.of_node, "fltq_len", &iommu->fltq_len);
if (!ret)
dev_info(dev, "fault/event queue length set to %i\n", iommu->fltq_len);
ret = riscv_iommu_init(iommu);
if (ret) {
dev_err(dev, "riscv_iommu_init failed %d", ret);
goto fail;
}
list_add_tail(&iommu->list, &iommus->iommus);
}
iommu->cap = riscv_iommu_readq(iommu, RISCV_IOMMU_REG_CAP);
if (iommu->cap != A210_IOMMU_CAP) {
dev_err_probe(dev, -ENODEV,
"IOMMU Capacity Reg=0x%llx Error.Check clock power reset!\n", iommu->cap);
goto fail;
}
/* For now we only support WSIs until we have AIA support */
ret = FIELD_GET(RISCV_IOMMU_CAP_IGS, iommu->cap);
if (ret == RISCV_IOMMU_CAP_IGS_MSI) {
dev_err(dev, "IOMMU only supports MSIs\n");
goto fail;
}
/* Parse IRQ assignment */
irq = platform_get_irq(pdev, 0);
if (irq > 0)
iommu->irq = irq;
else {
dev_err(dev, "no IRQ provided for iommu\n");
goto fail;
}
/* Make sure fctl.WSI is set */
fctl = riscv_iommu_readl(iommu, RISCV_IOMMU_REG_FCTL);
fctl |= RISCV_IOMMU_FCTL_WSI;
riscv_iommu_writel(iommu, RISCV_IOMMU_REG_FCTL, fctl);
/* Parse queue lengths */
ret = of_property_read_u32(pdev->dev.of_node, "cmdq_len", &iommu->cmdq_len);
if (!ret)
dev_info(dev, "command queue length set to %i\n", iommu->cmdq_len);
ret = of_property_read_u32(pdev->dev.of_node, "fltq_len", &iommu->fltq_len);
if (!ret)
dev_info(dev, "fault/event queue length set to %i\n", iommu->fltq_len);
dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
ret = riscv_iommu_init(iommu);
riscv_iommu_register(iommus);
//clear tlb
if (!ret) {
list_for_each_entry(iommu, &iommus->iommus, list) {
//clear tlb with a tmp domain
struct riscv_iommu_domain iommu_domain;
iommu_domain.iommu = iommu;
iommu_domain.iommus = iommus;
iommu_domain.pscid = 0;
iommu_domain.domain.ops = iommu->iommu.ops->default_domain_ops;
iommu_domain.domain.ops = iommus->iommu.ops->default_domain_ops;
iommu_flush_iotlb_all(&iommu_domain.domain);
}
@@ -123,7 +184,7 @@ static void riscv_iommu_platform_shutdown(struct platform_device *pdev)
}
static const struct of_device_id riscv_iommu_of_match[] = {
{.compatible = "riscv,iommu",},
{.compatible = "zhihe,iommu",},
{},
};
@@ -131,7 +192,7 @@ MODULE_DEVICE_TABLE(of, riscv_iommu_of_match);
static struct platform_driver riscv_iommu_platform_driver = {
.driver = {
.name = "riscv,iommu",
.name = "zhihe,iommu",
.of_match_table = riscv_iommu_of_match,
.suppress_bind_attrs = true,
},

View File

@@ -61,10 +61,6 @@ static DEVICE_ATTR_RO(address);
return len; \
}
extern void riscv_iommu_flush_iotlb_range(struct iommu_domain *iommu_domain,
unsigned long *start, unsigned long *end,
size_t *pgsize);
#define ATTR_WR_REG64(name, offset) \
ssize_t reg_ ## name ## _store(struct device *dev, \
struct device_attribute *attr, \
@@ -73,21 +69,13 @@ extern void riscv_iommu_flush_iotlb_range(struct iommu_domain *iommu_domain,
struct riscv_iommu_device *iommu = sysfs_dev_to_iommu(dev); \
unsigned long long val; \
int ret; \
unsigned long end = 0xffffffff; \
size_t pgsize = 0x1000; \
ret = kstrtoull(buf, 0, &val); \
if (ret) \
return ret; \
if (offset == 0xdeadbeef) { \
unsigned long tmp = (unsigned long)val; \
riscv_iommu_flush_iotlb_range(&iommu->domain->domain, &tmp, &end, &pgsize); \
} else { \
riscv_iommu_writeq(iommu, offset, val); \
} \
riscv_iommu_writeq(iommu, offset, val); \
return len; \
}
#define ATTR_RO_REG32(name, offset) \
static ATTR_RD_REG32(name, offset); \
static DEVICE_ATTR_RO(reg_ ## name)
@@ -139,7 +127,6 @@ ATTR_RW_REG64(iohpmctr_4, RISCV_IOMMU_REG_IOHPMCTR(3));
ATTR_RW_REG64(iohpmctr_5, RISCV_IOMMU_REG_IOHPMCTR(4));
ATTR_RW_REG64(iohpmctr_6, RISCV_IOMMU_REG_IOHPMCTR(5));
ATTR_RW_REG64(iohpmctr_7, RISCV_IOMMU_REG_IOHPMCTR(6));
ATTR_RW_REG64(invalid_range, 0xdeadbeef);
static struct attribute *riscv_iommu_attrs[] = {
&dev_attr_address.attr,
@@ -176,7 +163,6 @@ static struct attribute *riscv_iommu_attrs[] = {
&dev_attr_reg_iohpmevt_6.attr,
&dev_attr_reg_iohpmctr_7.attr,
&dev_attr_reg_iohpmevt_7.attr,
&dev_attr_reg_invalid_range.attr,
NULL,
};

View File

@@ -23,6 +23,7 @@
#include <linux/irqdomain.h>
#include <linux/platform_device.h>
#include <linux/dma-map-ops.h>
#include <linux/delay.h>
#include <asm/page.h>
#include "../dma-iommu.h"
@@ -31,6 +32,8 @@
#include <asm/csr.h>
#include <asm/delay.h>
#include <dt-bindings/iommu/zh-iommu.h>
MODULE_DESCRIPTION("IOMMU driver for RISC-V architected Ziommu implementations");
MODULE_AUTHOR("Tomasz Jeznach <tjeznach@rivosinc.com>");
MODULE_AUTHOR("Nick Kossifidis <mick@ics.forth.gr>");
@@ -326,86 +329,89 @@ static inline void riscv_iommu_cmd_iodir_set_did(struct riscv_iommu_command *cmd
}
/* TODO: Convert into lock-less MPSC implementation. */
static bool riscv_iommu_post_sync(struct riscv_iommu_device *iommu,
static void riscv_iommu_post_sync(struct zh_iommu_device *iommus,
struct riscv_iommu_command *cmd, bool sync)
{
u32 head, tail, next, last;
unsigned long flags;
struct riscv_iommu_device *iommu;
spin_lock_irqsave(&iommu->cq_lock, flags);
head = riscv_iommu_readl(iommu, RISCV_IOMMU_REG_CQH) & (iommu->cmdq.cnt - 1);
tail = riscv_iommu_readl(iommu, RISCV_IOMMU_REG_CQT) & (iommu->cmdq.cnt - 1);
last = iommu->cmdq.lui;
if (tail != last) {
spin_unlock_irqrestore(&iommu->cq_lock, flags);
/*
* FIXME: This is a workaround for dropped MMIO writes/reads on QEMU platform.
* While debugging of the problem is still ongoing, this provides
* a simple implementation of a try-again policy.
* Will be changed to a lock-less algorithm in the future.
*/
dev_dbg(iommu->dev, "IOMMU CQT: %x != %x (1st)\n", last, tail);
list_for_each_entry(iommu, &iommus->iommus, list) {
spin_lock_irqsave(&iommu->cq_lock, flags);
tail =
riscv_iommu_readl(iommu, RISCV_IOMMU_REG_CQT) & (iommu->cmdq.cnt - 1);
head = riscv_iommu_readl(iommu, RISCV_IOMMU_REG_CQH) & (iommu->cmdq.cnt - 1);
tail = riscv_iommu_readl(iommu, RISCV_IOMMU_REG_CQT) & (iommu->cmdq.cnt - 1);
last = iommu->cmdq.lui;
if (tail != last) {
spin_unlock_irqrestore(&iommu->cq_lock, flags);
dev_dbg(iommu->dev, "IOMMU CQT: %x != %x (2nd)\n", last, tail);
/*
* FIXME: This is a workaround for dropped MMIO writes/reads on QEMU platform.
* While debugging of the problem is still ongoing, this provides
* a simple implementation of a try-again policy.
* Will be changed to a lock-less algorithm in the future.
*/
dev_dbg(iommu->dev, "IOMMU CQT: %x != %x (1st)\n", last, tail);
spin_lock_irqsave(&iommu->cq_lock, flags);
}
}
next = (last + 1) & (iommu->cmdq.cnt - 1);
if (next != head) {
struct riscv_iommu_command *ptr = iommu->cmdq.base;
ptr[last] = *cmd;
dma_wmb();
riscv_iommu_writel(iommu, RISCV_IOMMU_REG_CQT, next);
iommu->cmdq.lui = next;
}
spin_unlock_irqrestore(&iommu->cq_lock, flags);
if (sync && head != next) {
cycles_t start_time = get_cycles();
while (1) {
last = riscv_iommu_readl(iommu, RISCV_IOMMU_REG_CQH) &
(iommu->cmdq.cnt - 1);
if (head < next && last >= next)
break;
if (head > next && last < head && last >= next)
break;
if (RISCV_IOMMU_TIMEOUT < (get_cycles() - start_time)) {
dev_err(iommu->dev, "IOFENCE TIMEOUT\n");
return false;
tail =
riscv_iommu_readl(iommu, RISCV_IOMMU_REG_CQT) & (iommu->cmdq.cnt - 1);
last = iommu->cmdq.lui;
if (tail != last) {
spin_unlock_irqrestore(&iommu->cq_lock, flags);
dev_dbg(iommu->dev, "IOMMU CQT: %x != %x (2nd)\n", last, tail);
spin_lock_irqsave(&iommu->cq_lock, flags);
}
}
next = (last + 1) & (iommu->cmdq.cnt - 1);
if (next != head) {
struct riscv_iommu_command *ptr = iommu->cmdq.base;
ptr[last] = *cmd;
dma_wmb();
riscv_iommu_writel(iommu, RISCV_IOMMU_REG_CQT, next);
iommu->cmdq.lui = next;
}
spin_unlock_irqrestore(&iommu->cq_lock, flags);
if (sync && head != next) {
cycles_t start_time = get_cycles();
while (1) {
last = riscv_iommu_readl(iommu, RISCV_IOMMU_REG_CQH) &
(iommu->cmdq.cnt - 1);
if (head < next && last >= next)
break;
if (head > next && last < head && last >= next)
break;
if (RISCV_IOMMU_TIMEOUT < (get_cycles() - start_time)) {
dev_err(iommu->dev, "IOFENCE TIMEOUT\n");
return;
}
cpu_relax();
}
cpu_relax();
}
}
return next != head;
return;
}
static bool riscv_iommu_post(struct riscv_iommu_device *iommu,
static void riscv_iommu_post(struct zh_iommu_device *iommus,
struct riscv_iommu_command *cmd)
{
return riscv_iommu_post_sync(iommu, cmd, false);
return riscv_iommu_post_sync(iommus, cmd, false);
}
static bool riscv_iommu_iodir_inv_devid(struct riscv_iommu_device *iommu, unsigned devid)
static void riscv_iommu_iodir_inv_devid(struct zh_iommu_device *iommus, unsigned devid)
{
struct riscv_iommu_command cmd;
riscv_iommu_cmd_iodir_inval_ddt(&cmd);
riscv_iommu_cmd_iodir_set_did(&cmd, devid);
return riscv_iommu_post(iommu, &cmd);
return riscv_iommu_post(iommus, &cmd);
}
static bool riscv_iommu_iofence_sync(struct riscv_iommu_device *iommu)
static void riscv_iommu_iofence_sync(struct zh_iommu_device *iommus)
{
struct riscv_iommu_command cmd;
riscv_iommu_cmd_iofence(&cmd);
return riscv_iommu_post_sync(iommu, &cmd, true);
return riscv_iommu_post_sync(iommus, &cmd, true);
}
/* Command queue primary interrupt handler */
@@ -423,7 +429,8 @@ static irqreturn_t riscv_iommu_cmdq_irq_check(int irq, void *data)
static void riscv_iommu_dump_regs(struct riscv_iommu_device *iommu)
{
dev_warn_ratelimited(iommu->dev,
"riscv_iommu_dump_regs: CAP=0x%llx\n FCTL=0x%x\n DDTP=0x%llx\n CQB=0x%llx\n CQH=0x%x\n CQT=0x%x\n CQCSR=0x%x\n",
"%s iommu dump regs: CAP=0x%llx\n FCTL=0x%x\n DDTP=0x%llx\n CQB=0x%llx\n CQH=0x%x\n CQT=0x%x\n CQCSR=0x%x\n",
iommu->name,
riscv_iommu_readq(iommu, RISCV_IOMMU_REG_CAP),
riscv_iommu_readl(iommu, RISCV_IOMMU_REG_FCTL),
riscv_iommu_readq(iommu, RISCV_IOMMU_REG_DDTP),
@@ -447,7 +454,7 @@ static irqreturn_t riscv_iommu_cmdq_process(int irq, void *data)
if (ctrl & (RISCV_IOMMU_CQCSR_CQMF |
RISCV_IOMMU_CQCSR_CMD_TO | RISCV_IOMMU_CQCSR_CMD_ILL)) {
riscv_iommu_queue_ctrl(iommu, &iommu->cmdq, ctrl);
dev_warn_ratelimited(iommu->dev,
dev_err(iommu->dev,
"Command queue error: fault: %d tout: %d err: %d\n",
!!(ctrl & RISCV_IOMMU_CQCSR_CQMF),
!!(ctrl & RISCV_IOMMU_CQCSR_CMD_TO),
@@ -682,31 +689,56 @@ retry:
return;
}
/*
 * riscv_iommu_find_hardware() - map a device ID to its IOMMU hardware instance.
 * @iommus: aggregate zhihe IOMMU device holding the list of hardware instances
 * @devid:  DEVID_DIE0_* identifier of the endpoint device
 *
 * Each hardware instance was named from its "reg-names" entry at probe time
 * ("peri1", "usb", "pcie", "vi", "vp", "vo", "npu"). The DEVID range checks
 * below select the subsystem name; the instance list is then searched for a
 * matching name.
 *
 * Return: the matching riscv_iommu_device, or ERR_PTR(-EINVAL) if devid falls
 * outside every known range or no instance carries that name.
 */
static struct riscv_iommu_device *riscv_iommu_find_hardware(struct zh_iommu_device *iommus, u32 devid)
{
	const char *name;
	struct riscv_iommu_device *iommu;

	/*
	 * Point at a string literal instead of strcpy()ing into a fixed
	 * char[10] buffer: no copy, and no truncation/overflow risk if a
	 * longer subsystem name is ever added to this table.
	 */
	if (devid >= DEVID_DIE0_PERI1_DFMU && devid <= DEVID_DIE0_EMMC)
		name = "peri1";
	else if (devid >= DEVID_DIE0_USB_DFMU && devid <= DEVID_DIE0_USB2_1)
		name = "usb";
	else if (devid >= DEVID_DIE0_PCIE_DFMU && devid <= DEVID_DIE0_TEE_DMAC)
		name = "pcie";
	else if (devid >= DEVID_DIE0_VI_DFMU && devid <= DEVID_DIE0_DW200)
		name = "vi";
	else if (devid >= DEVID_DIE0_VP_DFMU && devid <= DEVID_DIE0_G2D)
		name = "vp";
	else if (devid >= DEVID_DIE0_VO_DFMU && devid <= DEVID_DIE0_PIP_REC)
		name = "vo";
	else if (devid >= DEVID_DIE0_NPU_DFMU && devid <= DEVID_DIE0_NPU)
		name = "npu";
	else
		return ERR_PTR(-EINVAL);

	list_for_each_entry(iommu, &iommus->iommus, list) {
		if (strcmp(name, iommu->name) == 0)
			return iommu;
	}
	return ERR_PTR(-EINVAL);
}
static struct iommu_device *riscv_iommu_probe_device(struct device *dev)
{
struct riscv_iommu_device *iommu;
struct zh_iommu_device *iommus;
struct riscv_iommu_endpoint *ep;
struct iommu_fwspec *fwspec;
int i;
struct device_node *np = dev->of_node;
int i, ret;
fwspec = dev_iommu_fwspec_get(dev);
if (!fwspec || fwspec->ops != &riscv_iommu_ops ||
!fwspec->iommu_fwnode || !fwspec->iommu_fwnode->dev || !fwspec->ids)
!fwspec->iommu_fwnode || !fwspec->iommu_fwnode->dev)
return ERR_PTR(-ENODEV);
iommu = dev_get_drvdata(fwspec->iommu_fwnode->dev);
if (!iommu)
iommus = dev_get_drvdata(fwspec->iommu_fwnode->dev);
if (!iommus)
return ERR_PTR(-ENODEV);
//enable iommu ddt to LV2 mode
int ret = riscv_iommu_enable(iommu, ddt_mode);
if (ret) {
dev_err(dev, "failed to enable iommu LV2, error:%d\n", ret);
return ERR_PTR(ret);
}
if (dev_iommu_priv_get(dev))
return &iommu->iommu;
if (dev_iommu_priv_get(dev)) // in case ep is ready
return &iommus->iommu;
ep = kzalloc(sizeof(*ep), GFP_KERNEL);
if (!ep)
@@ -720,22 +752,37 @@ static struct iommu_device *riscv_iommu_probe_device(struct device *dev)
ep->num_ids = fwspec->num_ids;
ep->domid = 0;
ep->iommu = iommu;
ep->iommu = riscv_iommu_find_hardware(iommus, ep->devids[0]);
if (IS_ERR(ep->iommu)) {
dev_err(dev, "riscv_iommu_find_hardware failed with devid:0x%x", ep->devids[0]);
return ERR_PTR(-EINVAL);
}
ep->iommus = iommus;
ep->dev = dev;
/* Initial DC pointer can be NULL if IOMMU is configured in OFF or BARE mode */
riscv_iommu_get_dc(iommu, ep);
if (of_property_read_u32(np, "iommu-group", &ep->group_id))
ep->group_id = IOMMU_GROUP_PER_DEV;
printk(KERN_CONT "adding device to iommu with devid ");
//enable iommu ddt to LV2 mode
ret = riscv_iommu_enable(ep->iommu, ddt_mode);
if (ret) {
dev_err(dev, "failed to enable iommu LV2, error:%d\n", ret);
return ERR_PTR(ret);
}
/* Initial DC pointer can be NULL if IOMMU is configured in OFF or BARE mode */
riscv_iommu_get_dc(ep->iommu, ep);
printk(KERN_CONT "adding device with devid ");
for (i = 0; i < ep->num_ids; i++) {
printk(KERN_CONT "%i ", ep->devids[i]);
}
printk(KERN_CONT "in domain %i\n", ep->domid);
printk(KERN_CONT "to %s iommu\n", ep->iommu->name);
dev_iommu_priv_set(dev, ep);
riscv_iommu_add_device(iommu, dev);
riscv_iommu_add_device(ep->iommu, dev);
return &iommu->iommu;
return &iommus->iommu;
}
static void riscv_iommu_probe_finalize(struct device *dev)
@@ -763,7 +810,7 @@ static void riscv_iommu_release_device(struct device *dev)
dma_wmb();
ep->dc[i]->fsc = 0ULL;
dma_wmb();
riscv_iommu_iodir_inv_devid(iommu, ep->devids[i]);
riscv_iommu_iodir_inv_devid(ep->iommus, ep->devids[i]);
}
}
@@ -778,9 +825,59 @@ static void riscv_iommu_release_device(struct device *dev)
kfree(ep);
}
static void riscv_iommu_group_release(void *iommu_data)
{
struct riscv_iommu_group *group = iommu_data;
struct zh_iommu_device *iommus = group->iommus;
mutex_lock(&iommus->lock);
list_del(&group->list);
mutex_unlock(&iommus->lock);
}
static char * riscv_iommu_group_names[] = {"IOMMU_GROUP_PER_DEV", "IOMMU_GROUP_VIDEO", "IOMMU_GROUP_NPU"};
static struct iommu_group *riscv_iommu_device_group(struct device *dev)
{
return generic_device_group(dev);
struct riscv_iommu_endpoint *ep = dev_iommu_priv_get(dev);
struct zh_iommu_device *iommus = ep->iommus;
struct riscv_iommu_group *group;
struct iommu_group *grp;
mutex_lock(&iommus->lock);
/* Find if desired group existed in the list of riscv_iommu_device */
list_for_each_entry(group, &iommus->groups, list) {
if (group->group_id == ep->group_id && ep->group_id != IOMMU_GROUP_PER_DEV) {
grp = iommu_group_ref_get(group->group);
mutex_unlock(&iommus->lock);
return grp;
}
}
group = devm_kzalloc(iommus->dev, sizeof(*group), GFP_KERNEL);
if (!group) {
mutex_unlock(&iommus->lock);
return ERR_PTR(-ENOMEM);
}
INIT_LIST_HEAD(&group->list);
group->iommus = iommus;
group->group_id = ep->group_id;
group->group = generic_device_group(dev);
if (IS_ERR(group->group)) {
devm_kfree(iommus->dev, group);
mutex_unlock(&iommus->lock);
return group->group;
}
iommu_group_set_iommudata(group->group, group, riscv_iommu_group_release);
iommu_group_set_name(group->group, riscv_iommu_group_names[group->group_id]);
list_add_tail(&group->list, &iommus->groups);
mutex_unlock(&iommus->lock);
return group->group;
}
/*
@@ -811,7 +908,7 @@ static struct iommu_domain *riscv_iommu_domain_alloc(unsigned type)
domain->pscid = ida_alloc_range(&riscv_iommu_pscids, 1,
RISCV_IOMMU_MAX_PSCID, GFP_KERNEL);
printk("domain alloc %u\n", domain->pscid);
printk("domain->pscid alloc %u\n", domain->pscid);
return &domain->domain;
}
@@ -839,15 +936,15 @@ static void riscv_iommu_domain_free(struct iommu_domain *iommu_domain)
}
static int riscv_iommu_domain_finalize(struct riscv_iommu_domain *domain,
struct riscv_iommu_device *iommu)
struct zh_iommu_device *iommus)
{
struct iommu_domain_geometry *geometry;
/* Domain assigned to another iommu */
if (domain->iommu && domain->iommu != iommu)
if (domain->iommus && domain->iommus != iommus)
return -EINVAL;
/* Domain already initialized */
else if (domain->iommu)
else if (domain->iommus)
return 0;
/*
@@ -871,8 +968,7 @@ static int riscv_iommu_domain_finalize(struct riscv_iommu_domain *domain,
geometry->aperture_end = DMA_BIT_MASK(VA_BITS - 1);
geometry->force_aperture = true;
domain->iommu = iommu;
iommu->domain = domain;
domain->iommus = iommus;
if (domain->domain.type == IOMMU_DOMAIN_IDENTITY)
return 0;
@@ -919,7 +1015,7 @@ static int riscv_iommu_attach_dev(struct iommu_domain *iommu_domain, struct devi
}
/* allocate root pages, initialize io-pgtable ops, etc. */
ret = riscv_iommu_domain_finalize(domain, ep->iommu);
ret = riscv_iommu_domain_finalize(domain, ep->iommus);
if (ret < 0) {
dev_err(dev, "can not finalize domain: %d\n", ret);
mutex_unlock(&ep->lock);
@@ -935,23 +1031,23 @@ static int riscv_iommu_attach_dev(struct iommu_domain *iommu_domain, struct devi
}
for (i = 0; i < ep->num_ids; i++) {
if (!ep->dc[i])
return -ENODEV;
if (!ep->dc[i])
return -ENODEV;
/* S-Stage translation table only. G-Stage not supported. */
val = FIELD_PREP(RISCV_IOMMU_DC_TA_PSCID, domain->pscid);
ep->dc[i]->ta = cpu_to_le64(val);
ep->dc[i]->fsc = cpu_to_le64(riscv_iommu_domain_atp(domain));
/* S-Stage translation table only. G-Stage not supported. */
val = FIELD_PREP(RISCV_IOMMU_DC_TA_PSCID, domain->pscid);
ep->dc[i]->ta = cpu_to_le64(val);
ep->dc[i]->fsc = cpu_to_le64(riscv_iommu_domain_atp(domain));
dma_wmb();
dma_wmb();
/* Mark device context as valid, synchronise device context cache. */
val = RISCV_IOMMU_DC_TC_V;
ep->dc[i]->tc = cpu_to_le64(val);
dma_wmb();
arch_sync_dma_for_device(__pa(ep->dc[i]), SZ_4K, DMA_TO_DEVICE); // flush LV2 DDT
printk("%s flush cache for LV2 DDT pa=0x%lx\n", __func__, __pa(ep->dc[i]));
riscv_iommu_iodir_inv_devid(ep->iommu, ep->devids[i]);
/* Mark device context as valid, synchronise device context cache. */
val = RISCV_IOMMU_DC_TC_V;
ep->dc[i]->tc = cpu_to_le64(val);
dma_wmb();
arch_sync_dma_for_device(__pa(ep->dc[i]), SZ_4K, DMA_TO_DEVICE); // flush LV2 DDT
printk("%s flush cache for LV2 DDT pa=0x%lx\n", __func__, __pa(ep->dc[i]));
riscv_iommu_iodir_inv_devid(ep->iommus, ep->devids[i]);
}
list_add_tail(&ep->domain, &domain->endpoints);
@@ -978,7 +1074,7 @@ void riscv_iommu_flush_iotlb_range(struct iommu_domain *iommu_domain,
return;
/* Domain not attached to an IOMMU! */
BUG_ON(!domain->iommu);
BUG_ON(!domain->iommus);
riscv_iommu_cmd_inval_vma(&cmd);
riscv_iommu_cmd_inval_set_pscid(&cmd, domain->pscid);
@@ -988,12 +1084,12 @@ void riscv_iommu_flush_iotlb_range(struct iommu_domain *iommu_domain,
for (iova = *start; iova <= *end; iova += *pgsize) {
//printk("riscv_iommu_cmd_inval_set_addr iova = 0x%lx\n", iova);
riscv_iommu_cmd_inval_set_addr(&cmd, iova);
riscv_iommu_post(domain->iommu, &cmd);
riscv_iommu_post(domain->iommus, &cmd);
}
} else {
riscv_iommu_post(domain->iommu, &cmd);
riscv_iommu_post(domain->iommus, &cmd);
}
riscv_iommu_iofence_sync(domain->iommu);
riscv_iommu_iofence_sync(domain->iommus);
}
static void riscv_iommu_flush_iotlb_all(struct iommu_domain *iommu_domain)
@@ -1035,7 +1131,7 @@ static int riscv_iommu_map_pages(struct iommu_domain *iommu_domain,
ret = domain->pgtbl.ops.map_pages(&domain->pgtbl.ops, iova, phys,
pgsize, pgcount, prot, gfp, mapped);
dev_dbg(domain->iommu->dev, "LV3 target pa = 0x%llx\n", domain->pgtbl.ops.iova_to_phys(&domain->pgtbl.ops, iova));
dev_dbg(domain->iommus->dev, "LV3 target pa = 0x%llx\n", domain->pgtbl.ops.iova_to_phys(&domain->pgtbl.ops, iova));
return ret;
}
@@ -1136,12 +1232,12 @@ static int riscv_iommu_enable(struct riscv_iommu_device *iommu, unsigned request
}
mode_readback = FIELD_GET(RISCV_IOMMU_DDTP_MODE, ddtp);
dev_info(dev, "mode_readback: %i, mode: %i\n", mode_readback, mode);
dev_info(dev, "%s iommu mode_readback: %i, mode: %i\n", iommu->name, mode_readback, mode);
if (mode_readback != mode)
goto fail;
iommu->ddt_mode = mode;
dev_info(dev, "ddt_mode: %i\n", iommu->ddt_mode);
dev_info(dev, "%s iommu ddt_mode: %i\n", iommu->name, iommu->ddt_mode);
return 0;
fail:
@@ -1179,18 +1275,21 @@ static const struct iommu_ops riscv_iommu_ops = {
.default_domain_ops = &riscv_iommu_domain_ops,
};
void riscv_iommu_remove(struct riscv_iommu_device *iommu)
void riscv_iommu_remove(struct zh_iommu_device *iommus)
{
iommu_device_unregister(&iommu->iommu);
iommu_device_sysfs_remove(&iommu->iommu);
riscv_iommu_enable(iommu, RISCV_IOMMU_DDTP_MODE_OFF);
riscv_iommu_queue_free(iommu, &iommu->fltq);
riscv_iommu_queue_free(iommu, &iommu->cmdq);
struct riscv_iommu_device *iommu;
iommu_device_unregister(&iommus->iommu);
iommu_device_sysfs_remove(&iommus->iommu);
list_for_each_entry(iommu, &iommus->iommus, list) {
riscv_iommu_enable(iommu, RISCV_IOMMU_DDTP_MODE_OFF);
riscv_iommu_queue_free(iommu, &iommu->fltq);
riscv_iommu_queue_free(iommu, &iommu->cmdq);
}
}
int riscv_iommu_init(struct riscv_iommu_device *iommu)
{
struct device *dev = iommu->dev;
int ret;
iommu->eps = RB_ROOT;
@@ -1217,19 +1316,6 @@ int riscv_iommu_init(struct riscv_iommu_device *iommu)
if (ret)
goto fail;
ret = riscv_iommu_sysfs_add(iommu);
if (ret) {
dev_err(dev, "cannot register sysfs interface (%d)\n", ret);
goto fail;
}
ret = iommu_device_register(&iommu->iommu, &riscv_iommu_ops, dev);
if (ret) {
dev_err(dev, "cannot register iommu interface (%d)\n", ret);
iommu_device_sysfs_remove(&iommu->iommu);
goto fail;
}
return 0;
fail:
riscv_iommu_enable(iommu, RISCV_IOMMU_DDTP_MODE_OFF);
@@ -1237,3 +1323,24 @@ int riscv_iommu_init(struct riscv_iommu_device *iommu)
riscv_iommu_queue_free(iommu, &iommu->cmdq);
return ret;
}
int riscv_iommu_register(struct zh_iommu_device *iommus)
{
int ret;
ret = iommu_device_sysfs_add(&iommus->iommu, NULL,
NULL, "zhihe-iommu");
if (ret) {
dev_err(iommus->dev, "cannot register sysfs interface (%d)\n", ret);
return ret;
}
ret = iommu_device_register(&iommus->iommu, &riscv_iommu_ops, iommus->dev);
if (ret) {
dev_err(iommus->dev, "cannot register iommu interface (%d)\n", ret);
iommu_device_sysfs_remove(&iommus->iommu);
return ret;
}
return ret;
}

View File

@@ -46,9 +46,22 @@ enum riscv_queue_ids {
RISCV_IOMMU_PAGE_REQUEST_QUEUE = 2
};
struct riscv_iommu_device {
/* virtual iommu device representing all the real iommu hardware IPs and register to kernel iommu framework */
struct zh_iommu_device {
struct iommu_device iommu; /* iommu core interface */
struct device *dev; /* iommu hardware */
struct list_head groups; /* struct riscv_iommu_group{} existed */
struct list_head iommus; /* struct riscv_iommu_device{} for individual iommu IPs existed*/
struct mutex lock; /* protect modification of member: groups,iommus */
struct device **pd_devs;
int num_pds;
struct clk *iommu_ptw_aclk;
};
/* real iommu hardware IP */
struct riscv_iommu_device {
struct device *dev; /* iommu hardware */
char *name;
/* hardware control register space */
void __iomem *reg;
@@ -79,7 +92,7 @@ struct riscv_iommu_device {
struct rb_root eps;
struct mutex eps_mutex;
struct riscv_iommu_domain *domain;
struct list_head list;
};
struct riscv_iommu_domain {
@@ -90,7 +103,7 @@ struct riscv_iommu_domain {
struct list_head notifiers;
struct mutex lock;
struct mmu_notifier mn;
struct riscv_iommu_device *iommu;
struct zh_iommu_device *iommus;
unsigned mode; /* RIO_ATP_MODE_* enum */
unsigned pscid; /* RISC-V IOMMU PSCID / GSCID */
@@ -104,11 +117,21 @@ struct riscv_iommu_endpoint {
unsigned num_ids;
unsigned domid; /* PCI domain number, segment */
struct rb_node node; /* device tracking node (lookup by devid) */
struct riscv_iommu_device *iommu; /* parent iommu device */
struct zh_iommu_device *iommus; /* virtual iommu device */
struct riscv_iommu_device *iommu; /* real parent iommu device */
struct mutex lock;
struct list_head domain; /* endpoint attached managed domain */
struct riscv_iommu_dc **dc; /* device context pointer entries */
u32 *devids; /* PCI bus:device:function number */
unsigned int group_id; /* desire to join which iommu_group */
};
/* include one instance of struct iommu_group and it's managing tools */
struct riscv_iommu_group {
struct list_head list;
struct zh_iommu_device *iommus;
struct iommu_group *group;
unsigned int group_id;
};
/* Helper functions and macros */
@@ -138,8 +161,9 @@ static inline void riscv_iommu_writeq(struct riscv_iommu_device *iommu,
}
int riscv_iommu_init(struct riscv_iommu_device *iommu);
void riscv_iommu_remove(struct riscv_iommu_device *iommu);
void riscv_iommu_remove(struct zh_iommu_device *iommus);
int riscv_iommu_sysfs_add(struct riscv_iommu_device *iommu);
int riscv_iommu_sysfs_add(struct zh_iommu_device *iommus);
int riscv_iommu_register(struct zh_iommu_device *iommus);
#endif

View File

@@ -112,6 +112,7 @@ int dw_pcie_get_resources(struct dw_pcie *pci)
pci->dbi_base = devm_pci_remap_cfg_resource(pci->dev, res);
if (IS_ERR(pci->dbi_base))
return PTR_ERR(pci->dbi_base);
pci->dbi_phys_addr = res->start;
}
/* DBI2 is mainly useful for the endpoint controller */
@@ -155,6 +156,16 @@ int dw_pcie_get_resources(struct dw_pcie *pci)
}
}
/* ELBI is an optional resource */
if (!pci->elbi_base) {
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
if (res) {
pci->elbi_base = devm_ioremap_resource(pci->dev, res);
if (IS_ERR(pci->elbi_base))
return PTR_ERR(pci->elbi_base);
}
}
/* LLDD is supposed to manually switch the clocks and resets state */
if (dw_pcie_cap_is(pci, REQ_RES)) {
ret = dw_pcie_get_clocks(pci);
@@ -881,7 +892,7 @@ static int dw_pcie_edma_irq_verify(struct dw_pcie *pci)
{
struct platform_device *pdev = to_platform_device(pci->dev);
u16 ch_cnt = pci->edma.ll_wr_cnt + pci->edma.ll_rd_cnt;
char name[6];
char name[15];
int ret;
if (pci->edma.nr_irqs == 1)

View File

@@ -70,6 +70,9 @@
#define LINK_WAIT_IATU 9
/* Synopsys-specific PCIe configuration registers */
#define PCIE_PORT_FORCE 0x708
#define PORT_FORCE_DO_DESKEW_FOR_SRIS BIT(23)
#define PCIE_PORT_AFR 0x70C
#define PORT_AFR_N_FTS_MASK GENMASK(15, 8)
#define PORT_AFR_N_FTS(n) FIELD_PREP(PORT_AFR_N_FTS_MASK, n)
@@ -90,9 +93,13 @@
#define PORT_LINK_MODE_2_LANES PORT_LINK_MODE(0x3)
#define PORT_LINK_MODE_4_LANES PORT_LINK_MODE(0x7)
#define PORT_LINK_MODE_8_LANES PORT_LINK_MODE(0xf)
#define PORT_LINK_MODE_16_LANES PORT_LINK_MODE(0x1f)
#define PCIE_PORT_LANE_SKEW 0x714
#define PORT_LANE_SKEW_INSERT_MASK GENMASK(23, 0)
#define PCIE_PORT_DEBUG0 0x728
#define PORT_LOGIC_LTSSM_STATE_MASK 0x1f
#define PORT_LOGIC_LTSSM_STATE_MASK 0x3f
#define PORT_LOGIC_LTSSM_STATE_L0 0x11
#define PCIE_PORT_DEBUG1 0x72C
#define PCIE_PORT_DEBUG1_LINK_UP BIT(4)
@@ -116,11 +123,31 @@
#define GEN3_RELATED_OFF 0x890
#define GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL BIT(0)
#define GEN3_RELATED_OFF_EQ_PHASE_2_3 BIT(9)
#define GEN3_RELATED_OFF_RXEQ_RGRDLESS_RXTS BIT(13)
#define GEN3_RELATED_OFF_GEN3_EQ_DISABLE BIT(16)
#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT 24
#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK GENMASK(25, 24)
#define GEN3_EQ_CONTROL_OFF 0x8A8
#define GEN3_EQ_CONTROL_OFF_FB_MODE GENMASK(3, 0)
#define GEN3_EQ_CONTROL_OFF_PHASE23_EXIT_MODE BIT(4)
#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC GENMASK(23, 8)
#define GEN3_EQ_CONTROL_OFF_FOM_INC_INITIAL_EVAL BIT(24)
#define GEN3_EQ_FB_MODE_DIR_CHANGE_OFF 0x8AC
#define GEN3_EQ_FMDC_T_MIN_PHASE23 GENMASK(4, 0)
#define GEN3_EQ_FMDC_N_EVALS GENMASK(9, 5)
#define GEN3_EQ_FMDC_MAX_PRE_CURSOR_DELTA GENMASK(13, 10)
#define GEN3_EQ_FMDC_MAX_POST_CURSOR_DELTA GENMASK(17, 14)
#define COHERENCY_CONTROL_1_OFF 0x8E0
#define CFG_MEMTYPE_BOUNDARY_LOW_ADDR_MASK GENMASK(31, 2)
#define CFG_MEMTYPE_VALUE BIT(0)
#define COHERENCY_CONTROL_2_OFF 0x8E4
#define COHERENCY_CONTROL_3_OFF 0x8E8
#define PCIE_PORT_MULTI_LANE_CTRL 0x8C0
#define PORT_MLTI_UPCFG_SUPPORT BIT(7)
@@ -147,11 +174,14 @@
#define PCIE_ATU_TYPE_IO 0x2
#define PCIE_ATU_TYPE_CFG0 0x4
#define PCIE_ATU_TYPE_CFG1 0x5
#define PCIE_ATU_TYPE_MSG 0x10
#define PCIE_ATU_TD BIT(8)
#define PCIE_ATU_FUNC_NUM(pf) ((pf) << 20)
#define PCIE_ATU_REGION_CTRL2 0x004
#define PCIE_ATU_ENABLE BIT(31)
#define PCIE_ATU_BAR_MODE_ENABLE BIT(30)
#define PCIE_ATU_CFG_SHIFT_MODE_ENABLE BIT(28)
#define PCIE_ATU_INHIBIT_PAYLOAD BIT(22)
#define PCIE_ATU_FUNC_NUM_MATCH_EN BIT(19)
#define PCIE_ATU_LOWER_BASE 0x008
#define PCIE_ATU_UPPER_BASE 0x00C
@@ -190,6 +220,24 @@
#define PCIE_PL_CHK_REG_ERR_ADDR 0xB28
/*
* 16.0 GT/s (Gen 4) lane margining register definitions
*/
#define GEN4_LANE_MARGINING_1_OFF 0xB80
#define MARGINING_MAX_VOLTAGE_OFFSET GENMASK(29, 24)
#define MARGINING_NUM_VOLTAGE_STEPS GENMASK(22, 16)
#define MARGINING_MAX_TIMING_OFFSET GENMASK(13, 8)
#define MARGINING_NUM_TIMING_STEPS GENMASK(5, 0)
#define GEN4_LANE_MARGINING_2_OFF 0xB84
#define MARGINING_IND_ERROR_SAMPLER BIT(28)
#define MARGINING_SAMPLE_REPORTING_METHOD BIT(27)
#define MARGINING_IND_LEFT_RIGHT_TIMING BIT(26)
#define MARGINING_IND_UP_DOWN_VOLTAGE BIT(25)
#define MARGINING_VOLTAGE_SUPPORTED BIT(24)
#define MARGINING_MAXLANES GENMASK(20, 16)
#define MARGINING_SAMPLE_RATE_TIMING GENMASK(13, 8)
#define MARGINING_SAMPLE_RATE_VOLTAGE GENMASK(5, 0)
/*
* iATU Unroll-specific register definitions
* From 4.80 core version the address translation will be made by unroll
@@ -222,6 +270,21 @@
#define PCIE_RAS_DES_EVENT_COUNTER_DATA 0xc
/* PTM register definitions */
#define PTM_RES_REQ_CTRL 0x8
#define PTM_RES_CCONTEXT_VALID BIT(0)
#define PTM_REQ_AUTO_UPDATE_ENABLED BIT(0)
#define PTM_REQ_START_UPDATE BIT(1)
#define PTM_LOCAL_LSB 0x10
#define PTM_LOCAL_MSB 0x14
#define PTM_T1_T2_LSB 0x18
#define PTM_T1_T2_MSB 0x1c
#define PTM_T3_T4_LSB 0x28
#define PTM_T3_T4_MSB 0x2c
#define PTM_MASTER_LSB 0x38
#define PTM_MASTER_MSB 0x3c
/*
* The default address offset between dbi_base and atu_base. Root controller
* drivers are not required to initialize atu_base if the offset matches this
@@ -292,8 +355,40 @@ enum dw_pcie_ltssm {
/* Need to align with PCIE_PORT_DEBUG0 bits 0:5 */
DW_PCIE_LTSSM_DETECT_QUIET = 0x0,
DW_PCIE_LTSSM_DETECT_ACT = 0x1,
DW_PCIE_LTSSM_POLL_ACTIVE = 0x2,
DW_PCIE_LTSSM_POLL_COMPLIANCE = 0x3,
DW_PCIE_LTSSM_POLL_CONFIG = 0x4,
DW_PCIE_LTSSM_PRE_DETECT_QUIET = 0x5,
DW_PCIE_LTSSM_DETECT_WAIT = 0x6,
DW_PCIE_LTSSM_CFG_LINKWD_START = 0x7,
DW_PCIE_LTSSM_CFG_LINKWD_ACEPT = 0x8,
DW_PCIE_LTSSM_CFG_LANENUM_WAI = 0x9,
DW_PCIE_LTSSM_CFG_LANENUM_ACEPT = 0xa,
DW_PCIE_LTSSM_CFG_COMPLETE = 0xb,
DW_PCIE_LTSSM_CFG_IDLE = 0xc,
DW_PCIE_LTSSM_RCVRY_LOCK = 0xd,
DW_PCIE_LTSSM_RCVRY_SPEED = 0xe,
DW_PCIE_LTSSM_RCVRY_RCVRCFG = 0xf,
DW_PCIE_LTSSM_RCVRY_IDLE = 0x10,
DW_PCIE_LTSSM_L0 = 0x11,
DW_PCIE_LTSSM_L0S = 0x12,
DW_PCIE_LTSSM_L123_SEND_EIDLE = 0x13,
DW_PCIE_LTSSM_L1_IDLE = 0x14,
DW_PCIE_LTSSM_L2_IDLE = 0x15,
DW_PCIE_LTSSM_L2_WAKE = 0x16,
DW_PCIE_LTSSM_DISABLED_ENTRY = 0x17,
DW_PCIE_LTSSM_DISABLED_IDLE = 0x18,
DW_PCIE_LTSSM_DISABLED = 0x19,
DW_PCIE_LTSSM_LPBK_ENTRY = 0x1a,
DW_PCIE_LTSSM_LPBK_ACTIVE = 0x1b,
DW_PCIE_LTSSM_LPBK_EXIT = 0x1c,
DW_PCIE_LTSSM_LPBK_EXIT_TIMEOUT = 0x1d,
DW_PCIE_LTSSM_HOT_RESET_ENTRY = 0x1e,
DW_PCIE_LTSSM_HOT_RESET = 0x1f,
DW_PCIE_LTSSM_RCVRY_EQ0 = 0x20,
DW_PCIE_LTSSM_RCVRY_EQ1 = 0x21,
DW_PCIE_LTSSM_RCVRY_EQ2 = 0x22,
DW_PCIE_LTSSM_RCVRY_EQ3 = 0x23,
DW_PCIE_LTSSM_UNKNOWN = 0xFFFFFFFF,
};
@@ -383,8 +478,10 @@ struct dw_pcie_ops {
struct dw_pcie {
struct device *dev;
void __iomem *dbi_base;
resource_size_t dbi_phys_addr;
void __iomem *dbi_base2;
void __iomem *atu_base;
void __iomem *elbi_base;
size_t atu_size;
u32 num_ib_windows;
u32 num_ob_windows;

View File

@@ -35,10 +35,6 @@
#define E16PHY_PROTLCOL_REG 0x00000008
#define E16PHY_RES_RTURN_REG 0x00000048
#define LANE_SKEW_OFF 0x00000714
#define GEN3_EQ_CONTROL_OFF 0x000008a8
#define GEN3_EQ_FB_MODE_DIR_CHANGE_OFF 0x000008ac
#define PCIE_EXTENDED_REG0 0x00000154
#define PCIE_EXTENDED_REG1 0x00000158
#define PCIE_EXTENDED_REG2 0x0000015c
@@ -119,11 +115,6 @@ static void __maybe_unused zhihe_pcie_ltssm_disable(struct dw_pcie *pci)
writel(0x1014, pcie->cfg_base + PCIE_GEN3X4_CTRL_REG);
}
static void __maybe_unused zhihe_pcie_stop_link(struct dw_pcie *pci)
{
zhihe_pcie_ltssm_disable(pci);
}
static void zhihe_pcie_wait_linkup(struct zhihe_pcie *pcie)
{
u32 ltssm_stat = 0;
@@ -274,6 +265,13 @@ static void __maybe_unused zhihe_pcie_phy_deinit(struct zhihe_pcie *pcie)
phy_power_off(pcie->phy);
}
static void zhihe_pcie_stop_link(struct dw_pcie *pci)
{
struct zhihe_pcie *zhihe_pcie = dev_get_drvdata(pci->dev);
zhihe_pcie_ltssm_disable(pci);
}
static const struct dw_pcie_ops dw_pcie_ops = {
.stop_link = zhihe_pcie_stop_link,
};
@@ -302,13 +300,13 @@ static int zhihe_pcie_ipctrl_init(struct dw_pcie_rp *pp)
if (pcie->ip_type == PCIE_X4_TYPE) {
/*cfg x4 lane*/
dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, 0x70120);
dw_pcie_writel_dbi(pci, LANE_SKEW_OFF, 0x1c000000);
dw_pcie_writel_dbi(pci, PCIE_PORT_LANE_SKEW, 0x1c000000);
dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, 0xc020071);
dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, 0x304be);
} else {
/*cfg x1 lane*/
dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, 0x10120);
dw_pcie_writel_dbi(pci, LANE_SKEW_OFF, 0x4000000);
dw_pcie_writel_dbi(pci, PCIE_PORT_LANE_SKEW, 0x4000000);
dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, 0xc020071);
dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, 0x101be);
}

View File

@@ -133,7 +133,7 @@ static int a210_pd_power_off(struct generic_pm_domain *domain)
#endif
if (a210_pd->num_clks)
clk_bulk_disable(a210_pd->num_clks, a210_pd->clks);
clk_bulk_disable_unprepare(a210_pd->num_clks, a210_pd->clks);
ret = reset_control_assert(a210_pd->reset);
if (ret)
@@ -152,7 +152,7 @@ static int a210_pd_power_on(struct generic_pm_domain *domain)
return ret;
if (a210_pd->num_clks) {
ret = clk_bulk_enable(a210_pd->num_clks, a210_pd->clks);
ret = clk_bulk_prepare_enable(a210_pd->num_clks, a210_pd->clks);
if (ret)
return ret;
}
@@ -417,12 +417,6 @@ static int a210_add_one_domain(struct platform_device *pdev, struct device_node
}
}
ret = clk_bulk_prepare(a210_pd->num_clks, a210_pd->clks);
if (ret) {
clk_bulk_put(a210_pd->num_clks, a210_pd->clks);
goto clk_fail;
}
}
pd_soc->domains[pd_soc->num_domains++] = a210_pd;

View File

@@ -119,4 +119,9 @@
#define DEVID_DIE1_NPU_DFMU 0x70
#define DEVID_DIE1_NPU 0x71
/* GROUPS */
#define IOMMU_GROUP_PER_DEV 0x00
#define IOMMU_GROUP_VIDEO 0x01
#define IOMMU_GROUP_NPU 0x02
#endif