diff --git a/arch/riscv/boot/dts/spacemit/k1-x-camera-sdk.dtsi b/arch/riscv/boot/dts/spacemit/k1-x-camera-sdk.dtsi index 2680b7ee92c3..81a35b491131 100644 --- a/arch/riscv/boot/dts/spacemit/k1-x-camera-sdk.dtsi +++ b/arch/riscv/boot/dts/spacemit/k1-x-camera-sdk.dtsi @@ -69,12 +69,14 @@ interrupt-names = "ipe-irq"; clocks = <&ccu CLK_CSI>, <&ccu CLK_CCIC_4X>, - <&ccu CLK_ISP_BUS>; - clock-names = "csi_func", "ccic_func", "isp_axi"; + <&ccu CLK_ISP_BUS>, + <&ccu CLK_DPU_MCLK>; + clock-names = "csi_func", "ccic_func", "isp_axi", "dpu_mclk"; resets = <&reset RESET_ISP_AHB>, <&reset RESET_CSI>, - <&reset RESET_CCIC_4X>, <&reset RESET_ISP_CI>; + <&reset RESET_CCIC_4X>, <&reset RESET_ISP_CI>, + <&reset RESET_LCD_MCLK>; reset-names = "isp_ahb_reset", "csi_reset", - "ccic_4x_reset", "isp_ci_reset"; + "ccic_4x_reset", "isp_ci_reset", "mclk_reset"; interconnects = <&dram_range3>; interconnect-names = "dma-mem"; status = "okay"; @@ -89,14 +91,16 @@ interrupt-parent = <&intc>; interrupts = <82>; interrupt-names = "ipe-irq"; - clocks = <&ccu CLK_CSI>, <&ccu CLK_CCIC_4X>, - <&ccu CLK_ISP_BUS>; - clock-names = "csi_func", "ccic_func", - "isp_axi"; + clocks = <&ccu CLK_CSI>, + <&ccu CLK_CCIC_4X>, + <&ccu CLK_ISP_BUS>, + <&ccu CLK_DPU_MCLK>; + clock-names = "csi_func", "ccic_func", "isp_axi", "dpu_mclk"; resets = <&reset RESET_ISP_AHB>, <&reset RESET_CSI>, - <&reset RESET_CCIC_4X>, <&reset RESET_ISP_CI>; + <&reset RESET_CCIC_4X>, <&reset RESET_ISP_CI>, + <&reset RESET_LCD_MCLK>; reset-names = "isp_ahb_reset", "csi_reset", - "ccic_4x_reset", "isp_ci_reset"; + "ccic_4x_reset", "isp_ci_reset", "mclk_reset"; interconnects = <&dram_range3>; interconnect-names = "dma-mem"; status = "okay"; @@ -111,14 +115,16 @@ interrupt-parent = <&intc>; interrupts = <83>; interrupt-names = "ipe-irq"; - clocks = <&ccu CLK_CSI>, <&ccu CLK_CCIC_4X>, - <&ccu CLK_ISP_BUS>; - clock-names = "csi_func", "ccic_func", - "isp_axi"; + clocks = <&ccu CLK_CSI>, + <&ccu CLK_CCIC_4X>, + <&ccu CLK_ISP_BUS>, + <&ccu 
CLK_DPU_MCLK>; + clock-names = "csi_func", "ccic_func", "isp_axi", "dpu_mclk"; resets = <&reset RESET_ISP_AHB>, <&reset RESET_CSI>, - <&reset RESET_CCIC_4X>, <&reset RESET_ISP_CI>; + <&reset RESET_CCIC_4X>, <&reset RESET_ISP_CI>, + <&reset RESET_LCD_MCLK>; reset-names = "isp_ahb_reset", "csi_reset", - "ccic_4x_reset", "isp_ci_reset"; + "ccic_4x_reset", "isp_ci_reset", "mclk_reset"; interconnects = <&dram_range3>; interconnect-names = "dma-mem"; status = "okay"; diff --git a/drivers/media/platform/spacemit/camera/Makefile b/drivers/media/platform/spacemit/camera/Makefile index f5e8a1d233b4..ae794b255534 100644 --- a/drivers/media/platform/spacemit/camera/Makefile +++ b/drivers/media/platform/spacemit/camera/Makefile @@ -12,6 +12,7 @@ cam_ccic_v2-objs += cam_ccic/ccic_hwreg.o cam_ccic_v2-objs += cam_ccic/csiphy.o cam_ccic_v2-objs += cam_ccic/ccic_drv.o cam_ccic_v2-objs += cam_ccic/dptc_drv.o +cam_ccic_v2-objs += cam_ccic/ccic_vdev.o obj-$(CONFIG_SPACEMIT_K1X_CPP_V2) += cam_cpp_v2.o cam_cpp_v2-objs += cam_cpp/k1x_cpp.o diff --git a/drivers/media/platform/spacemit/camera/cam_ccic/ccic_drv.c b/drivers/media/platform/spacemit/camera/cam_ccic/ccic_drv.c index ff8baf3ac03a..2bcaa1a9f834 100644 --- a/drivers/media/platform/spacemit/camera/cam_ccic/ccic_drv.c +++ b/drivers/media/platform/spacemit/camera/cam_ccic/ccic_drv.c @@ -18,11 +18,13 @@ #include #include #include +#include #include #include #include "ccic_drv.h" #include "ccic_hwreg.h" #include "csiphy.h" +#include "ccic_vdev.h" #ifdef CONFIG_ARCH_ZYNQMP #include "dptc_drv.h" @@ -31,8 +33,19 @@ #define K1X_CCIC_DRV_NAME "k1xccic" +#define CAM_ALIGN(a, b) ({ \ + unsigned int ___tmp1 = (a); \ + unsigned int ___tmp2 = (b); \ + unsigned int ___tmp3 = ___tmp1 % ___tmp2; \ + ___tmp1 /= ___tmp2; \ + if (___tmp3) \ + ___tmp1++; \ + ___tmp1 *= ___tmp2; \ + ___tmp1; \ + }) static LIST_HEAD(ccic_devices); static DEFINE_MUTEX(list_lock); +static void ccic_dma_bh_handler(struct ccic_dma_work_struct *ccic_dma_work); static void 
ccic_irqmask(struct ccic_ctrl *ctrl, int on) { @@ -131,17 +144,23 @@ static int ccic_config_csi2_dphy(struct ccic_ctrl *ctrl, return ret; } -static int ccic_config_csi2_vc(struct ccic_ctrl *ctrl, int md, u8 vc0, u8 vc1) +static int ccic_config_csi2_vc_dt(struct ccic_ctrl *ctrl, int md, u8 vc0, u8 vc1, u8 dt0, u8 dt1) { int ret = 0; struct ccic_dev *ccic_dev = ctrl->ccic_dev; switch (md) { case CCIC_CSI2VC_NM: /* Normal mode */ + ccic_reg_clear_bit(ccic_dev, REG_CSI2_VCCTRL, CSI2_VCCTRL_DT_ENABLE); + ccic_reg_clear_bit(ccic_dev, REG_CSI2_DT_FLT, CSI2_DT_FLT0_EN); + ccic_reg_clear_bit(ccic_dev, REG_CSI2_DT_FLT, CSI2_DT_FLT1_EN); ccic_reg_write_mask(ccic_dev, REG_CSI2_VCCTRL, CSI2_VCCTRL_MD_NORMAL, CSI2_VCCTRL_MD_MASK); break; case CCIC_CSI2VC_VC: /* Virtual Channel mode */ + ccic_reg_clear_bit(ccic_dev, REG_CSI2_VCCTRL, CSI2_VCCTRL_DT_ENABLE); + ccic_reg_clear_bit(ccic_dev, REG_CSI2_DT_FLT, CSI2_DT_FLT0_EN); + ccic_reg_clear_bit(ccic_dev, REG_CSI2_DT_FLT, CSI2_DT_FLT1_EN); ccic_reg_write_mask(ccic_dev, REG_CSI2_VCCTRL, CSI2_VCCTRL_MD_VC, CSI2_VCCTRL_MD_MASK); ccic_reg_write_mask(ccic_dev, REG_CSI2_VCCTRL, vc0 << 14, @@ -150,9 +169,23 @@ static int ccic_config_csi2_vc(struct ccic_ctrl *ctrl, int md, u8 vc0, u8 vc1) CSI2_VCCTRL_VC1_MASK); break; case CCIC_CSI2VC_DT: /* TODO: Data-Type Interleaving */ - ccic_reg_write_mask(ccic_dev, REG_CSI2_VCCTRL, - CSI2_VCCTRL_MD_DT, CSI2_VCCTRL_MD_MASK); + //ccic_reg_write_mask(ccic_dev, REG_CSI2_VCCTRL, + // CSI2_VCCTRL_MD_DT, CSI2_VCCTRL_MD_MASK); pr_err("csi2 vc mode %d todo\n", md); + ret = -EINVAL; + break; + case CCIC_CSI2VC_VCDT: + ccic_reg_set_bit(ccic_dev, REG_CSI2_VCCTRL, CSI2_VCCTRL_DT_ENABLE); + ccic_reg_set_bit(ccic_dev, REG_CSI2_DT_FLT, CSI2_DT_FLT0_EN); + ccic_reg_set_bit(ccic_dev, REG_CSI2_DT_FLT, CSI2_DT_FLT2_EN); + ccic_reg_write_mask(ccic_dev, REG_CSI2_VCCTRL, vc0 << 14, + CSI2_VCCTRL_VC0_MASK); + ccic_reg_write_mask(ccic_dev, REG_CSI2_VCCTRL, vc1 << 22, + CSI2_VCCTRL_VC1_MASK); + ccic_reg_write_mask(ccic_dev, 
REG_CSI2_DT_FLT, dt0 << CSI2_DT_FLT0_SHIFT, + CSI2_DT_FLT0_MASK); + ccic_reg_write_mask(ccic_dev, REG_CSI2_DT_FLT, dt1 << CSI2_DT_FLT2_SHIFT, + CSI2_DT_FLT2_MASK); break; default: dev_err(ccic_dev->dev, "invalid csi2 vc mode %d\n", md); @@ -292,10 +325,10 @@ static int axi_set_clock_rates(struct clk *clock) return 0; } -int ccic_dma_clk_enable(struct ccic_dma *dma, int on) +static int ccic_dma_clk_enable(struct ccic_dma *dma, int on) { - struct ccic_dev *ccic = dma->ccic_dev; - struct device *dev = &ccic->pdev->dev; + struct ccic_dev *ccic_dev = dma->ccic_dev; + struct device *dev = &ccic_dev->pdev->dev; int ret; if (on) { @@ -303,30 +336,198 @@ int ccic_dma_clk_enable(struct ccic_dma *dma, int on) if (ret < 0) return ret; - ret = clk_prepare_enable(ccic->axi_clk); + ret = clk_prepare_enable(ccic_dev->axi_clk); if (ret < 0) { pm_runtime_put_sync(dev); return ret; } - reset_control_deassert(ccic->isp_ci_reset); + reset_control_deassert(ccic_dev->isp_ci_reset); - ret = axi_set_clock_rates(ccic->axi_clk); + ret = axi_set_clock_rates(ccic_dev->axi_clk); if (ret < 0) { pm_runtime_put_sync(dev); return ret; } - reset_control_deassert(ccic->isp_ci_reset); + reset_control_deassert(ccic_dev->isp_ci_reset); } else { - clk_disable_unprepare(ccic->axi_clk); - reset_control_assert(ccic->isp_ci_reset); + clk_disable_unprepare(ccic_dev->axi_clk); + reset_control_assert(ccic_dev->isp_ci_reset); pm_runtime_put_sync(dev); } return 0; } +static int ccic_dma_enable(struct ccic_dma *dma_dev, int enable) +{ + struct ccic_dev *ccic_dev = dma_dev->ccic_dev; + + if (enable) { + //ccic_reg_set_bit(ccic_dev, REG_IRQMASK, FRAMEIRQS); + ccic_dma_set_burst(ccic_dev); + /* 0x3c: enable ccic dma */ + ccic_reg_set_bit(ccic_dev, REG_CTRL0, BIT(0)); + ccic_reg_set_bit(ccic_dev, 0x40, BIT(31) | BIT(26)); + ccic_reg_clear_bit(ccic_dev, 0x40, BIT(25)); + } else { + //ccic_reg_clear_bit(ccic_dev, REG_IRQMASK, FRAMEIRQS); + /* 0x3c: disable ccic dma */ + ccic_reg_clear_bit(ccic_dev, REG_CTRL0, BIT(0)); + 
} + + return 0; +} + +static int ccic_dma_set_fmt(struct ccic_dma *dma_dev, + unsigned int width, + unsigned int height, + unsigned int pix_fmt) +{ + struct ccic_dev *ccic_dev = dma_dev->ccic_dev; + struct device *dev = ccic_dev->dev; + unsigned int data_fmt = C0_DF_BAYER, imgsz_w = 0, imgsz_h = 0; + unsigned int stride_y = 0, stride_uv = 0; + + switch (pix_fmt) { + case MEDIA_BUS_FMT_UYVY8_2X8: + data_fmt = C0_DF_BAYER; + imgsz_w = width; + stride_y = 0; + imgsz_h = height; + stride_uv = 0; + break; + case MEDIA_BUS_FMT_SBGGR8_1X8: + case MEDIA_BUS_FMT_SGBRG8_1X8: + case MEDIA_BUS_FMT_SGRBG8_1X8: + case MEDIA_BUS_FMT_SRGGB8_1X8: + data_fmt = C0_DF_BAYER; + imgsz_w = width; + stride_y = 0;//CAM_ALIGN(imgsz_w, 8); + imgsz_h = height; + stride_uv = 0; + break; + case MEDIA_BUS_FMT_SBGGR10_1X10: + case MEDIA_BUS_FMT_SGBRG10_1X10: + case MEDIA_BUS_FMT_SGRBG10_1X10: + case MEDIA_BUS_FMT_SRGGB10_1X10: + data_fmt = C0_DF_BAYER; + imgsz_w = width * 5 / 4; + stride_y = 0;//CAM_ALIGN(imgsz_w, 8); + imgsz_h = height; + stride_uv = 0; + break; + case MEDIA_BUS_FMT_SBGGR12_1X12: + case MEDIA_BUS_FMT_SGBRG12_1X12: + case MEDIA_BUS_FMT_SGRBG12_1X12: + case MEDIA_BUS_FMT_SRGGB12_1X12: + data_fmt = C0_DF_BAYER; + imgsz_w = width * 3 / 2; + stride_y = 0;//CAM_ALIGN(imgsz_w, 8); + imgsz_h = height; + stride_uv = 0; + break; + default: + pr_err("%s failed: invalid pixfmt %d\n", __func__, pix_fmt); + return -1; + } + + dev_info(dev, "stride_y=0x%x, width=%u\n", stride_y, width); + ccic_reg_write(ccic_dev, REG_IMGPITCH, stride_uv << 16 | stride_y); + ccic_reg_write(ccic_dev, REG_IMGSIZE, imgsz_h << 16 | imgsz_w); + ccic_reg_write(ccic_dev, REG_IMGOFFSET, 0x0); + ccic_reg_write_mask(ccic_dev, REG_CTRL0, data_fmt, C0_DF_MASK); + /* Make sure it knows we want to use hsync/vsync. 
*/ + ccic_reg_write_mask(ccic_dev, REG_CTRL0, C0_SIF_HVSYNC, C0_SIFM_MASK); + /* Need set following bit for auto-recovery */ + ccic_reg_set_bit(ccic_dev, REG_CTRL0, C0_EOFFLUSH); + + return 0; +} + +static int ccic_dma_set_addr(struct ccic_dma *dma_dev, + unsigned long addr_y, + unsigned long addr_u, + unsigned long addr_v) +{ + struct ccic_dev *ccic_dev = dma_dev->ccic_dev; + + ccic_reg_write(ccic_dev, 0x00, (u32)(addr_y & 0xffffffff)); + ccic_reg_write(ccic_dev, 0x0c, (u32)(addr_u & 0xffffffff)); + ccic_reg_write(ccic_dev, 0x18, (u32)(addr_v & 0xffffffff)); + + return 0; +} + +static int ccic_dma_shadow_ready(struct ccic_dma *dma_dev) +{ + struct ccic_dev *ccic_dev = dma_dev->ccic_dev; + ccic_reg_set_bit(ccic_dev, REG_CTRL1, C1_SHADOW_RDY); + + return 0; +} + +static int ccic_dma_src_select(struct ccic_dma *dma_dev, int src, unsigned int main_ccic_id) +{ + struct ccic_dev *ccic_dev = dma_dev->ccic_dev; + return ccic_dma_src_sel(ccic_dev, src, main_ccic_id); +} + +static void ccic_dma_dump_regs(struct ccic_dma *dma_dev) +{ + unsigned int reg_val = 0; + struct ccic_dev *ccic_dev = dma_dev->ccic_dev; + + reg_val = ccic_reg_read(ccic_dev, 0x30); + printk(KERN_INFO "ccic%d [0x30]=0x%08x\n", ccic_dev->index, reg_val); + reg_val = ccic_reg_read(ccic_dev, 0x28); + printk(KERN_INFO "ccic%d [0x28]=0x%08x\n", ccic_dev->index, reg_val); + reg_val = ccic_reg_read(ccic_dev, 0x2c); + printk(KERN_INFO "ccic%d [0x2c]=0x%08x\n", ccic_dev->index, reg_val); + reg_val = ccic_reg_read(ccic_dev, 0x24); + printk(KERN_INFO "ccic%d [0x24]=0x%08x\n", ccic_dev->index, reg_val); + reg_val = ccic_reg_read(ccic_dev, 0x34); + printk(KERN_INFO "ccic%d [0x34]=0x%08x\n", ccic_dev->index, reg_val); + reg_val = ccic_reg_read(ccic_dev, 0x38); + printk(KERN_INFO "ccic%d [0x38]=0x%08x\n", ccic_dev->index, reg_val); + reg_val = ccic_reg_read(ccic_dev, 0x3c); + printk(KERN_INFO "ccic%d [0x3c]=0x%08x\n", ccic_dev->index, reg_val); + reg_val = ccic_reg_read(ccic_dev, 0x40); + printk(KERN_INFO "ccic%d 
[0x40]=0x%08x\n", ccic_dev->index, reg_val); + reg_val = ccic_reg_read(ccic_dev, 0x44); + printk(KERN_INFO "ccic%d [0x44]=0x%08x\n", ccic_dev->index, reg_val); + reg_val = ccic_reg_read(ccic_dev, 0x48); + printk(KERN_INFO "ccic%d [0x48]=0x%08x\n", ccic_dev->index, reg_val); + reg_val = ccic_reg_read(ccic_dev, 0x310); + printk(KERN_INFO "ccic%d [0x310]=0x%08x\n", ccic_dev->index, reg_val); + reg_val = ccic_reg_read(ccic_dev, 0x60); + printk(KERN_INFO "ccic%d [0x60]=0x%08x\n", ccic_dev->index, reg_val); + reg_val = ccic_reg_read(ccic_dev, 0x23c); + printk(KERN_INFO "ccic%d [0x23c]=0x%08x\n", ccic_dev->index, reg_val); + reg_val = ccic_reg_read(ccic_dev, 0x128); + printk(KERN_INFO "ccic%d [0x128]=0x%08x\n", ccic_dev->index, reg_val); + reg_val = ccic_reg_read(ccic_dev, 0x12c); + printk(KERN_INFO "ccic%d [0x12c]=0x%08x\n", ccic_dev->index, reg_val); + reg_val = ccic_reg_read(ccic_dev, 0x134); + printk(KERN_INFO "ccic%d [0x134]=0x%08x\n", ccic_dev->index, reg_val); + reg_val = ccic_reg_read(ccic_dev, 0x138); + printk(KERN_INFO "ccic%d [0x138]=0x%08x\n", ccic_dev->index, reg_val); + reg_val = ccic_reg_read(ccic_dev, 0x100); + printk(KERN_INFO "ccic%d [0x100]=0x%08x\n", ccic_dev->index, reg_val); + reg_val = ccic_reg_read(ccic_dev, 0x140); + printk(KERN_INFO "ccic%d [0x140]=0x%08x\n", ccic_dev->index, reg_val); + reg_val = ccic_reg_read(ccic_dev, 0x144); + printk(KERN_INFO "ccic%d [0x144]=0x%08x\n", ccic_dev->index, reg_val); + reg_val = ccic_reg_read(ccic_dev, 0x124); + printk(KERN_INFO "ccic%d [0x124]=0x%08x\n", ccic_dev->index, reg_val); +} static struct ccic_dma_ops ccic_dma_ops = { + .set_fmt = ccic_dma_set_fmt, + .shadow_ready = ccic_dma_shadow_ready, + .set_addr = ccic_dma_set_addr, + .ccic_enable = ccic_dma_enable, .clk_enable = ccic_dma_clk_enable, + .src_sel = ccic_dma_src_select, + .dump_regs = ccic_dma_dump_regs, }; /* @@ -372,10 +573,13 @@ int ccic_clk_enable(struct ccic_ctrl *ctrl, int en) pr_err("rpm get failed\n"); return ret; } + 
pm_stay_awake(&ccic_dev->pdev->dev); + + clk_prepare_enable(ccic_dev->dpu_clk); + reset_control_deassert(ccic_dev->mclk_reset); //clk_prepare_enable(ccic_dev->ahb_clk); reset_control_deassert(ccic_dev->ahb_reset); - clk_prepare_enable(ccic_dev->clk4x); reset_control_deassert(ccic_dev->ccic_4x_reset); clk_prepare_enable(ccic_dev->csi_clk); @@ -394,9 +598,12 @@ int ccic_clk_enable(struct ccic_ctrl *ctrl, int en) clk_disable_unprepare(ccic_dev->clk4x); reset_control_assert(ccic_dev->ccic_4x_reset); + clk_disable_unprepare(ccic_dev->dpu_clk); + reset_control_assert(ccic_dev->mclk_reset); + //clk_disable_unprepare(ccic_dev->ahb_clk); reset_control_assert(ccic_dev->ahb_reset); - + pm_relax(&ccic_dev->pdev->dev); pm_runtime_put_sync(&ccic_dev->pdev->dev); } @@ -405,13 +612,14 @@ int ccic_clk_enable(struct ccic_ctrl *ctrl, int en) return ret; } -int ccic_config_csi2_mbus(struct ccic_ctrl *ctrl, int md, u8 vc0, u8 vc1, int lanes) +int ccic_config_csi2_mbus(struct ccic_ctrl *ctrl, int md, u8 vc0, u8 vc1, u8 dt0, u8 dt1, + int lanes) { int ret; struct ccic_dev *ccic_dev = ctrl->ccic_dev; struct mipi_csi2 csi2para; - ret = ccic_config_csi2_vc(ctrl, md, vc0, vc1); + ret = ccic_config_csi2_vc_dt(ctrl, md, vc0, vc1, dt0, dt1); if (ret) return ret; @@ -545,9 +753,16 @@ static int ccic_init_clk(struct ccic_dev *dev) if (IS_ERR_OR_NULL(dev->isp_ci_reset)) return PTR_ERR(dev->isp_ci_reset); + dev->mclk_reset = devm_reset_control_get_optional_shared(&dev->pdev->dev, "mclk_reset"); + if (IS_ERR_OR_NULL(dev->mclk_reset)) + return PTR_ERR(dev->mclk_reset); + dev->csi_clk = devm_clk_get(&dev->pdev->dev, "csi_func"); if (IS_ERR(dev->csi_clk)) return PTR_ERR(dev->csi_clk); + dev->dpu_clk = devm_clk_get(&dev->pdev->dev, "dpu_mclk"); + if (IS_ERR(dev->dpu_clk)) + return PTR_ERR(dev->dpu_clk); dev->clk4x = devm_clk_get(&dev->pdev->dev, "ccic_func"); return PTR_ERR_OR_ZERO(dev->clk4x); @@ -650,6 +865,31 @@ void ccic_ctrl_put(struct ccic_ctrl *ctrl) EXPORT_SYMBOL(ccic_ctrl_put); + +int 
ccic_dma_get(struct ccic_dma **ccic_dma, int id) +{ + struct ccic_dev *ccic_dev = NULL; + struct ccic_dev *tmp = NULL; + struct ccic_dma *dma = NULL; + + list_for_each_entry(tmp, &ccic_devices, list) { + if (tmp->index == id) { + ccic_dev = tmp; + break; + } + } + if (!ccic_dev) { + pr_err("ccic%d not found", id); + return -ENODEV; + } + + dma = ccic_dev->dma; + *ccic_dma = dma; + pr_debug("acquire ccic%d dma dev succeed\n", id); + + return 0; +} +EXPORT_SYMBOL(ccic_dma_get); static void ipe_error_irq_handler(struct ccic_dev *ccic, u32 ipestatus, u32 csi2status) { static DEFINE_RATELIMIT_STATE(rs, 5 * HZ, 20); @@ -664,10 +904,110 @@ static void ipe_error_irq_handler(struct ccic_dev *ccic, u32 ipestatus, u32 csi2 } } +static int ccic_put_dma_work(struct ccic_dma_context *dma_ctx, + struct ccic_dma_work_struct *ccic_dma_work) +{ + unsigned long flags = 0; + + spin_lock_irqsave(&dma_ctx->slock, flags); + list_del_init(&ccic_dma_work->busy_list_entry); + list_add(&ccic_dma_work->idle_list_entry, &dma_ctx->dma_work_idle_list); + spin_unlock_irqrestore(&dma_ctx->slock, flags); + + return 0; +} + +static int ccic_get_dma_work(struct ccic_dma_context *dma_ctx, + struct ccic_dma_work_struct **ccic_dma_work) +{ + unsigned long flags = 0; + + spin_lock_irqsave(&dma_ctx->slock, flags); + *ccic_dma_work = list_first_entry_or_null(&dma_ctx->dma_work_idle_list, struct ccic_dma_work_struct, idle_list_entry); + if (NULL == *ccic_dma_work) { + spin_unlock_irqrestore(&dma_ctx->slock, flags); + return -1; + } + list_del_init(&((*ccic_dma_work)->idle_list_entry)); + list_add(&((*ccic_dma_work)->busy_list_entry), &dma_ctx->dma_work_busy_list); + spin_unlock_irqrestore(&dma_ctx->slock, flags); + + return 0; +} + +static void ccic_dma_bh_handler(struct ccic_dma_work_struct *ccic_dma_work) +{ + struct spm_ccic_vnode *ac_vnode = ccic_dma_work->ac_vnode; + struct device *dev = ac_vnode->ccic_dev->dev; + struct ccic_dma_context *dma_ctx = &ac_vnode->dma_ctx; + struct spm_ccic_vbuffer *n = 
NULL, *pos = NULL; + //unsigned int irq_status = ccic_dma_work->irq_status; + LIST_HEAD(export_list); + unsigned long flags = 0; + + spin_lock(&ac_vnode->waitq_head.lock); + ac_vnode->in_tasklet = 1; + if (ac_vnode->in_streamoff || !ac_vnode->is_streaming) { + wake_up_locked(&ac_vnode->waitq_head); + spin_unlock(&ac_vnode->waitq_head.lock); + goto dma_tasklet_finish; + } + wake_up_locked(&ac_vnode->waitq_head); + spin_unlock(&ac_vnode->waitq_head.lock); + spin_lock_irqsave(&ac_vnode->slock, flags); + list_for_each_entry_safe(pos, n, &ac_vnode->busy_list, list_entry) { + if (pos->flags & (AC_BUF_FLAG_HW_ERR | AC_BUF_FLAG_SW_ERR | AC_BUF_FLAG_DONE_TOUCH)) { + list_del_init(&(pos->list_entry)); + atomic_dec(&ac_vnode->busy_buf_cnt); + list_add_tail(&(pos->list_entry), &export_list); + } + } + spin_unlock_irqrestore(&ac_vnode->slock, flags); + list_for_each_entry_safe(pos, n, &export_list, list_entry) { + if (!(pos->flags & AC_BUF_FLAG_SOF_TOUCH)) { + dev_warn(dev, "%s export buf index=%u frameid=%u without sof touch\n", ac_vnode->name, pos->vb2_v4l2_buf.vb2_buf.index, pos->vb2_v4l2_buf.sequence); + } + if (pos->flags & AC_BUF_FLAG_HW_ERR) { + //pos->vb2_v4l2_buf.flags |= V4L2_BUF_FLAG_ERROR_HW; + dev_warn(dev, "%s export buf index=%u frameid=%u with hw error\n", ac_vnode->name, pos->vb2_v4l2_buf.vb2_buf.index, pos->vb2_v4l2_buf.sequence); + spm_cvdev_export_ccic_vbuffer(pos, 1); + ac_vnode->hw_err_frm++; + } else if (pos->flags & AC_BUF_FLAG_SW_ERR) { + //pos->vb2_v4l2_buf.flags |= V4L2_BUF_FLAG_ERROR_SW; + dev_warn(dev, "%s export buf index=%u frameid=%u with sw error\n", ac_vnode->name, pos->vb2_v4l2_buf.vb2_buf.index, pos->vb2_v4l2_buf.sequence); + spm_cvdev_export_ccic_vbuffer(pos, 1); + ac_vnode->sw_err_frm++; + } else if (pos->flags & AC_BUF_FLAG_DONE_TOUCH) { + spm_cvdev_export_ccic_vbuffer(pos, 0); + ac_vnode->ok_frm++; + } + } +dma_tasklet_finish: + if (ac_vnode) { + spin_lock(&ac_vnode->waitq_head.lock); + ac_vnode->in_tasklet = 0; + 
wake_up_locked(&ac_vnode->waitq_head); + spin_unlock(&ac_vnode->waitq_head.lock); + } + ccic_put_dma_work(dma_ctx, ccic_dma_work); +} + +static void ccic_dma_tasklet_handler(unsigned long param) +{ + struct ccic_dma_work_struct *ccic_dma_work = (struct ccic_dma_work_struct*)param; + ccic_dma_bh_handler(ccic_dma_work); +} static irqreturn_t k1x_ccic_isr(int irq, void *data) { struct ccic_dev *ccic_dev = data; - uint32_t irqs, csi2status; + struct spm_ccic_vnode *ac_vnode = (struct spm_ccic_vnode*)ccic_dev->vnode; + struct ccic_dma_context *dma_ctx = &ac_vnode->dma_ctx; + struct spm_ccic_vbuffer *pos = NULL, *ac_vb = NULL; + struct ccic_dma_work_struct *ccic_dma_work = NULL; + struct ccic_dma *ccic_dma = ac_vnode->ccic_dev->dma; + struct device *dev = ac_vnode->ccic_dev->dev; + uint32_t irqs = 0, csi2status = 0, tmp = 0; + int ret = 0; irqs = ccic_reg_read(ccic_dev, REG_IRQSTAT); if (!(irqs & ~IRQ_IDI_PRO_LINE)) @@ -682,8 +1022,8 @@ static irqreturn_t k1x_ccic_isr(int irq, void *data) if (irqs & IRQ_DMA_PRO_LINE) pr_debug("CCIC%d: IRQ_DMA_PRO_LINE\n", ccic_dev->index); - if (irqs & IRQ_IDI_PRO_LINE) - pr_debug("CCIC%d: IRQ_IDI_PRO_LINE\n", ccic_dev->index); + //if (irqs & IRQ_IDI_PRO_LINE) + // pr_debug("CCIC%d: IRQ_IDI_PRO_LINE\n", ccic_dev->index); if (irqs & IRQ_CSI2IDI_FLUSH) pr_debug("CCIC%d: IRQ_CSI2IDI_FLUSH\n", ccic_dev->index); @@ -700,6 +1040,84 @@ static irqreturn_t k1x_ccic_isr(int irq, void *data) if (irqs & IRQ_DPHY_LN_ULPS_ACTIVE) pr_debug("CCIC%d: IRQ_DPHY_LN_ULPS_ACTIVE\n", ccic_dev->index); + //if (irqs & IRQ_DMA_SOF) { + // dev_dbg(dev, "CCIC%d: IRQ_DMA_SOF\n", ccic_dev->index); + //} + if (irqs & IRQ_DMA_SOF || irqs & IRQ_SHADOW_NOT_RDY) { + ac_vnode->frame_id++; + ac_vnode->total_frm++; + } + spin_lock(&ac_vnode->waitq_head.lock); + ac_vnode->in_irq = 1; + if (ac_vnode->in_streamoff) { + ac_vnode->in_irq = 0; + wake_up_locked(&ac_vnode->waitq_head); + spin_unlock(&ac_vnode->waitq_head.lock); + return IRQ_HANDLED; + } + 
spin_unlock(&ac_vnode->waitq_head.lock); + if (ac_vnode->is_streaming && (irqs & FRAMEIRQS)) { + //if (irqs & IRQ_CSI_SOF) { + if (irqs & IRQ_DMA_SOF || irqs & IRQ_SHADOW_NOT_RDY) { + spm_cvdev_dq_idle_vbuffer(ac_vnode, &ac_vb); + if (ac_vb) { + spm_cvdev_q_busy_vbuffer(ac_vnode, ac_vb); + ccic_update_dma_addr(ac_vnode, ac_vb, 0); + ccic_dma->ops->shadow_ready(ccic_dma); + } + } + tmp = irqs; + spin_lock(&(ac_vnode->slock)); + list_for_each_entry(pos, &(ac_vnode->busy_list), list_entry) { + if (!tmp) + break; + if (tmp & (IRQ_DMA_OVERFLOW | IRQ_DMA_NOT_DONE)) { + if (!(pos->flags & AC_BUF_FLAG_SOF_TOUCH)) { + dev_info(dev, "CCIC%d: dma err(0x%08x) without sof, drop it\n", ccic_dev->index, tmp); + tmp &= ~(IRQ_DMA_OVERFLOW | IRQ_DMA_NOT_DONE); + } else if (!(pos->flags & AC_BUF_FLAG_HW_ERR)) { + pos->flags |= AC_BUF_FLAG_HW_ERR; + dev_info(dev, "CCIC%d: dma err(0x%08x)\n", ccic_dev->index, tmp); + tmp &= ~(IRQ_DMA_OVERFLOW | IRQ_DMA_NOT_DONE); + } + } + if (tmp & IRQ_DMA_EOF) { + if (!(pos->flags & AC_BUF_FLAG_SOF_TOUCH)) { + dev_info(dev, "CCIC%d: dma done without sof, drop it\n", ccic_dev->index); + tmp &= ~IRQ_DMA_EOF; + } else if (!(pos->flags & AC_BUF_FLAG_DONE_TOUCH)) { + pos->flags |= AC_BUF_FLAG_DONE_TOUCH; + pos->vb2_v4l2_buf.sequence = ac_vnode->frame_id - 1; + pos->vb2_v4l2_buf.vb2_buf.timestamp = ktime_get_ns(); + tmp &= ~IRQ_DMA_EOF; + //dev_info(dev, "CCIC%d: dma done\n", ccic_dev->index); + } + } + if (tmp & IRQ_DMA_SOF) { + if (pos->flags & AC_BUF_FLAG_SOF_TOUCH) { + if (!(pos->flags & (AC_BUF_FLAG_DONE_TOUCH | AC_BUF_FLAG_HW_ERR | AC_BUF_FLAG_SW_ERR))) { + dev_warn(dev, "CCIC%d: next sof arrived without dma done or err\n", ccic_dev->index); + pos->flags |= AC_BUF_FLAG_SW_ERR; + } + } else { + pos->flags |= (AC_BUF_FLAG_SOF_TOUCH | AC_BUF_FLAG_TIMESTAMPED); + tmp &= ~IRQ_DMA_SOF; + } + } + } + spin_unlock(&(ac_vnode->slock)); + ret = ccic_get_dma_work(dma_ctx, &ccic_dma_work); + if (ret) { + dev_warn(dev, "CCIC%d: dma work idle list was null\n", 
ccic_dev->index); + } else { + ccic_dma_work->irq_status = irqs; + tasklet_schedule(&(ccic_dma_work->dma_tasklet)); + } + } + spin_lock(&ac_vnode->waitq_head.lock); + ac_vnode->in_irq = 0; + wake_up_locked(&ac_vnode->waitq_head); + spin_unlock(&ac_vnode->waitq_head.lock); return IRQ_HANDLED; } @@ -710,10 +1128,8 @@ static int k1x_ccic_probe(struct platform_device *pdev) struct ccic_ctrl *ccic_ctrl; struct ccic_dma *ccic_dma; struct device *dev = &pdev->dev; + char buf[32]; int ret; - int irq; - - pr_debug("%s begin to probe\n", dev_name(&pdev->dev)); ret = of_property_read_u32(np, "cell-index", &pdev->id); if (ret < 0) { @@ -753,13 +1169,13 @@ static int k1x_ccic_probe(struct platform_device *pdev) } /* get irqs */ - irq = platform_get_irq_byname(pdev, "ipe-irq"); - if (irq < 0) { + ccic_dev->irq = platform_get_irq_byname(pdev, "ipe-irq"); + if (ccic_dev->irq < 0) { dev_err(&pdev->dev, "no irq resource"); return -ENODEV; } - dev_dbg(&pdev->dev, "ipe irq: %d\n", irq); - ret = devm_request_irq(&pdev->dev, irq, k1x_ccic_isr, + + ret = devm_request_irq(&pdev->dev, ccic_dev->irq, k1x_ccic_isr, IRQF_SHARED, K1X_CCIC_DRV_NAME, ccic_dev); if (ret) { dev_err(&pdev->dev, "fail to request irq\n"); @@ -786,7 +1202,7 @@ static int k1x_ccic_probe(struct platform_device *pdev) ccic_dev->dev = &pdev->dev; ccic_dev->ctrl = ccic_ctrl; ccic_dev->dma = ccic_dma; - ccic_dev->interrupt_mask_value = CSI2PHYERRS; + ccic_dev->interrupt_mask_value = CSI2PHYERRS | FRAMEIRQS; dev_set_drvdata(dev, ccic_dev); /* enable runtime pm */ @@ -796,7 +1212,24 @@ static int k1x_ccic_probe(struct platform_device *pdev) ccic_device_register(ccic_dev); - pr_debug("%s probed", dev_name(&pdev->dev)); + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(33)); + ret = v4l2_device_register(&pdev->dev, &ccic_dev->v4l2_dev); + if (ret) { + dev_err(&pdev->dev, "failed to register v4l2 dev\n"); + return ret; + } + snprintf(buf, 32, "CCIC%d", ccic_ctrl->index); + ccic_dev->vnode = spm_cvdev_create_vnode(buf, 
ccic_dev->index, + &ccic_dev->v4l2_dev, + &pdev->dev, + ccic_dev, + ccic_dma_tasklet_handler, + 0); + if (NULL == ccic_dev->vnode) { + dev_err(&pdev->dev, "failed to create ccic vnode\n"); + return -EPROBE_DEFER; + } + pr_info("%s probed in %s", dev_name(&pdev->dev), __func__); return ret; } @@ -809,6 +1242,8 @@ static int k1x_ccic_remove(struct platform_device *pdev) ccic_dev = dev_get_drvdata(&pdev->dev); dma = ccic_dev->dma; + spm_cvdev_destroy_vnode((struct spm_ccic_vnode*)ccic_dev->vnode); + v4l2_device_unregister(&ccic_dev->v4l2_dev); ccic_device_unregister(ccic_dev); /* disable runtime pm */ diff --git a/drivers/media/platform/spacemit/camera/cam_ccic/ccic_drv.h b/drivers/media/platform/spacemit/camera/cam_ccic/ccic_drv.h index e54587c6f0f2..1d9f652816c8 100644 --- a/drivers/media/platform/spacemit/camera/cam_ccic/ccic_drv.h +++ b/drivers/media/platform/spacemit/camera/cam_ccic/ccic_drv.h @@ -48,8 +48,8 @@ struct mipi_csi2 { int dphy_type; /* 0: DPHY on chip, 1: DPTC off chip */ u32 dphy[5]; /* DPHY: CSI2_DPHY1, CSI2_DPHY2, CSI2_DPHY3, CSI2_DPHY5, CSI2_DPHY6 */ int calc_dphy; - int enable_dpcm; struct csi_dphy_desc dphy_desc; + int enable_dpcm; }; #define HS_SETTLE_POS_MAX (100) @@ -106,36 +106,27 @@ enum ccic_csi2vc_mode { CCIC_CSI2VC_NM = 0, CCIC_CSI2VC_VC, CCIC_CSI2VC_DT, + CCIC_CSI2VC_VCDT, }; enum ccic_csi2vc_chnl { CCIC_CSI2VC_MAIN = 0, - CCIC_CSI2VC_VCDT, + CCIC_CSI2VC_SUB, }; struct ccic_ctrl_ops { void (*irq_mask)(struct ccic_ctrl *ctrl, int on); int (*clk_enable)(struct ccic_ctrl *ctrl, int en); - int (*config_csi2_mbus)(struct ccic_ctrl *ctrl, int md, u8 vc0, u8 vc1, - int lanes); + int (*config_csi2_mbus)(struct ccic_ctrl *ctrl, int md, u8 vc0, u8 vc1, u8 dt0, u8 dt1, int lanes); int (*config_csi2idi_mux)(struct ccic_ctrl *ctrl, int chnl, int idi, int en); int (*reset_csi2idi)(struct ccic_ctrl *ctrl, int idi, int rst); }; struct ccic_dma { int index; - struct v4l2_device v4l2_dev; - struct video_device vdev; struct ccic_dev *ccic_dev; - struct 
v4l2_pix_format pix_format; struct mutex ops_mutex; spinlock_t dev_lock; - struct list_head pending_bq; - struct list_head active_bq; - struct vb2_queue vb_queue; - u32 csi_sof_cnt; - u32 dma_sof_cnt; - u32 dma_eof_cnt; struct ccic_dma_ops *ops; }; @@ -148,11 +139,16 @@ enum ccic_dma_sel { }; struct ccic_dma_ops { - int (*setup_image)(struct ccic_dma *dma_dev); - int (*shadow_ready)(struct ccic_dma *dma_dev, int enable); - int (*set_addr)(struct ccic_dma *dma_dev, u8 chnl, u32 addr); - int (*ccic_enable)(struct ccic_dma *dma_dev, int enable); - int (*clk_enable)(struct ccic_dma *dma_dev, int enable); + int (*set_fmt)(struct ccic_dma *dma_dev, + unsigned int width, + unsigned int height, + unsigned int pix_fmt); + int (*shadow_ready)(struct ccic_dma *dma_dev); + int (*set_addr)(struct ccic_dma *dma_dev, unsigned long addr_y, unsigned long addr_u, unsigned long addr_v); + int (*ccic_enable)(struct ccic_dma *dma_dev, int enable); + int (*clk_enable)(struct ccic_dma *dma_dev, int enable); + int (*src_sel)(struct ccic_dma *dma_dev, int src, unsigned int main_ccic_id); + void (*dump_regs)(struct ccic_dma *dma_dev); }; struct ccic_dev { @@ -160,17 +156,19 @@ struct ccic_dev { struct device *dev; struct platform_device *pdev; struct list_head list; - struct resource *irq; + int irq; struct resource *mem; void __iomem *base; struct clk *csi_clk; struct clk *clk4x; -// struct clk *ahb_clk; + //struct clk *ahb_clk; struct clk *axi_clk; + struct clk *dpu_clk; struct reset_control *ahb_reset; struct reset_control *csi_reset; struct reset_control *ccic_4x_reset; struct reset_control *isp_ci_reset; + struct reset_control *mclk_reset; int dma_burst; spinlock_t ccic_lock; /* protect the struct members and HW */ @@ -182,6 +180,8 @@ struct ccic_dev { struct ccic_dma *dma; /* object for csiphy part */ struct csiphy_device *csiphy; + struct v4l2_device v4l2_dev; + void *vnode; }; /* @@ -220,5 +220,6 @@ static inline void ccic_reg_clear_bit(struct ccic_dev *ccic_dev, int 
ccic_ctrl_get(struct ccic_ctrl **ctrl_host, int id, irqreturn_t(*handler) (struct ccic_ctrl *, u32)); +int ccic_dma_get(struct ccic_dma **ccic_dma, int id); int ccic_dphy_hssettle_set(unsigned int ccic_id, unsigned int dphy_freg); #endif diff --git a/drivers/media/platform/spacemit/camera/cam_ccic/ccic_hwreg.c b/drivers/media/platform/spacemit/camera/cam_ccic/ccic_hwreg.c index 917877aa63ec..65bca4112aef 100644 --- a/drivers/media/platform/spacemit/camera/cam_ccic/ccic_hwreg.c +++ b/drivers/media/platform/spacemit/camera/cam_ccic/ccic_hwreg.c @@ -64,7 +64,7 @@ int ccic_csi2_lanes_enable(struct ccic_dev *ccic_dev, int lanes) return 0; } - +#if 0 int ccic_csi2_vc_ctrl(struct ccic_dev *ccic_dev, int md, u8 vc0, u8 vc1) { int ret = 0; @@ -94,27 +94,51 @@ int ccic_csi2_vc_ctrl(struct ccic_dev *ccic_dev, int md, u8 vc0, u8 vc1) return ret; } - -int ccic_dma_src_sel(struct ccic_dev *ccic_dev, int sel) +#endif +int ccic_dma_src_sel(struct ccic_dev *ccic_dev, int sel, unsigned int main_ccic_id) { switch (sel) { case CCIC_DMA_SEL_LOCAL_MAIN: - ccic_reg_clear_bit(ccic_dev, REG_CSI2_CTRL0, CSI2_C0_EXT_TIM_ENA); - ccic_reg_clear_bit(ccic_dev, REG_CSI2_CTRL0, CSI2_C0_VCDC_SEL); - /* FIXME: no need */ - ccic_reg_set_bit(ccic_dev, REG_CSI2_CTRL0, CSI2_C0_ENABLE); - break; case CCIC_DMA_SEL_LOCAL_VCDT: - ccic_reg_clear_bit(ccic_dev, REG_CSI2_CTRL0, CSI2_C0_EXT_TIM_ENA); - ccic_reg_set_bit(ccic_dev, REG_CSI2_CTRL0, CSI2_C0_VCDC_SEL); + ccic_reg_clear_bit(ccic_dev, REG_CSI2_CTRL0, + CSI2_C0_EXT_TIM_ENA); + ccic_reg_clear_bit(ccic_dev, REG_CSI2_CTRL0, CSI2_C0_VCDT_SEL); /* FIXME: no need */ ccic_reg_set_bit(ccic_dev, REG_CSI2_CTRL0, CSI2_C0_ENABLE); break; case CCIC_DMA_SEL_REMOTE_VCDT: - ccic_reg_set_bit(ccic_dev, REG_CSI2_CTRL0, CSI2_C0_EXT_TIM_ENA); - ccic_reg_clear_bit(ccic_dev, REG_CSI2_CTRL0, CSI2_C0_VCDC_SEL); - /* When EXT_TIM_ENA is enabled, this field must be enabled too. 
*/ - ccic_reg_set_bit(ccic_dev, REG_CSI2_CTRL0, CSI2_C0_ENABLE); + if (ccic_dev->index == 0) { + if (main_ccic_id == 2) { + ccic_reg_set_bit(ccic_dev, REG_CSI2_CTRL0, CSI2_C0_EXT_TIM_ENA); + ccic_reg_clear_bit(ccic_dev, REG_CSI2_CTRL0, CSI2_C0_VCDT_SEL); + ccic_reg_set_bit(ccic_dev, REG_CSI2_CTRL0, CSI2_C0_ENABLE); + } else { + ccic_reg_set_bit(ccic_dev, REG_CSI2_CTRL0, CSI2_C0_EXT_TIM_ENA); + ccic_reg_set_bit(ccic_dev, REG_CSI2_CTRL0, CSI2_C0_VCDT_SEL); + ccic_reg_set_bit(ccic_dev, REG_CSI2_CTRL0, CSI2_C0_ENABLE); + } + } else if (ccic_dev->index == 1) { + if (main_ccic_id == 2) { + ccic_reg_set_bit(ccic_dev, REG_CSI2_CTRL0, CSI2_C0_EXT_TIM_ENA); + ccic_reg_clear_bit(ccic_dev, REG_CSI2_CTRL0, CSI2_C0_VCDT_SEL); + ccic_reg_set_bit(ccic_dev, REG_CSI2_CTRL0, CSI2_C0_ENABLE); + } else { + ccic_reg_set_bit(ccic_dev, REG_CSI2_CTRL0, CSI2_C0_EXT_TIM_ENA); + ccic_reg_set_bit(ccic_dev, REG_CSI2_CTRL0, CSI2_C0_VCDT_SEL); + ccic_reg_set_bit(ccic_dev, REG_CSI2_CTRL0, CSI2_C0_ENABLE); + } + } else { + if (main_ccic_id == 0) { + ccic_reg_set_bit(ccic_dev, REG_CSI2_CTRL0, CSI2_C0_EXT_TIM_ENA); + ccic_reg_clear_bit(ccic_dev, REG_CSI2_CTRL0, CSI2_C0_VCDT_SEL); + ccic_reg_set_bit(ccic_dev, REG_CSI2_CTRL0, CSI2_C0_ENABLE); + } else { + ccic_reg_set_bit(ccic_dev, REG_CSI2_CTRL0, CSI2_C0_EXT_TIM_ENA); + ccic_reg_set_bit(ccic_dev, REG_CSI2_CTRL0, CSI2_C0_VCDT_SEL); + /* When EXT_TIM_ENA is enabled, this field must be enabled too. 
*/ + ccic_reg_set_bit(ccic_dev, REG_CSI2_CTRL0, CSI2_C0_ENABLE); + } + } break; case CCIC_DMA_SEL_REMOTE_MAIN: default: diff --git a/drivers/media/platform/spacemit/camera/cam_ccic/ccic_hwreg.h b/drivers/media/platform/spacemit/camera/cam_ccic/ccic_hwreg.h index 65db3b2711f3..a9e9f533f475 100644 --- a/drivers/media/platform/spacemit/camera/cam_ccic/ccic_hwreg.h +++ b/drivers/media/platform/spacemit/camera/cam_ccic/ccic_hwreg.h @@ -8,6 +8,9 @@ #ifndef __CCIC_HWREG_H__ #define __CCIC_HWREG_H__ +#ifndef BIT +#define BIT(nr) (1 << (nr)) +#endif #define REG_Y0BAR 0x00 #define REG_U0BAR 0x0c #define REG_V0BAR 0x18 @@ -51,7 +54,7 @@ #define IRQ_CSI2PARSE_ERR (BIT(29)) #define IRQ_CSI2GENSHORTPACKVALID (BIT(30)) #define IRQ_CSI2GENSHORTPACK_ERR (BIT(31)) -// #define FRAMEIRQS (IRQ_CSI_SOF | IRQ_CSI_EOF | IRQ_DMA_SOF | IRQ_DMA_EOF) +// #define FRAMEIRQS (IRQ_CSI_SOF | IRQ_CSI_EOF | IRQ_DMA_SOF | IRQ_DMA_EOF | IRQ_DMA_OVERFLOW | IRQ_DMA_NOT_DONE | IRQ_SHADOW_NOT_RDY) #define FRAMEIRQS (IRQ_DMA_SOF | IRQ_DMA_EOF | IRQ_DMA_OVERFLOW | IRQ_DMA_NOT_DONE | IRQ_SHADOW_NOT_RDY) #define CSI2PHYERRS (0xFF0B0000) #define ALLIRQS (FRAMEIRQS | CSI2PHYERRS | IRQ_CSI2IDI_HBLK2HSYNC) @@ -151,7 +154,7 @@ #define CSI2_C0_EXT_TIM_ENA (0x1 << 3) #define CSI2_C0_VLEN (0x4 << 4) #define CSI2_C0_VLEN_MASK (0xf << 4) -#define CSI2_C0_VCDC_SEL (0x1 << 13) +#define CSI2_C0_VCDT_SEL (0x1 << 13) #define REG_CSI2_VCCTRL 0x114 #define CSI2_VCCTRL_MD_MASK (0x3 << 0) #define CSI2_VCCTRL_MD_NORMAL (0x0 << 0) @@ -160,6 +163,17 @@ #define CSI2_VCCTRL_VC0_MASK (0x3 << 14) #define CSI2_VCCTRL_DT1_MASK (0x3 << 16) #define CSI2_VCCTRL_VC1_MASK (0x3 << 22) +#define CSI2_VCCTRL_DT_ENABLE (0x1 << 24) +#define REG_CSI2_DT_FLT 0x11c +#define CSI2_DT_FLT0_MASK (BIT(0)|BIT(1)|BIT(2)|BIT(3)|BIT(4)|BIT(5)) +#define CSI2_DT_FLT0_SHIFT (0) +#define CSI2_DT_FLT0_EN (BIT(6)) +#define CSI2_DT_FLT1_MASK (BIT(8)|BIT(9)|BIT(10)|BIT(11)|BIT(12)|BIT(13)) +#define CSI2_DT_FLT1_SHIFT (8) +#define CSI2_DT_FLT1_EN (BIT(14)) +#define 
CSI2_DT_FLT2_MASK (BIT(16)|BIT(17)|BIT(18)|BIT(19)|BIT(20)|BIT(21)) +#define CSI2_DT_FLT2_SHIFT (16) +#define CSI2_DT_FLT2_EN (BIT(22)) #define REG_CSI2_DPHY1 0x124 #define CSI2_DHPY1_ANA_PU (0x1 << 0) #define CSI2_DHPY1_BIF_EN (0x1 << 1) @@ -225,12 +239,11 @@ int ccic_csi2_config_dphy(struct ccic_dev *ccic_dev, int lanes, int enable); int ccic_csi2_lanes_enable(struct ccic_dev *ccic_dev, int lanes); -int ccic_csi2_vc_ctrl(struct ccic_dev *ccic_dev, int md, u8 vc0, u8 vc1); -int ccic_dma_src_sel(struct ccic_dev *ccic_dev, int sel); -int ccic_dma_set_out_format(struct ccic_dev *ccic_dev, u32 pixfmt, u32 width, - u32 height); +//int ccic_csi2_vc_ctrl(struct ccic_dev *ccic_dev, int md, u8 vc0, u8 vc1); +int ccic_dma_src_sel(struct ccic_dev *ccic_dev, int sel, unsigned int main_ccic_id); +int ccic_dma_set_out_format(struct ccic_dev *ccic_dev, u32 pixfmt, u32 width, u32 height); int ccic_dma_set_burst(struct ccic_dev *ccic_dev); -void ccic_dma_enable(struct ccic_dev *ccic_dev, int en); +//void ccic_dma_enable(struct ccic_dev *ccic_dev, int en); int ccic_csi2idi_src_sel(struct ccic_dev *ccic_dev, int sel); void ccic_csi2idi_reset(struct ccic_dev *ccic_dev, int reset); void ccic_hw_dump_regs(struct ccic_dev *ccic_dev); diff --git a/drivers/media/platform/spacemit/camera/cam_ccic/ccic_vdev.c b/drivers/media/platform/spacemit/camera/cam_ccic/ccic_vdev.c new file mode 100644 index 000000000000..c60e5cedf46e --- /dev/null +++ b/drivers/media/platform/spacemit/camera/cam_ccic/ccic_vdev.c @@ -0,0 +1,1407 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * vdev.c - video divece functions + * + * Copyright(C) 2019 SPM Micro Limited + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ccic_vdev.h" + +#define pr_dbg(format, ...) 
+ +static struct { + __u32 pixelformat; + __u8 num_planes; + __u32 pixel_width_align; + __u32 pixel_height_align; + __u32 plane_bytes_align[VIDEO_MAX_PLANES]; + struct { + __u32 num; + __u32 den; + }plane_bpp[VIDEO_MAX_PLANES]; + struct { + __u32 num; + __u32 den; + }height_subsampling[VIDEO_MAX_PLANES]; +} spm_ccic_formats_table[] = { + /* bayer raw8 */ + { + .pixelformat = V4L2_PIX_FMT_SBGGR8, + .num_planes = 1, + .pixel_width_align = 1, + .plane_bytes_align = { + [0] = 1, + }, + .plane_bpp = { + [0] = { + .num = 8, + .den = 1, + }, + }, + .height_subsampling = { + [0] = { + .num = 1, + .den = 1, + }, + }, + }, + { + .pixelformat = V4L2_PIX_FMT_SGBRG8, + .num_planes = 1, + .pixel_width_align = 1, + .plane_bytes_align = { + [0] = 1, + }, + .plane_bpp = { + [0] = { + .num = 8, + .den = 1, + }, + }, + .height_subsampling = { + [0] = { + .num = 1, + .den = 1, + }, + }, + }, + { + .pixelformat = V4L2_PIX_FMT_SGRBG8, + .num_planes = 1, + .pixel_width_align = 1, + .plane_bytes_align = { + [0] = 1, + }, + .plane_bpp = { + [0] = { + .num = 8, + .den = 1, + }, + }, + .height_subsampling = { + [0] = { + .num = 1, + .den = 1, + }, + }, + }, + { + .pixelformat = V4L2_PIX_FMT_SRGGB8, + .num_planes = 1, + .pixel_width_align = 1, + .plane_bytes_align = { + [0] = 1, + }, + .plane_bpp = { + [0] = { + .num = 8, + .den = 1, + }, + }, + .height_subsampling = { + [0] = { + .num = 1, + .den = 1, + }, + }, + }, + /* bayer raw10 */ + { + .pixelformat = V4L2_PIX_FMT_SBGGR10P, + .num_planes = 1, + .pixel_width_align = 1, + .plane_bytes_align = { + [0] = 1, + }, + .plane_bpp = { + [0] = { + .num = 10, + .den = 1, + }, + }, + .height_subsampling = { + [0] = { + .num = 1, + .den = 1, + }, + }, + }, + { + .pixelformat = V4L2_PIX_FMT_SGBRG10P, + .num_planes = 1, + .pixel_width_align = 1, + .plane_bytes_align = { + [0] = 1, + }, + .plane_bpp = { + [0] = { + .num = 10, + .den = 1, + }, + }, + .height_subsampling = { + [0] = { + .num = 1, + .den = 1, + }, + }, + }, + { + .pixelformat = 
V4L2_PIX_FMT_SGRBG10P, + .num_planes = 1, + .pixel_width_align = 1, + .plane_bytes_align = { + [0] = 1, + }, + .plane_bpp = { + [0] = { + .num = 10, + .den = 1, + }, + }, + .height_subsampling = { + [0] = { + .num = 1, + .den = 1, + }, + }, + }, + { + .pixelformat = V4L2_PIX_FMT_SRGGB10P, + .num_planes = 1, + .pixel_width_align = 1, + .plane_bytes_align = { + [0] = 1, + }, + .plane_bpp = { + [0] = { + .num = 10, + .den = 1, + }, + }, + .height_subsampling = { + [0] = { + .num = 1, + .den = 1, + }, + }, + }, + /* bayer raw12 */ + { + .pixelformat = V4L2_PIX_FMT_SBGGR12P, + .num_planes = 1, + .pixel_width_align = 1, + .plane_bytes_align = { + [0] = 1, + }, + .plane_bpp = { + [0] = { + .num = 12, + .den = 1, + }, + }, + .height_subsampling = { + [0] = { + .num = 1, + .den = 1, + }, + }, + }, + { + .pixelformat = V4L2_PIX_FMT_SGBRG12P, + .num_planes = 1, + .pixel_width_align = 1, + .plane_bytes_align = { + [0] = 1, + }, + .plane_bpp = { + [0] = { + .num = 12, + .den = 1, + }, + }, + .height_subsampling = { + [0] = { + .num = 1, + .den = 1, + }, + }, + }, + { + .pixelformat = V4L2_PIX_FMT_SGRBG12P, + .num_planes = 1, + .pixel_width_align = 1, + .plane_bytes_align = { + [0] = 1, + }, + .plane_bpp = { + [0] = { + .num = 12, + .den = 1, + }, + }, + .height_subsampling = { + [0] = { + .num = 1, + .den = 1, + }, + }, + }, + { + .pixelformat = V4L2_PIX_FMT_SRGGB12P, + .num_planes = 1, + .pixel_width_align = 1, + .plane_bytes_align = { + [0] = 1, + }, + .plane_bpp = { + [0] = { + .num = 12, + .den = 1, + }, + }, + .height_subsampling = { + [0] = { + .num = 1, + .den = 1, + }, + }, + }, + /* yuv */ + /* YUYV YUV422 */ + { + .pixelformat = V4L2_PIX_FMT_YUYV, + .num_planes = 1, + .pixel_width_align = 2, + .plane_bytes_align = { + [0] = 1, + }, + .plane_bpp = { + [0] = { + .num = 16, + .den = 1, + }, + }, + .height_subsampling = { + [0] = { + .num = 1, + .den = 1, + }, + }, + }, + /* YVYU YUV422 */ + { + .pixelformat = V4L2_PIX_FMT_YVYU, + .num_planes = 1, + .pixel_width_align = 
2, + .plane_bytes_align = { + [0] = 1, + }, + .plane_bpp = { + [0] = { + .num = 16, + .den = 1, + }, + }, + .height_subsampling = { + [0] = { + .num = 1, + .den = 1, + }, + }, + }, +}; + +static int spm_cvdev_lookup_formats_table(struct v4l2_format *f, int *bit_depth) +{ + struct v4l2_pix_format_mplane *pix_fmt = &f->fmt.pix_mp; + int loop = 0; + + for (loop = 0; loop < ARRAY_SIZE(spm_ccic_formats_table); loop++) { + if (spm_ccic_formats_table[loop].pixelformat == pix_fmt->pixelformat) { + *bit_depth = spm_ccic_formats_table[loop].plane_bpp[0].num; + break; + } + } + if (loop >= ARRAY_SIZE(spm_ccic_formats_table)) + return -1; + + return 0; +} + +void spm_cvdev_fill_v4l2_format(struct v4l2_format *f) +{ + int loop = 0, plane = 0; + unsigned int width = 0, height = 0, stride = 0; + struct v4l2_plane_pix_format *plane_fmt = NULL; + + for (loop = 0; loop < ARRAY_SIZE(spm_ccic_formats_table); loop++) { + if (f->fmt.pix_mp.pixelformat == spm_ccic_formats_table[loop].pixelformat) { + width = CAM_ALIGN(f->fmt.pix_mp.width, spm_ccic_formats_table[loop].pixel_width_align); + if (0 == spm_ccic_formats_table[loop].pixel_height_align) + spm_ccic_formats_table[loop].pixel_height_align = 1; + height = CAM_ALIGN(f->fmt.pix_mp.height, spm_ccic_formats_table[loop].pixel_height_align); + pr_dbg("%s width=%u, width_align=%u",__func__ ,width, spm_ccic_formats_table[loop].pixel_width_align); + f->fmt.pix_mp.num_planes = spm_ccic_formats_table[loop].num_planes; + for (plane = 0; plane < f->fmt.pix_mp.num_planes; plane++) { + plane_fmt = &f->fmt.pix_mp.plane_fmt[plane]; + stride = CAM_ALIGN((width * spm_ccic_formats_table[loop].plane_bpp[plane].num) / (spm_ccic_formats_table[loop].plane_bpp[plane].den * 8), + spm_ccic_formats_table[loop].plane_bytes_align[plane]); + plane_fmt->sizeimage = + height * stride * spm_ccic_formats_table[loop].height_subsampling[plane].num / spm_ccic_formats_table[loop].height_subsampling[plane].den; + plane_fmt->bytesperline = stride; + pr_dbg("plane%d 
stride=%u", plane, stride); + } + break; + } + } +} + +static int spm_cvdev_queue_setup(struct vb2_queue *q, + unsigned int *num_buffers, + unsigned int *num_planes, + unsigned int sizes[], + struct device *alloc_devs[]) +{ + struct spm_ccic_vnode *ac_vnode = container_of(q, struct spm_ccic_vnode, buf_queue); + int loop = 0; + + if (num_buffers && num_planes) { + *num_planes = ac_vnode->cur_fmt.fmt.pix_mp.num_planes; + pr_dbg("%s num_buffers=%d num_planes=%d ", __func__, *num_buffers, *num_planes); + for (loop = 0; loop < *num_planes; loop++) { + sizes[loop] = ac_vnode->cur_fmt.fmt.pix_mp.plane_fmt[loop].sizeimage; + pr_dbg("plane%d size=%u ", loop, sizes[loop]); + } + } + else { + pr_err("%s NULL num_buffers or num_planes\n", __func__); + return -EINVAL; + } + + return 0; +} + +static void spm_cvdev_wait_prepare(struct vb2_queue *q) +{ + //going to wait sleep, release all locks that may block any vb2 buf/stream functions + struct spm_ccic_vnode *ac_vnode = container_of(q, struct spm_ccic_vnode, buf_queue); + mutex_unlock(&ac_vnode->mlock); +} + +static void spm_cvdev_wait_finish(struct vb2_queue *q) +{ + //wakeup from wait sleep, reacquire all locks + struct spm_ccic_vnode *ac_vnode = container_of(q, struct spm_ccic_vnode, buf_queue); + mutex_lock(&ac_vnode->mlock); +} + +static int spm_cvdev_buf_init(struct vb2_buffer *vb) +{ + struct spm_ccic_vbuffer *ac_vb = to_ccic_vbuffer(vb); + + INIT_LIST_HEAD(&ac_vb->list_entry); + ac_vb->reset_flag = 0; + return 0; +} + +static int spm_cvdev_buf_prepare(struct vb2_buffer *vb) +{ + struct spm_ccic_vbuffer *ac_vb = to_ccic_vbuffer(vb); + struct spm_ccic_vnode *ac_vnode = container_of(vb->vb2_queue, struct spm_ccic_vnode, buf_queue); + + ac_vb->flags = 0; + memset(ac_vb->reserved, 0, AC_BUF_RESERVED_DATA_LEN); + //ac_vb->vb2_v4l2_buf.flags &= ~V4L2_BUF_FLAG_IGNOR; + ac_vb->vb2_v4l2_buf.flags = 0; + ac_vb->ac_vnode = ac_vnode; + return 0; +} + +static void spm_cvdev_buf_finish(struct vb2_buffer *vb) +{ +} + +static void 
spm_cvdev_buf_cleanup(struct vb2_buffer *vb) +{ + +} + +static int spm_cvdev_start_streaming(struct vb2_queue *q, unsigned int count) +{ + struct spm_ccic_vnode *ac_vnode = container_of(q, struct spm_ccic_vnode, buf_queue); + struct ccic_ctrl *ccic_ctrl = ac_vnode->ccic_dev->ctrl; + struct ccic_dma *ccic_dma = ac_vnode->ccic_dev->dma; + struct device *dev = ac_vnode->ccic_dev->dev; + struct spm_ccic_vbuffer *ac_vb = NULL; + int ret = 0, csi2idi = 0; + + pr_dbg("%s(%s)", __func__, ac_vnode->name); + ac_vnode->total_frm = 0; + ac_vnode->sw_err_frm = 0; + ac_vnode->hw_err_frm = 0; + ac_vnode->ok_frm = 0; + ac_vnode->frame_id = 0; + + if (ac_vnode->ccic_dev->index == 0) { + csi2idi = CCIC_CSI2IDI0; + } else { + csi2idi = CCIC_CSI2IDI1; + } + ret = ccic_ctrl->ops->config_csi2idi_mux(ccic_ctrl, ac_vnode->csi2vc, csi2idi, 1); + if (ret) { + dev_err(dev, "%s config mux(enable) failed ret=%d\n", __func__, ret); + return ret; + } + if (ac_vnode->ccic_mode == CCIC_MODE_NM) { + ret = ccic_ctrl->ops->config_csi2_mbus(ccic_ctrl, CCIC_CSI2VC_NM, 0, 0, 0, 0, ac_vnode->lane_num); + } else if (ac_vnode->ccic_mode == CCIC_MODE_VC) { + ret = ccic_ctrl->ops->config_csi2_mbus(ccic_ctrl, CCIC_CSI2VC_VC, ac_vnode->main_vc, ac_vnode->sub_vc, 0, 0, ac_vnode->lane_num); + } else { + ret = ccic_ctrl->ops->config_csi2_mbus(ccic_ctrl, CCIC_CSI2VC_VCDT, + ac_vnode->main_vc, ac_vnode->sub_vc, + ac_vnode->main_dt, ac_vnode->sub_dt, ac_vnode->lane_num); + } + if (ret) { + dev_err(dev, "%s config mbus(enable) lane=%d failed ret=%d\n", __func__, + ac_vnode->lane_num, ret); + return ret; + } + ccic_dma->ops->src_sel(ccic_dma, ac_vnode->src_sel, ac_vnode->main_ccic_id); + ccic_dma->ops->ccic_enable(ccic_dma, 1); + ccic_ctrl->ops->irq_mask(ccic_ctrl, 1); + ret = spm_cvdev_dq_idle_vbuffer(ac_vnode, &ac_vb); + if (ret) { + dev_info(dev, "%s no initial buffer available\n", __func__); + } else { + spm_cvdev_q_busy_vbuffer(ac_vnode, ac_vb); + } + if (ac_vb) { + ccic_update_dma_addr(ac_vnode, ac_vb, 0); + 
ccic_dma->ops->shadow_ready(ccic_dma); + } + ac_vnode->is_streaming = 1; + return 0; +} + +static void spm_cvdev_stop_streaming(struct vb2_queue *q) +{ + struct spm_ccic_vnode *ac_vnode = container_of(q, struct spm_ccic_vnode, buf_queue); + struct ccic_ctrl *ccic_ctrl = ac_vnode->ccic_dev->ctrl; + struct ccic_dma *ccic_dma = ac_vnode->ccic_dev->dma; + int csi2idi = 0; + unsigned long flags = 0; + //int ret = 0; + + pr_dbg("%s(%s) enter", __func__, ac_vnode->name); + + pr_notice("%s total_frm(%u) sw_err_frm(%u) hw_err_frm(%u) ok_frm(%u)\n", + ac_vnode->name, ac_vnode->total_frm, ac_vnode->sw_err_frm, ac_vnode->hw_err_frm, ac_vnode->ok_frm); + if (ac_vnode->ccic_dev->index == 0) { + csi2idi = CCIC_CSI2IDI0; + } else { + csi2idi = CCIC_CSI2IDI1; + } + spin_lock_irqsave(&(ac_vnode->waitq_head.lock), flags); + wait_event_interruptible_locked_irq(ac_vnode->waitq_head, + !ac_vnode->in_irq && !ac_vnode->in_tasklet); + ac_vnode->in_streamoff = 1; + spin_unlock_irqrestore(&(ac_vnode->waitq_head.lock), flags); + ccic_ctrl->ops->irq_mask(ccic_ctrl, 0); + //if (ac_vnode->ccic_mode == CCIC_MODE_NM) { + ccic_ctrl->ops->config_csi2_mbus(ccic_ctrl, CCIC_CSI2VC_NM, 0, 0, 0, 0, 0); + //} else { + // ccic_ctrl->ops->config_csi2_mbus(ccic_ctrl, CCIC_CSI2VC_VC, ac_vnode->main_vc, ac_vnode->sub_vc, 0); + //} + ccic_ctrl->ops->config_csi2idi_mux(ccic_ctrl, ac_vnode->csi2vc, csi2idi, 0); + ccic_dma->ops->ccic_enable(ccic_dma, 0); + ac_vnode->is_streaming = 0; + spin_lock_irqsave(&(ac_vnode->waitq_head.lock), flags); + ac_vnode->in_streamoff = 0; + spin_unlock_irqrestore(&(ac_vnode->waitq_head.lock), flags); + pr_dbg("%s(%s) leave", __func__, ac_vnode->name); +} + +static void spm_cvdev_buf_queue(struct vb2_buffer *vb) +{ + unsigned long flags = 0; + struct spm_ccic_vbuffer *ac_vb = to_ccic_vbuffer(vb); + struct vb2_queue *buf_queue = vb->vb2_queue; + struct spm_ccic_vnode *ac_vnode = container_of(buf_queue, struct spm_ccic_vnode, buf_queue); + //struct ccic_dma *ccic_dma = 
ac_vnode->ccic_dev->dma; + //unsigned int v4l2_buf_flags = ac_vnode->v4l2_buf_flags[vb->index]; + + spin_lock_irqsave(&ac_vnode->slock, flags); + atomic_inc(&ac_vnode->queued_buf_cnt); + list_add_tail(&ac_vb->list_entry, &ac_vnode->queued_list); + //if (ac_vnode->is_streaming) { + // if (__spm_cvdev_busy_list_empty(ac_vnode)) { + // __spm_cvdev_dq_idle_vbuffer(ac_vnode, &ac_vb); + // if (ac_vb) { + // __spm_cvdev_q_busy_vbuffer(ac_vnode, ac_vb); + // ccic_update_dma_addr(ac_vnode, ac_vb, 0); + // ccic_dma->ops->shadow_ready(ccic_dma); + // } + // } + //} + spin_unlock_irqrestore(&ac_vnode->slock, flags); +} + +static struct vb2_ops spm_ccic_vb2_ops = { + .queue_setup = spm_cvdev_queue_setup, + .wait_prepare = spm_cvdev_wait_prepare, + .wait_finish = spm_cvdev_wait_finish, + .buf_init = spm_cvdev_buf_init, + .buf_prepare = spm_cvdev_buf_prepare, + .buf_finish = spm_cvdev_buf_finish, + .buf_cleanup = spm_cvdev_buf_cleanup, + .start_streaming = spm_cvdev_start_streaming, + .stop_streaming = spm_cvdev_stop_streaming, + .buf_queue = spm_cvdev_buf_queue, +}; + +static void spm_cvdev_cancel_all_buffers(struct spm_ccic_vnode *ac_vnode) +{ + unsigned long flags = 0; + struct spm_ccic_vbuffer *pos = NULL, *n = NULL; + struct vb2_buffer *vb2_buf = NULL; + + spin_lock_irqsave(&ac_vnode->slock, flags); + list_for_each_entry_safe(pos, n, &ac_vnode->queued_list, list_entry) { + vb2_buf = &(pos->vb2_v4l2_buf.vb2_buf); + vb2_buffer_done(vb2_buf, VB2_BUF_STATE_ERROR); + list_del_init(&pos->list_entry); + atomic_dec(&ac_vnode->queued_buf_cnt); + } + list_for_each_entry_safe(pos, n, &ac_vnode->busy_list, list_entry) { + vb2_buf = &(pos->vb2_v4l2_buf.vb2_buf); + vb2_buffer_done(vb2_buf, VB2_BUF_STATE_ERROR); + list_del_init(&pos->list_entry); + atomic_dec(&ac_vnode->busy_buf_cnt); + } + spin_unlock_irqrestore(&ac_vnode->slock, flags); +} + +static int spm_cvdev_vidioc_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *b) +{ + struct video_device *vnode = 
video_devdata(file); + struct spm_ccic_vnode *ac_vnode = container_of(vnode, struct spm_ccic_vnode, vnode); + int ret = 0; + + mutex_lock(&ac_vnode->mlock); + ret = vb2_reqbufs(&ac_vnode->buf_queue, b); + mutex_unlock(&ac_vnode->mlock); + return ret; +} + +static int spm_cvdev_vidioc_querybuf(struct file *file, void *fh, struct v4l2_buffer *b) +{ + struct video_device *vnode = video_devdata(file); + struct spm_ccic_vnode *ac_vnode = container_of(vnode, struct spm_ccic_vnode, vnode); + int ret = 0; + + mutex_lock(&ac_vnode->mlock); + ret = vb2_querybuf(&ac_vnode->buf_queue, b); + mutex_unlock(&ac_vnode->mlock); + return ret; +} + +static int spm_cvdev_vidioc_qbuf(struct file *file, void *fh, struct v4l2_buffer *b) +{ + struct video_device *vnode = video_devdata(file); + struct spm_ccic_vnode *ac_vnode = container_of(vnode, struct spm_ccic_vnode, vnode); + int ret = 0; + unsigned int i = 0; + + ac_vnode->v4l2_buf_flags[b->index] = b->flags; + if (!b->m.planes) { + return -EINVAL; + } + for (i = 0; i < b->length; i++) { + ac_vnode->planes_offset[b->index][i] = b->m.planes[i].data_offset; + } + mutex_lock(&ac_vnode->mlock); + ret = vb2_qbuf(&ac_vnode->buf_queue, vnode->v4l2_dev->mdev, b); + mutex_unlock(&ac_vnode->mlock); + return ret; +} + +static int spm_cvdev_vidioc_expbuf(struct file *file, void *fh, struct v4l2_exportbuffer *e) +{ + struct video_device *vnode = video_devdata(file); + struct spm_ccic_vnode *ac_vnode = container_of(vnode, struct spm_ccic_vnode, vnode); + int ret = 0; + + mutex_lock(&ac_vnode->mlock); +#ifndef MODULE + ret = vb2_expbuf(&ac_vnode->buf_queue, e); +#endif + mutex_unlock(&ac_vnode->mlock); + return ret; +} + +static int spm_cvdev_vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b) +{ + struct video_device *vnode = video_devdata(file); + struct spm_ccic_vnode *ac_vnode = container_of(vnode, struct spm_ccic_vnode, vnode); + int ret = 0; + + mutex_lock(&ac_vnode->mlock); + ret = vb2_dqbuf(&ac_vnode->buf_queue, b, file->f_flags 
& O_NONBLOCK); + mutex_unlock(&ac_vnode->mlock); + return ret; +} + +static int spm_cvdev_vidioc_create_bufs(struct file *file, void *fh, struct v4l2_create_buffers *b) +{ + struct video_device *vnode = video_devdata(file); + struct spm_ccic_vnode *ac_vnode = container_of(vnode, struct spm_ccic_vnode, vnode); + int ret = 0; + + mutex_lock(&ac_vnode->mlock); +#ifndef MODULE + ret = vb2_create_bufs(&ac_vnode->buf_queue, b); +#endif + mutex_unlock(&ac_vnode->mlock); + return ret; +} + +static int spm_cvdev_vidioc_prepare_buf(struct file *file, void *fh, struct v4l2_buffer *b) +{ + struct video_device *vnode = video_devdata(file); + struct spm_ccic_vnode *ac_vnode = container_of(vnode, struct spm_ccic_vnode, vnode); + int ret = 0; + + mutex_lock(&ac_vnode->mlock); + ret = vb2_prepare_buf(&ac_vnode->buf_queue, vnode->v4l2_dev->mdev, b); + mutex_unlock(&ac_vnode->mlock); + return ret; +} + +static int spm_cvdev_vidioc_streamon(struct file *file, void *fn, enum v4l2_buf_type i) +{ + int ret = 0; + struct video_device *vnode = video_devdata(file); + struct spm_ccic_vnode *ac_vnode = container_of(vnode, struct spm_ccic_vnode, vnode); + mutex_lock(&ac_vnode->mlock); + ret = vb2_streamon(&ac_vnode->buf_queue, i); + mutex_unlock(&ac_vnode->mlock); + return ret; +} + +static int spm_cvdev_vidioc_streamoff(struct file *file, void *fn, enum v4l2_buf_type i) +{ + int ret = 0; + struct video_device *vnode = video_devdata(file); + struct spm_ccic_vnode *ac_vnode = container_of(vnode, struct spm_ccic_vnode, vnode); + unsigned long flags = 0; + + pr_dbg("%s(%s) enter", __func__, ac_vnode->name); + pr_dbg("%s(%s) queued_buf_cnt=%d busy_buf_cnt=%d.", __func__, ac_vnode->name, atomic_read(&ac_vnode->queued_buf_cnt), atomic_read(&ac_vnode->busy_buf_cnt)); + spin_lock_irqsave(&ac_vnode->waitq_head.lock, flags); + wait_event_interruptible_locked_irq(ac_vnode->waitq_head, !ac_vnode->in_tasklet && !ac_vnode->in_irq); + ac_vnode->in_streamoff = 1; + 
spin_unlock_irqrestore(&ac_vnode->waitq_head.lock, flags); + pr_dbg("%s tasklet clean", ac_vnode->name); + mutex_lock(&ac_vnode->mlock); + pr_dbg("%s cancel all buffers", ac_vnode->name); + spm_cvdev_cancel_all_buffers(ac_vnode); + pr_dbg("%s streamoff", ac_vnode->name); + ret = vb2_streamoff(&ac_vnode->buf_queue, i); + mutex_unlock(&ac_vnode->mlock); + spin_lock_irqsave(&ac_vnode->waitq_head.lock, flags); + ac_vnode->in_streamoff = 0; + spin_unlock_irqrestore(&ac_vnode->waitq_head.lock, flags); + pr_dbg("%s(%s) leave", __func__, ac_vnode->name); + return ret; +} + +static int spm_cvdev_vidioc_querycap(struct file *file, void *fh, struct v4l2_capability *cap) +{ + struct video_device *vnode = video_devdata(file); + struct spm_ccic_vnode *ac_vnode = container_of(vnode, struct spm_ccic_vnode, vnode); + + strlcpy(cap->driver, ac_vnode->name, 16); + cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE_MPLANE; + cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE_MPLANE; + return 0; +} + +/* + * static int spm_cvdev_vidioc_enum_fmt_vid_cap_mplane(struct file *file, void *fh, struct v4l2_fmtdesc *f) + * { + * return 0; + * } + */ + +static int spm_cvdev_vidioc_g_fmt_vid_cap_mplane(struct file *file, void *fh, struct v4l2_format *f) +{ + struct video_device *vnode = video_devdata(file); + struct spm_ccic_vnode *ac_vnode = container_of(vnode, struct spm_ccic_vnode, vnode); + + pr_dbg("get format fourcc code[0x%08x] (%dx%d)", + ac_vnode->cur_fmt.fmt.pix_mp.pixelformat, + ac_vnode->cur_fmt.fmt.pix_mp.width, + ac_vnode->cur_fmt.fmt.pix_mp.height); + *f = ac_vnode->cur_fmt; + return 0; +} + +static int __spm_cvdev_vidioc_s_fmt_vid_cap_mplane(struct file *file, void *fh, struct v4l2_format *f) +{ + struct video_device *vnode = video_devdata(file); + struct spm_ccic_vnode *ac_vnode = container_of(vnode, struct spm_ccic_vnode, vnode); + struct vb2_queue *vb2_queue = &ac_vnode->buf_queue; + struct ccic_dma *ccic_dma = 
ac_vnode->ccic_dev->dma; + struct device *dev = ac_vnode->ccic_dev->dev; + int ret = 0, bit_depth = 0; + unsigned int fmt_code = 0, width = 0, height = 0; + + pr_dbg("set format fourcc code[0x%08x] (%dx%d)", + f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.width, f->fmt.pix_mp.height); + width = f->fmt.pix_mp.width; + height = f->fmt.pix_mp.height; + if (vb2_is_streaming(vb2_queue)) { + pr_err("%s set format not allowed while streaming on.\n", __func__); + return -EBUSY; + } + ret = spm_cvdev_lookup_formats_table(f, &bit_depth); + if (ret) { + pr_err("%s failed to lookup formats table fourcc code[0x%08x]\n", __func__, f->fmt.pix_mp.pixelformat); + return ret; + } + + if (bit_depth == 8) { + fmt_code = MEDIA_BUS_FMT_SBGGR8_1X8; + } else if (bit_depth == 10) { + fmt_code = MEDIA_BUS_FMT_SBGGR10_1X10; + } else if (bit_depth == 12) { + fmt_code = MEDIA_BUS_FMT_SBGGR12_1X12; + } else if (bit_depth == 16) { + fmt_code = MEDIA_BUS_FMT_UYVY8_2X8; + width *= 2; + } else { + dev_err(dev, "unknown bit_depth=%d\n", bit_depth); + return -EINVAL; + } + ret = ccic_dma->ops->set_fmt(ccic_dma, width, height, fmt_code); + if (ret) { + dev_err(dev, "%s set fmt(%ux%u code:0x%08x) failed\n", __func__, + width, height, fmt_code); + return ret; + } + spm_cvdev_fill_v4l2_format(f); + ac_vnode->cur_fmt = *f; + + return 0; +} + +static int spm_cvdev_vidioc_s_fmt_vid_cap_mplane(struct file *file, void *fh, struct v4l2_format *f) +{ + struct video_device *vnode = video_devdata(file); + struct spm_ccic_vnode *ac_vnode = container_of(vnode, struct spm_ccic_vnode, vnode); + int ret = 0; + + mutex_lock(&ac_vnode->mlock); + ret = __spm_cvdev_vidioc_s_fmt_vid_cap_mplane(file, fh, f); + mutex_unlock(&ac_vnode->mlock); + return ret; +} + +static int spm_cvdev_vidioc_try_fmt_vid_cap_mplane(struct file *file, void *fh, struct v4l2_format *f) +{ + return 0; +} + +static long spm_cvdev_vidioc_default(struct file *file, + void *fh, + bool valid_prio, + unsigned int cmd, + void *arg) +{ + struct video_device 
*vnode = video_devdata(file); + struct spm_ccic_vnode *ac_vnode = container_of(vnode, struct spm_ccic_vnode, vnode); + struct device *dev = ac_vnode->ccic_dev->dev; + struct v4l2_ccic_params *ccic_params = NULL; + //int ret = 0; + + switch (cmd) { + case VIDIOC_CCIC_S_PARAMS: + ccic_params = (struct v4l2_ccic_params*)arg; + if (ac_vnode->is_streaming) { + dev_err(dev, "%s set params failed, device is busy now\n", ac_vnode->name); + return -EBUSY; + } + ac_vnode->lane_num = ccic_params->lane_num; + ac_vnode->ccic_mode = ccic_params->ccic_mode; + ac_vnode->ch_mode = ccic_params->ch_mode; + ac_vnode->main_vc = ccic_params->main_vc; + ac_vnode->sub_vc = ccic_params->sub_vc; + ac_vnode->main_dt = ccic_params->main_dt; + ac_vnode->sub_dt = ccic_params->sub_dt; + ac_vnode->main_ccic_id = ccic_params->main_ccic_id; + if (ac_vnode->ch_mode == CCIC_CH_MODE_MAIN) { + if (ac_vnode->ccic_mode == CCIC_MODE_NM) { + ac_vnode->src_sel = CCIC_DMA_SEL_LOCAL_MAIN; + } else { + ac_vnode->src_sel = CCIC_DMA_SEL_LOCAL_VCDT; + } + } else { + ac_vnode->src_sel = CCIC_DMA_SEL_REMOTE_VCDT; + } + dev_info(dev, "%s set mipi lane:%u ccic_mode=%d ch_mode=%d main_vc=%u sub_vc=%u main_dt=%u sub_dt=%u main_ccic_id=%u\n", + ac_vnode->name, ccic_params->lane_num, ccic_params->ccic_mode, + ccic_params->ch_mode, ccic_params->main_vc, ccic_params->sub_vc, + ccic_params->main_dt, ccic_params->sub_dt, ccic_params->main_ccic_id); + break; + default: + pr_err("unknown ioctl cmd(%d)\n", cmd); + return -ENOIOCTLCMD; + } + + return 0; +} + +static struct v4l2_ioctl_ops spm_ccic_v4l2_ioctl_ops = { + /* VIDIOC_QUERYCAP handler */ + .vidioc_querycap = spm_cvdev_vidioc_querycap, + /* VIDIOC_ENUM_FMT handlers */ + /* .vidioc_enum_fmt_vid_cap = spm_cvdev_vidioc_enum_fmt_vid_cap_mplane, */ + /* VIDIOC_G_FMT handlers */ + .vidioc_g_fmt_vid_cap_mplane = spm_cvdev_vidioc_g_fmt_vid_cap_mplane, + /* VIDIOC_S_FMT handlers */ + .vidioc_s_fmt_vid_cap_mplane = spm_cvdev_vidioc_s_fmt_vid_cap_mplane, + /* VIDIOC_TRY_FMT 
handlers */ + .vidioc_try_fmt_vid_cap_mplane = spm_cvdev_vidioc_try_fmt_vid_cap_mplane, + /* Buffer handlers */ + .vidioc_reqbufs = spm_cvdev_vidioc_reqbufs, + .vidioc_querybuf = spm_cvdev_vidioc_querybuf, + .vidioc_qbuf = spm_cvdev_vidioc_qbuf, + .vidioc_expbuf = spm_cvdev_vidioc_expbuf, + .vidioc_dqbuf = spm_cvdev_vidioc_dqbuf, + .vidioc_create_bufs = spm_cvdev_vidioc_create_bufs, + .vidioc_prepare_buf = spm_cvdev_vidioc_prepare_buf, + .vidioc_streamon = spm_cvdev_vidioc_streamon, + .vidioc_streamoff = spm_cvdev_vidioc_streamoff, + .vidioc_default = spm_cvdev_vidioc_default, +}; + +static int spm_cvdev_open(struct file *file) +{ + struct video_device *vnode = video_devdata(file); + struct spm_ccic_vnode *ac_vnode = container_of(vnode, struct spm_ccic_vnode, vnode); + struct ccic_ctrl *ccic_ctrl = ac_vnode->ccic_dev->ctrl; + struct ccic_dma *ccic_dma = ac_vnode->ccic_dev->dma; + struct device *dev = ac_vnode->ccic_dev->dev; + int ret = 0; + pr_dbg("%s in, open vnode(%s - %s).", __func__, ac_vnode->name, video_device_node_name(vnode)); + + if (atomic_inc_return(&ac_vnode->ref_cnt) != 1) { + pr_err("vnode(%s - %s) was already openned.\n", ac_vnode->name, video_device_node_name(vnode)); + atomic_dec(&ac_vnode->ref_cnt); + return -EBUSY; + } + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + dev_err(dev, "%s rpm get failed ret=%d\n", ac_vnode->name, ret); + return ret; + } + ccic_ctrl->ops->clk_enable(ccic_ctrl, 1); + ccic_dma->ops->clk_enable(ccic_dma, 1); + pr_dbg("%s exit, open vnode(%s - %s).", __func__, ac_vnode->name, video_device_node_name(vnode)); + return 0; +} + +static void __spm_cvdev_close(struct spm_ccic_vnode *ac_vnode) +{ + unsigned long flags = 0; + + pr_dbg("%s(%s) enter", __func__, ac_vnode->name); + pr_dbg("%s(%s) queued_buf_cnt=%d busy_buf_cnt=%d.", __func__, ac_vnode->name, atomic_read(&ac_vnode->queued_buf_cnt), atomic_read(&ac_vnode->busy_buf_cnt)); + spin_lock_irqsave(&ac_vnode->waitq_head.lock, flags); + ac_vnode->in_streamoff = 1; + 
wait_event_interruptible_locked_irq(ac_vnode->waitq_head, !ac_vnode->in_tasklet && !ac_vnode->in_irq); + spin_unlock_irqrestore(&ac_vnode->waitq_head.lock, flags); + pr_dbg("%s tasklet clean", ac_vnode->name); + mutex_lock(&ac_vnode->mlock); + pr_dbg("%s cancel all buffers", ac_vnode->name); + spm_cvdev_cancel_all_buffers(ac_vnode); + pr_dbg("%s queue release", ac_vnode->name); + vb2_queue_release(&ac_vnode->buf_queue); + ac_vnode->buf_queue.owner = NULL; + ac_vnode->is_streaming = 0; + mutex_unlock(&ac_vnode->mlock); + spin_lock_irqsave(&ac_vnode->waitq_head.lock, flags); + ac_vnode->in_streamoff = 0; + spin_unlock_irqrestore(&ac_vnode->waitq_head.lock, flags); + pr_dbg("%s(%s) leave", __func__, ac_vnode->name); +} + +static int spm_cvdev_close(struct file *file) +{ + struct video_device *vnode = video_devdata(file); + struct spm_ccic_vnode *ac_vnode = container_of(vnode, struct spm_ccic_vnode, vnode); + struct ccic_ctrl *ccic_ctrl = ac_vnode->ccic_dev->ctrl; + struct ccic_dma *ccic_dma = ac_vnode->ccic_dev->dma; + struct device *dev = ac_vnode->ccic_dev->dev; + + if (atomic_dec_and_test(&ac_vnode->ref_cnt)) { + __spm_cvdev_close(ac_vnode); + ccic_dma->ops->clk_enable(ccic_dma, 0); + ccic_ctrl->ops->clk_enable(ccic_ctrl, 0); + pm_runtime_put_sync(dev); + } + + return v4l2_fh_release(file); +} + +static __poll_t spm_cvdev_poll(struct file *file, struct poll_table_struct *wait) +{ + __poll_t ret; + struct video_device *vnode = video_devdata(file); + struct spm_ccic_vnode *ac_vnode = container_of(vnode, struct spm_ccic_vnode, vnode); + + ret = vb2_poll(&ac_vnode->buf_queue, file, wait); + + return ret; +} + +#ifdef SPM_CONFIG_COMPAT + +static int alloc_userspace(unsigned int size, u32 aux_space, + void __user **new_p64) +{ + *new_p64 = compat_alloc_user_space(size + aux_space); + if (!*new_p64) + return -ENOMEM; + if (clear_user(*new_p64, size)) + return -EFAULT; + return 0; +} + +long spm_cvdev_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg) 
+{ + void __user *p32 = compat_ptr(arg); + void __user *new_p64 = NULL; + //void __user *aux_buf; + //u32 aux_space; + long err = 0; + const size_t ioc_size = _IOC_SIZE(cmd); + //size_t ioc_size64 = 0; + + //if (_IOC_TYPE(cmd) == 'V') { + // switch (_IOC_NR(cmd)) { + // //int r + // case _IOC_NR(VIDIOC_G_SLICE_MODE): + // case _IOC_NR(VIDIOC_CPU_Z1): + // //int w + // case _IOC_NR(VIDIOC_PUT_PIPELINE): + // case _IOC_NR(VIDIOC_RESET_PIPELINE): + // ioc_size64 = sizeof(int); + // break; + // //unsigned int + // case _IOC_NR(VIDIOC_G_PIPE_STATUS): + // ioc_size64 = sizeof(int); + // break; + // case _IOC_NR(VIDIOC_S_PORT_CFG): + // ioc_size64 = sizeof(struct v4l2_vi_port_cfg); + // break; + // case _IOC_NR(VIDIOC_DBG_REG_WRITE): + // case _IOC_NR(VIDIOC_DBG_REG_READ): + // ioc_size64 = sizeof(struct v4l2_vi_dbg_reg); + // break; + // case _IOC_NR(VIDIOC_CFG_INPUT_INTF): + // ioc_size64 = sizeof(struct v4l2_vi_input_interface); + // break; + // case _IOC_NR(VIDIOC_SET_SELECTION): + // ioc_size64 = sizeof(struct v4l2_vi_selection); + // break; + // case _IOC_NR(VIDIOC_QUERY_SLICE_READY): + // ioc_size64 = sizeof(struct v4l2_vi_slice_info); + // break; + // case _IOC_NR(VIDIOC_S_BANDWIDTH): + // ioc_size64 = sizeof(struct v4l2_vi_bandwidth_info); + // break; + // case _IOC_NR(VIDIOC_G_ENTITY_INFO): + // ioc_size64 = sizeof(struct v4l2_vi_entity_info); + // break; + // } + // pr_dbg("%s cmd_nr=%d ioc_size32=%u ioc_size64=%u",__func__, _IOC_NR(cmd), ioc_size, ioc_size64); + //} + if (_IOC_DIR(cmd) != _IOC_NONE) { + err = alloc_userspace(ioc_size, 0, &new_p64); + if (err) { + pr_err("%s alloc userspace failed err=%l cmd=%d ioc_size=%u\n", __func__, err, _IOC_NR(cmd), ioc_size); + return err; + } + if ((_IOC_DIR(cmd) & _IOC_WRITE)) { + err = copy_in_user(new_p64, p32, ioc_size); + if (err) { + pr_err("%s copy in user 1 failed err=%l cmd=%d ioc_size=%u\n", __func__, err, _IOC_NR(cmd), ioc_size); + return err; + } + } + } + + err = video_ioctl2(file, cmd, (unsigned 
long)new_p64); + if (err) { + return err; + } + + if ((_IOC_DIR(cmd) & _IOC_READ)) { + err = copy_in_user(p32, new_p64, ioc_size); + if (err) { + pr_err("%s copy in user 2 failed err=%l cmd=%d ioc_size=%u\n", __func__, err, _IOC_NR(cmd), ioc_size); + return err; + } + } + + //switch (cmd) { + // //int r + // case VIDIOC_G_SLICE_MODE: + // case VIDIOC_CPU_Z1: + // //int w + // case VIDIOC_PUT_PIPELINE: + // case VIDIOC_RESET_PIPELINE: + // err = alloc_userspace(sizeof(int), 0, &new_p64); + // if (!err && assign_in_user((int __user *)new_p64, + // (compat_int_t __user *)p32)) + // err = -EFAULT; + // break; + // //unsigned int + // case VIDIOC_G_PIPE_STATUS: + // err = alloc_userspace(sizeof(unsigned int), 0, &new_p64); + // if (!err && assign_in_user((unsigned int __user *)new_p64, + // (compat_uint_t __user *)p32)) + // err = -EFAULT; + // break; + // case VIDIOC_S_PORT_CFG: + // err = alloc_userspace(sizeof(struct v4l2_vi_port_cfg), 0, &new_p64); + // if (!err) { + // err = -EFAULT; + // break; + // } + // break; + // case VIDIOC_DBG_REG_WRITE: + // case VIDIOC_DBG_REG_READ: + // break; + // case VIDIOC_CFG_INPUT_INTF: + // break; + // case VIDIOC_SET_SELECTION: + // break; + // case VIDIOC_QUERY_SLICE_READY: + // break; + // case VIDIOC_S_BANDWIDTH: + // break; + // case VIDIOC_G_ENTITY_INFO: + // break; + + //} + //if (err) + // return err; + return 0; +} +#endif + +static struct v4l2_file_operations spm_ccic_file_operations = { + .owner = THIS_MODULE, + .poll = spm_cvdev_poll, + .unlocked_ioctl = video_ioctl2, + .open = spm_cvdev_open, + .release = spm_cvdev_close, +#ifdef SPM_CONFIG_COMPAT + .compat_ioctl32 = spm_cvdev_compat_ioctl32, +#endif +}; + +static void spm_cvdev_release(struct video_device *vdev) +{ + struct spm_ccic_vnode *ac_vnode = container_of(vdev, struct spm_ccic_vnode, vnode); + + pr_dbg("%s(%s %s) enter.", __func__, ac_vnode->name, video_device_node_name(&ac_vnode->vnode)); + mutex_destroy(&ac_vnode->mlock); +} +/* +static void 
spm_cvdev_block_release(struct spm_ccic_block *b) +{ + struct spm_ccic_vnode *ac_vnode = container_of(b, struct spm_ccic_vnode, ac_block); + + pr_dbg("%s(%s %s) enter.", __func__, ac_vnode->name, video_device_node_name(&ac_vnode->vnode)); + vb2_queue_release(&ac_vnode->buf_queue); + video_unregister_device(&ac_vnode->vnode); +} +*/ + +void spm_cvdev_destroy_vnode(struct spm_ccic_vnode *ac_vnode) +{ + video_unregister_device(&ac_vnode->vnode); +} + +struct spm_ccic_vnode* spm_cvdev_create_vnode(const char *name, + unsigned int idx, + struct v4l2_device *v4l2_dev, + struct device *alloc_dev, + struct ccic_dev *ccic_dev, + void (*dma_tasklet_handler)(unsigned long), + unsigned int min_buffers_needed) +{ + int ret = 0, i = 0; + struct spm_ccic_vnode *ac_vnode = NULL; + struct ccic_dma_context *dma_ctx = NULL; + struct ccic_dma_work_struct *ccic_dma_work = NULL; + + if (NULL == name || NULL == v4l2_dev || NULL == alloc_dev || NULL == ccic_dev) { + pr_err("%s invalid arguments.\n", __func__); + return NULL; + } + ac_vnode = devm_kzalloc(alloc_dev, sizeof(*ac_vnode), GFP_KERNEL); + if (NULL == ac_vnode) { + pr_err("%s failed to alloc mem for spm_ccic_vnode(%s).\n", __func__, name); + return NULL; + } + dma_ctx = &ac_vnode->dma_ctx; + dma_ctx->ac_vnode = ac_vnode; + INIT_LIST_HEAD(&dma_ctx->dma_work_idle_list); + INIT_LIST_HEAD(&dma_ctx->dma_work_busy_list); + spin_lock_init(&dma_ctx->slock); + for (i = 0; i < CCIC_DMA_WORK_MAX_CNT; i++) { + ccic_dma_work = devm_kzalloc(alloc_dev, sizeof(*ccic_dma_work), GFP_KERNEL); + if (!ccic_dma_work) { + dev_err(alloc_dev, "%s not enough mem\n", __func__); + return NULL; + } + tasklet_init(&ccic_dma_work->dma_tasklet, dma_tasklet_handler, (unsigned long)ccic_dma_work); + INIT_LIST_HEAD(&ccic_dma_work->idle_list_entry); + INIT_LIST_HEAD(&ccic_dma_work->busy_list_entry); + ccic_dma_work->ac_vnode = ac_vnode; + list_add(&ccic_dma_work->idle_list_entry, &dma_ctx->dma_work_idle_list); + } + ac_vnode->csi2vc = CCIC_CSI2VC_MAIN; + 
ac_vnode->src_sel = CCIC_DMA_SEL_LOCAL_MAIN; + ac_vnode->lane_num = 1; + ac_vnode->in_streamoff = 0; + ac_vnode->in_irq = 0; + ac_vnode->in_tasklet = 0; + INIT_LIST_HEAD(&ac_vnode->queued_list); + INIT_LIST_HEAD(&ac_vnode->busy_list); + atomic_set(&ac_vnode->queued_buf_cnt, 0); + atomic_set(&ac_vnode->busy_buf_cnt, 0); + spin_lock_init(&ac_vnode->slock); + mutex_init(&ac_vnode->mlock); + init_waitqueue_head(&ac_vnode->waitq_head); + ac_vnode->idx = idx; + ac_vnode->buf_queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC | V4L2_BUF_FLAG_TSTAMP_SRC_SOE; + ac_vnode->buf_queue.buf_struct_size = sizeof(struct spm_ccic_vbuffer); + ac_vnode->buf_queue.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; + ac_vnode->buf_queue.io_modes = VB2_DMABUF; + ac_vnode->buf_queue.ops = &spm_ccic_vb2_ops; + ac_vnode->buf_queue.mem_ops = &vb2_dma_contig_memops; + ac_vnode->buf_queue.min_buffers_needed = min_buffers_needed; + ac_vnode->buf_queue.dev = alloc_dev; + ret = vb2_queue_init(&ac_vnode->buf_queue); + if (ret) { + pr_err("%s vb2_queue_init failed for spm_ccic_vnode(%s).\n", __func__, name); + goto queue_init_fail; + } + + strlcpy(ac_vnode->vnode.name, name, 32); + strlcpy(ac_vnode->name, name, 32); + ac_vnode->ccic_dev = ccic_dev; + ac_vnode->vnode.queue = &ac_vnode->buf_queue; + ac_vnode->vnode.fops = &spm_ccic_file_operations; + ac_vnode->vnode.ioctl_ops = &spm_ccic_v4l2_ioctl_ops; + ac_vnode->vnode.release = spm_cvdev_release; + ac_vnode->vnode.device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE_MPLANE; + ac_vnode->vnode.v4l2_dev = v4l2_dev; + ret = __video_register_device(&ac_vnode->vnode, VFL_TYPE_VIDEO, -1, 1, THIS_MODULE); + if (ret) { + pr_err("%s video dev register failed for spm_ccic_vnode(%s).\n", __func__, name); + goto vdev_register_fail; + } + ccic_dev->vnode = ac_vnode; + pr_dbg("create vnode(%s - %s) successfully.", name, video_device_node_name(&ac_vnode->vnode)); + return ac_vnode; +vdev_register_fail: + vb2_queue_release(&ac_vnode->buf_queue); 
+queue_init_fail: + devm_kfree(alloc_dev, ac_vnode); + return NULL; +} + +int __spm_cvdev_dq_idle_vbuffer(struct spm_ccic_vnode *ac_vnode, struct spm_ccic_vbuffer **ac_vb) +{ + *ac_vb = list_first_entry_or_null(&ac_vnode->queued_list, struct spm_ccic_vbuffer, list_entry); + if (NULL == *ac_vb) + return -1; + list_del_init(&(*ac_vb)->list_entry); + atomic_dec(&ac_vnode->queued_buf_cnt); + return 0; +} + +int __spm_cvdev_q_idle_vbuffer(struct spm_ccic_vnode *ac_vnode, struct spm_ccic_vbuffer *ac_vb) +{ + list_add_tail(&ac_vb->list_entry, &ac_vnode->queued_list); + atomic_inc(&ac_vnode->queued_buf_cnt); + return 0; +} + +int spm_cvdev_dq_idle_vbuffer(struct spm_ccic_vnode *ac_vnode, struct spm_ccic_vbuffer **ac_vb) +{ + unsigned long flags = 0; + int ret = 0; + + spin_lock_irqsave(&ac_vnode->slock, flags); + ret = __spm_cvdev_dq_idle_vbuffer(ac_vnode, ac_vb); + spin_unlock_irqrestore(&ac_vnode->slock, flags); + return ret; +} + +int spm_cvdev_q_idle_vbuffer(struct spm_ccic_vnode *ac_vnode, struct spm_ccic_vbuffer *ac_vb) +{ + unsigned long flags = 0; + int ret = 0; + + spin_lock_irqsave(&ac_vnode->slock, flags); + ret = __spm_cvdev_q_idle_vbuffer(ac_vnode, ac_vb); + spin_unlock_irqrestore(&ac_vnode->slock, flags); + + return ret; +} + +int spm_cvdev_pick_idle_vbuffer(struct spm_ccic_vnode *ac_vnode, struct spm_ccic_vbuffer **ac_vb) +{ + unsigned long flags = 0; + + spin_lock_irqsave(&ac_vnode->slock, flags); + *ac_vb = list_first_entry_or_null(&ac_vnode->queued_list, struct spm_ccic_vbuffer, list_entry); + spin_unlock_irqrestore(&ac_vnode->slock, flags); + if (NULL == *ac_vb) { + return -1; + } + return 0; +} + +int __spm_cvdev_pick_idle_vbuffer(struct spm_ccic_vnode *ac_vnode, struct spm_ccic_vbuffer **ac_vb) +{ + *ac_vb = list_first_entry_or_null(&ac_vnode->queued_list, struct spm_ccic_vbuffer, list_entry); + if (NULL == *ac_vb) { + return -1; + } + return 0; +} + +int __spm_cvdev_dq_busy_vbuffer(struct spm_ccic_vnode *ac_vnode, struct spm_ccic_vbuffer **ac_vb) +{ + 
*ac_vb = list_first_entry_or_null(&ac_vnode->busy_list, struct spm_ccic_vbuffer, list_entry); + if (NULL == *ac_vb) + return -1; + list_del_init(&(*ac_vb)->list_entry); + atomic_dec(&ac_vnode->busy_buf_cnt); + return 0; +} + +int __spm_cvdev_q_busy_vbuffer(struct spm_ccic_vnode *ac_vnode, struct spm_ccic_vbuffer *ac_vb) +{ + list_add_tail(&ac_vb->list_entry, &ac_vnode->busy_list); + atomic_inc(&ac_vnode->busy_buf_cnt); + return 0; +} + +int spm_cvdev_dq_busy_vbuffer(struct spm_ccic_vnode *ac_vnode, struct spm_ccic_vbuffer **ac_vb) +{ + unsigned long flags = 0; + int ret = 0; + + spin_lock_irqsave(&ac_vnode->slock, flags); + ret = __spm_cvdev_dq_busy_vbuffer(ac_vnode, ac_vb); + spin_unlock_irqrestore(&ac_vnode->slock, flags); + return ret; +} + +int spm_cvdev_pick_busy_vbuffer(struct spm_ccic_vnode *ac_vnode, struct spm_ccic_vbuffer **ac_vb) +{ + unsigned long flags = 0; + + spin_lock_irqsave(&ac_vnode->slock, flags); + *ac_vb = list_first_entry_or_null(&ac_vnode->busy_list, struct spm_ccic_vbuffer, list_entry); + spin_unlock_irqrestore(&ac_vnode->slock, flags); + if (NULL == *ac_vb) + return -1; + + return 0; +} + +int __spm_cvdev_pick_busy_vbuffer(struct spm_ccic_vnode *ac_vnode, struct spm_ccic_vbuffer **ac_vb) +{ + *ac_vb = list_first_entry_or_null(&ac_vnode->busy_list, struct spm_ccic_vbuffer, list_entry); + if (NULL == *ac_vb) + return -1; + + return 0; +} + +int spm_cvdev_q_busy_vbuffer(struct spm_ccic_vnode *ac_vnode, struct spm_ccic_vbuffer *ac_vb) +{ + unsigned long flags = 0; + int ret = 0; + + spin_lock_irqsave(&ac_vnode->slock, flags); + ret = __spm_cvdev_q_busy_vbuffer(ac_vnode, ac_vb); + spin_unlock_irqrestore(&ac_vnode->slock, flags); + return ret; +} + +int spm_cvdev_export_ccic_vbuffer(struct spm_ccic_vbuffer *ac_vb, int with_error) +{ + struct vb2_buffer *vb = &ac_vb->vb2_v4l2_buf.vb2_buf; + if (with_error) + vb2_buffer_done(vb, VB2_BUF_STATE_ERROR); + else + vb2_buffer_done(vb, VB2_BUF_STATE_DONE); + return 0; +} + +int 
__spm_cvdev_busy_list_empty(struct spm_ccic_vnode *ac_vnode) +{ + return list_empty(&ac_vnode->busy_list); +} + +int spm_cvdev_busy_list_empty(struct spm_ccic_vnode *ac_vnode) +{ + unsigned long flags = 0; + int ret = 0; + + spin_lock_irqsave(&ac_vnode->slock, flags); + ret = __spm_cvdev_busy_list_empty(ac_vnode); + spin_unlock_irqrestore(&ac_vnode->slock, flags); + return ret; +} + +int __spm_cvdev_idle_list_empty(struct spm_ccic_vnode *ac_vnode) +{ + return list_empty(&ac_vnode->queued_list); +} + +int spm_cvdev_idle_list_empty(struct spm_ccic_vnode *ac_vnode) +{ + unsigned long flags = 0; + int ret = 0; + + spin_lock_irqsave(&ac_vnode->slock, flags); + ret = __spm_cvdev_idle_list_empty(ac_vnode); + spin_unlock_irqrestore(&ac_vnode->slock, flags); + return ret; +} diff --git a/drivers/media/platform/spacemit/camera/cam_ccic/ccic_vdev.h b/drivers/media/platform/spacemit/camera/cam_ccic/ccic_vdev.h new file mode 100644 index 000000000000..c061d4b0f256 --- /dev/null +++ b/drivers/media/platform/spacemit/camera/cam_ccic/ccic_vdev.h @@ -0,0 +1,174 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * ccic_vdev.h - video device functions + * + * Copyright(C) 2019 SPM Micro Limited + */ + +#ifndef _SPM_VDEV_H_ +#define _SPM_VDEV_H_ +#include +#include +#include +#include +#include +#include "ccic_drv.h" + +#define CCIC_DMA_WORK_MAX_CNT (16) + +struct spm_ccic_vbuffer; +struct spm_ccic_vnode; + +struct ccic_dma_context +{ + struct list_head dma_work_idle_list; + struct list_head dma_work_busy_list; + spinlock_t slock; + struct spm_ccic_vnode *ac_vnode; +}; + +struct spm_ccic_vnode { + struct video_device vnode; + char name[32]; + struct vb2_queue buf_queue; + struct list_head queued_list; + struct list_head busy_list; + struct ccic_dma_context dma_ctx; + atomic_t queued_buf_cnt; + atomic_t busy_buf_cnt; + atomic_t ref_cnt; + spinlock_t slock; + struct mutex mlock; + struct v4l2_format cur_fmt; + struct wait_queue_head waitq_head; + int in_streamoff; + int in_tasklet; + int in_irq; 
+ int is_streaming; + unsigned int idx; + unsigned int total_frm; + unsigned int sw_err_frm; + unsigned int hw_err_frm; + unsigned int ok_frm; + unsigned int planes_offset[VB2_MAX_FRAME][VB2_MAX_PLANES]; + unsigned int v4l2_buf_flags[VB2_MAX_FRAME]; + struct ccic_dev *ccic_dev; + int csi2vc; + int src_sel; + int lane_num; + int ccic_mode; + int ch_mode; + unsigned int main_ccic_id; + unsigned int main_vc; + unsigned int sub_vc; + unsigned int main_dt; + unsigned int sub_dt; + uint64_t frame_id; + void *usr_data; +}; + +struct ccic_dma_work_struct { + struct tasklet_struct dma_tasklet; + struct list_head idle_list_entry; + struct list_head busy_list_entry; + unsigned int irq_status; + struct spm_ccic_vnode *ac_vnode; +}; + +#define AC_BUF_FLAG_SOF_TOUCH (1 << 0) +#define AC_BUF_FLAG_DONE_TOUCH (1 << 1) +#define AC_BUF_FLAG_HW_ERR (1 << 2) +#define AC_BUF_FLAG_SW_ERR (1 << 3) +#define AC_BUF_FLAG_TIMESTAMPED (1 << 4) +#define AC_BUF_FLAG_CCIC_TOUCH (1 << 5) + +#define AC_BUF_RESERVED_DATA_LEN (32) +struct spm_ccic_vbuffer { + struct vb2_v4l2_buffer vb2_v4l2_buf; + struct list_head list_entry; + unsigned int reset_flag; + unsigned int flags; + struct spm_ccic_vnode *ac_vnode; + unsigned char reserved[AC_BUF_RESERVED_DATA_LEN]; +}; + +#define vb2_buffer_to_spm_ccic_vbuffer(vb) ((struct spm_ccic_vbuffer*)(vb)) + +#define CAM_ALIGN(a, b) ({ \ + unsigned int ___tmp1 = (a); \ + unsigned int ___tmp2 = (b); \ + unsigned int ___tmp3 = ___tmp1 % ___tmp2; \ + ___tmp1 /= ___tmp2; \ + if (___tmp3) \ + ___tmp1++; \ + ___tmp1 *= ___tmp2; \ + ___tmp1; \ + }) + +#define is_vnode_streaming(vnode) ((vnode)->buf_queue.streaming) + +static inline dma_addr_t spm_vb2_buf_paddr(struct vb2_buffer *vb, unsigned int plane_no) +{ + unsigned int offset = 0; + dma_addr_t paddr = 0; + struct spm_ccic_vbuffer *ac_vb = vb2_buffer_to_spm_ccic_vbuffer(vb); + struct spm_ccic_vnode *ac_vnode = ac_vb->ac_vnode; + dma_addr_t *dma_addr = (dma_addr_t*)vb2_plane_cookie(vb, plane_no); + + BUG_ON(!ac_vnode); + 
offset = ac_vnode->planes_offset[vb->index][plane_no]; + paddr = *dma_addr + offset; + return paddr; +} + +static inline void ccic_update_dma_addr(struct spm_ccic_vnode *ac_vnode, + struct spm_ccic_vbuffer *ac_vbuf, unsigned int offset) +{ + dma_addr_t p0 = 0; + struct ccic_dma *ccic_dma = ac_vnode->ccic_dev->dma; + struct vb2_buffer *vb2_buf = &(ac_vbuf->vb2_v4l2_buf.vb2_buf); + + p0 = spm_vb2_buf_paddr(vb2_buf, 0) + offset; + ccic_dma->ops->set_addr(ccic_dma, p0, 0, 0); +} + +static inline void* ac_vnode_get_usrdata(struct spm_ccic_vnode *ac_vnode) +{ + return ac_vnode->usr_data; +} + +static inline struct spm_ccic_vbuffer* to_ccic_vbuffer(struct vb2_buffer *vb2) +{ + struct vb2_v4l2_buffer *vb2_v4l2_buf = to_vb2_v4l2_buffer(vb2); + return container_of(vb2_v4l2_buf, struct spm_ccic_vbuffer, vb2_v4l2_buf); +} + +struct spm_ccic_vnode* spm_cvdev_create_vnode(const char *name, + unsigned int idx, + struct v4l2_device *v4l2_dev, + struct device *alloc_dev, + struct ccic_dev *ccic_dev, + void (*dma_tasklet_handler)(unsigned long), + unsigned int min_buffers_needed); +void spm_cvdev_destroy_vnode(struct spm_ccic_vnode *ac_vnode); + + +int spm_cvdev_busy_list_empty(struct spm_ccic_vnode *ac_vnode); +int __spm_cvdev_busy_list_empty(struct spm_ccic_vnode *ac_vnode); +int spm_cvdev_idle_list_empty(struct spm_ccic_vnode *ac_vnode); +int __spm_cvdev_idle_list_empty(struct spm_ccic_vnode *ac_vnode); +int spm_cvdev_dq_idle_vbuffer(struct spm_ccic_vnode *ac_vnode, struct spm_ccic_vbuffer **ac_vb); +int spm_cvdev_pick_idle_vbuffer(struct spm_ccic_vnode *ac_vnode, struct spm_ccic_vbuffer **ac_vb); +int __spm_cvdev_pick_idle_vbuffer(struct spm_ccic_vnode *ac_vnode, struct spm_ccic_vbuffer **ac_vb); +int spm_cvdev_q_idle_vbuffer(struct spm_ccic_vnode *ac_vnode, struct spm_ccic_vbuffer *ac_vb); +int __spm_cvdev_dq_idle_vbuffer(struct spm_ccic_vnode *ac_vnode, struct spm_ccic_vbuffer **ac_vb); +int __spm_cvdev_q_idle_vbuffer(struct spm_ccic_vnode *ac_vnode, struct spm_ccic_vbuffer 
*ac_vb); +int spm_cvdev_dq_busy_vbuffer(struct spm_ccic_vnode *ac_vnode, struct spm_ccic_vbuffer **ac_vb); +int spm_cvdev_pick_busy_vbuffer(struct spm_ccic_vnode *ac_vnode, struct spm_ccic_vbuffer **ac_vb); +int __spm_cvdev_pick_busy_vbuffer(struct spm_ccic_vnode *ac_vnode, struct spm_ccic_vbuffer **ac_vb); +int spm_cvdev_q_busy_vbuffer(struct spm_ccic_vnode *ac_vnode, struct spm_ccic_vbuffer *ac_vb); +int __spm_cvdev_dq_busy_vbuffer(struct spm_ccic_vnode *ac_vnode, struct spm_ccic_vbuffer **ac_vb); +int __spm_cvdev_q_busy_vbuffer(struct spm_ccic_vnode *ac_vnode, struct spm_ccic_vbuffer *ac_vb); +int spm_cvdev_export_ccic_vbuffer(struct spm_ccic_vbuffer *ac_vb, int with_error); +void spm_cvdev_fill_v4l2_format(struct v4l2_format *f); +#endif diff --git a/drivers/media/platform/spacemit/camera/vi/k1xvi/fe_isp.c b/drivers/media/platform/spacemit/camera/vi/k1xvi/fe_isp.c index fadc0631de13..a1bd81c30ee5 100644 --- a/drivers/media/platform/spacemit/camera/vi/k1xvi/fe_isp.c +++ b/drivers/media/platform/spacemit/camera/vi/k1xvi/fe_isp.c @@ -1185,7 +1185,7 @@ static int csi_subdev_video_s_stream(struct v4l2_subdev *sd, int enable) cam_err("%s(%s) config mux(enable) failed ret=%d", __func__, sc_subdev->name, ret); return ret; } - ret = csi_ctrl->ops->config_csi2_mbus(csi_ctrl, CCIC_CSI2VC_NM, 0, 0, mipi_lane_num); + ret = csi_ctrl->ops->config_csi2_mbus(csi_ctrl, CCIC_CSI2VC_NM, 0, 0, 0, 0, mipi_lane_num); if (ret) { cam_err("%s(%s) config mbus(enable) lane=%d failed ret=%d", __func__, sc_subdev->name, 4, ret); return ret; @@ -1193,7 +1193,7 @@ static int csi_subdev_video_s_stream(struct v4l2_subdev *sd, int enable) csi_ctrl->ops->irq_mask(csi_ctrl, 1); } else { csi_ctrl->ops->irq_mask(csi_ctrl, 0); - csi_ctrl->ops->config_csi2_mbus(csi_ctrl, CCIC_CSI2VC_NM, 0, 0, 0); + csi_ctrl->ops->config_csi2_mbus(csi_ctrl, CCIC_CSI2VC_NM, 0, 0, 0, 0, 0); csi_ctrl->ops->config_csi2idi_mux(csi_ctrl, csi2vc, csi2idi, 0); csi_subdev_core_s_power(sd, 0); } diff --git 
a/include/uapi/media/k1x/k1x_ccic_uapi.h b/include/uapi/media/k1x/k1x_ccic_uapi.h new file mode 100644 index 000000000000..b4ce3de050f2 --- /dev/null +++ b/include/uapi/media/k1x/k1x_ccic_uapi.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * k1x_ccic_uapi.h - Driver uapi for SPACEMIT K1X CCIC + * + * Copyright (C) 2024 SPACEMIT Micro Limited + */ + +#ifndef _UAPI_LINUX_K1X_CCIC_H_ +#define _UAPI_LINUX_K1X_CCIC_H_ +//#include + +enum { + CCIC_MODE_NM = 0, + CCIC_MODE_VC, + CCIC_MODE_VCDT, +}; + +enum { + CCIC_CH_MODE_MAIN = 0, + CCIC_CH_MODE_SUB, +}; + +struct v4l2_ccic_params { + unsigned int lane_num; + int ccic_mode; + int ch_mode; + unsigned int main_ccic_id; + unsigned int main_vc; + unsigned int sub_vc; + unsigned int main_dt; + unsigned int sub_dt; +}; + + +#define BASE_VIDIOC_CCIC (BASE_VIDIOC_PRIVATE + 20) +#define VIDIOC_CCIC_S_PARAMS _IOWR('V', BASE_VIDIOC_CCIC + 1, struct v4l2_ccic_params) +#endif