Make sure the OUT DBELL base address reflects the latest values written
to it.

Fix: Add a wait until the OUT DBELL base address register is updated
with the DMA ring descriptor address, and modify the setup_oq function
to properly handle failures.

Fixes: 2c0c32c72be29 ("octeon_ep_vf: add hardware configuration APIs")
Signed-off-by: Sathesh Edara
Signed-off-by: Shinas Rasheed
Signed-off-by: Vimlesh Kumar
---
 .../marvell/octeon_ep_vf/octep_vf_cn9k.c      |  3 ++-
 .../marvell/octeon_ep_vf/octep_vf_cnxk.c      | 25 ++++++++++++++++---
 .../marvell/octeon_ep_vf/octep_vf_main.h      |  6 ++++-
 .../marvell/octeon_ep_vf/octep_vf_rx.c        |  4 ++-
 4 files changed, 32 insertions(+), 6 deletions(-)

diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
index 88937fce75f1..4c769b27c278 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
@@ -196,7 +196,7 @@ static void octep_vf_setup_iq_regs_cn93(struct octep_vf_device *oct, int iq_no)
 }
 
 /* Setup registers for a hardware Rx Queue */
-static void octep_vf_setup_oq_regs_cn93(struct octep_vf_device *oct, int oq_no)
+static int octep_vf_setup_oq_regs_cn93(struct octep_vf_device *oct, int oq_no)
 {
 	struct octep_vf_oq *oq = oct->oq[oq_no];
 	u32 time_threshold = 0;
@@ -239,6 +239,7 @@ static void octep_vf_setup_oq_regs_cn93(struct octep_vf_device *oct, int oq_no)
 	time_threshold = CFG_GET_OQ_INTR_TIME(oct->conf);
 	reg_val = ((u64)time_threshold << 32) | CFG_GET_OQ_INTR_PKT(oct->conf);
 	octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
+	return 0;
 }
 
 /* Setup registers for a VF mailbox */
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
index 1f79dfad42c6..30dc09205446 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
@@ -199,11 +199,12 @@ static void octep_vf_setup_iq_regs_cnxk(struct octep_vf_device *oct, int iq_no)
 }
 
 /* Setup registers for a hardware Rx Queue */
-static void octep_vf_setup_oq_regs_cnxk(struct octep_vf_device *oct, int oq_no)
+static int octep_vf_setup_oq_regs_cnxk(struct octep_vf_device *oct, int oq_no)
 {
 	struct octep_vf_oq *oq = oct->oq[oq_no];
 	u32 time_threshold = 0;
 	u64 oq_ctl = ULL(0);
+	u64 reg_ba_val;
 	u64 reg_val;
 
 	reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no));
@@ -214,6 +215,25 @@ static void octep_vf_setup_oq_regs_cnxk(struct octep_vf_device *oct, int oq_no)
 			reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no));
 		} while (!(reg_val & CNXK_VF_R_OUT_CTL_IDLE));
 	}
+	octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_WMARK(oq_no), oq->max_count);
+	/* Wait for WMARK to get applied */
+	usleep_range(10, 15);
+
+	octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_BADDR(oq_no), oq->desc_ring_dma);
+	octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_RSIZE(oq_no), oq->max_count);
+	reg_ba_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_BADDR(oq_no));
+	if (reg_ba_val != oq->desc_ring_dma) {
+		do {
+			if (reg_ba_val == UINT64_MAX)
+				return -1;
+			octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_BADDR(oq_no),
+					     oq->desc_ring_dma);
+			octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_RSIZE(oq_no),
+					     oq->max_count);
+			reg_ba_val = octep_vf_read_csr64(oct,
+							 CNXK_VF_SDP_R_OUT_SLIST_BADDR(oq_no));
+		} while (reg_ba_val != oq->desc_ring_dma);
+	}
 
 	reg_val &= ~(CNXK_VF_R_OUT_CTL_IMODE);
 	reg_val &= ~(CNXK_VF_R_OUT_CTL_ROR_P);
@@ -227,8 +247,6 @@ static void octep_vf_setup_oq_regs_cnxk(struct octep_vf_device *oct, int oq_no)
 	reg_val |= (CNXK_VF_R_OUT_CTL_ES_P);
 
 	octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no), reg_val);
-	octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_BADDR(oq_no), oq->desc_ring_dma);
-	octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_RSIZE(oq_no), oq->max_count);
 
 	oq_ctl = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no));
 	/* Clear the ISIZE and BSIZE (22-0) */
@@ -250,6 +268,7 @@ static void octep_vf_setup_oq_regs_cnxk(struct octep_vf_device *oct, int oq_no)
 	reg_val &= ~GENMASK_ULL(31, 0);
 	reg_val |= CFG_GET_OQ_WMARK(oct->conf);
 	octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_WMARK(oq_no), reg_val);
+	return 0;
 }
 
 /* Setup registers for a VF mailbox */
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
index b9f13506f462..65454d875677 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
@@ -40,6 +40,10 @@
 		(iq_)->max_count - IQ_INSTR_PENDING(iq_);  \
 })
 
+#ifndef UINT64_MAX
+#define UINT64_MAX ((u64)(~((u64)0)))	/* 0xFFFFFFFFFFFFFFFF */
+#endif
+
 /* PCI address space mapping information.
  * Each of the 3 address spaces given by BAR0, BAR2 and BAR4 of
  * Octeon gets mapped to different physical address spaces in
@@ -55,7 +59,7 @@ struct octep_vf_mmio {
 
 struct octep_vf_hw_ops {
 	void (*setup_iq_regs)(struct octep_vf_device *oct, int q);
-	void (*setup_oq_regs)(struct octep_vf_device *oct, int q);
+	int (*setup_oq_regs)(struct octep_vf_device *oct, int q);
 	void (*setup_mbox_regs)(struct octep_vf_device *oct, int mbox);
 
 	irqreturn_t (*non_ioq_intr_handler)(void *ioq_vector);
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
index d70c8be3cfc4..6446f6bf0b90 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
@@ -171,7 +171,9 @@ static int octep_vf_setup_oq(struct octep_vf_device *oct, int q_no)
 		goto oq_fill_buff_err;
 
 	octep_vf_oq_reset_indices(oq);
-	oct->hw_ops.setup_oq_regs(oct, q_no);
+	if (oct->hw_ops.setup_oq_regs(oct, q_no))
+		goto oq_fill_buff_err;
+
 	oct->num_oqs++;
 
 	return 0;
-- 
2.47.0
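
Not part of the patch: a minimal, self-contained sketch of the write/read-back/retry
pattern the cnxk hunk applies to the SLIST_BADDR register, for readers unfamiliar with
the idea. The mmio_write64()/mmio_read64() helpers and the shadow register below are
assumptions for illustration only; in the driver the accesses go through
octep_vf_write_csr64()/octep_vf_read_csr64(), and an all-ones read is treated as an
unreachable device.

/* Standalone illustration of write/read-back/retry; not driver code. */
#include <stdint.h>

static uint64_t fake_csr;		/* assumption: stands in for the hardware register */

static void mmio_write64(uint64_t val)	/* hypothetical helper */
{
	fake_csr = val;
}

static uint64_t mmio_read64(void)	/* hypothetical helper */
{
	return fake_csr;
}

/* Program the ring base address and confirm the device latched it. */
static int program_ring_base(uint64_t ring_dma)
{
	uint64_t readback;

	mmio_write64(ring_dma);
	readback = mmio_read64();
	while (readback != ring_dma) {
		if (readback == UINT64_MAX)	/* all-ones: device not responding */
			return -1;
		mmio_write64(ring_dma);		/* rewrite and poll again */
		readback = mmio_read64();
	}
	return 0;
}

int main(void)
{
	return program_ring_base(0x12340000ULL) ? 1 : 0;
}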