Replace usage of runtime_get() and runtime_put() function pointers in the MHI core with direct calls to the pm_runtime APIs, as the controller drivers were not doing more than calling the PM runtime APIs in these callbacks. This refactoring simplifies the runtime PM handling and removes the need for drivers to provide no-op or duplicate implementations of these ops. Signed-off-by: Krishna Chaitanya Chundru --- drivers/bus/mhi/host/init.c | 1 - drivers/bus/mhi/host/internal.h | 7 +++++-- drivers/bus/mhi/host/main.c | 19 ++++++++++++------- 3 files changed, 17 insertions(+), 10 deletions(-) diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c index 099be8dd190078af4ca12cd7a1de739572feca9d..5b22808fd543f9febcaa9ea623101d2d5d05b040 100644 --- a/drivers/bus/mhi/host/init.c +++ b/drivers/bus/mhi/host/init.c @@ -938,7 +938,6 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl, int ret, i; if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->regs || - !mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put || !mhi_cntrl->status_cb || !mhi_cntrl->read_reg || !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs || !mhi_cntrl->irq || !mhi_cntrl->reg_len) diff --git a/drivers/bus/mhi/host/internal.h b/drivers/bus/mhi/host/internal.h index 7937bb1f742ca8033c17a01f5cffbf103330f51f..61e03298e898e6dd02d2a977cddc4c87b21e3a6c 100644 --- a/drivers/bus/mhi/host/internal.h +++ b/drivers/bus/mhi/host/internal.h @@ -7,6 +7,8 @@ #ifndef _MHI_INT_H #define _MHI_INT_H +#include <linux/pm_runtime.h> + #include "../common.h" extern const struct bus_type mhi_bus_type; @@ -353,8 +355,9 @@ static inline bool mhi_is_active(struct mhi_controller *mhi_cntrl) static inline void mhi_trigger_resume(struct mhi_controller *mhi_cntrl) { pm_wakeup_event(&mhi_cntrl->mhi_dev->dev, 0); - mhi_cntrl->runtime_get(mhi_cntrl); - mhi_cntrl->runtime_put(mhi_cntrl); + pm_runtime_get(mhi_cntrl->cntrl_dev); + pm_runtime_mark_last_busy(mhi_cntrl->cntrl_dev); + pm_runtime_put(mhi_cntrl->cntrl_dev); } /* Register access 
methods */ diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c index 8615512743199a59a58c3756d9cc3407079cee7e..7ac1162a0a81ae11245a2bbd9bf6fd6c0f86fbc1 100644 --- a/drivers/bus/mhi/host/main.c +++ b/drivers/bus/mhi/host/main.c @@ -661,7 +661,8 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl, if (mhi_chan->dir == DMA_TO_DEVICE) { atomic_dec(&mhi_cntrl->pending_pkts); /* Release the reference got from mhi_queue() */ - mhi_cntrl->runtime_put(mhi_cntrl); + pm_runtime_mark_last_busy(mhi_cntrl->cntrl_dev); + pm_runtime_put(mhi_cntrl->cntrl_dev); } /* @@ -1155,7 +1156,7 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info, * for host->device buffer, balanced put is done on buffer completion * for device->host buffer, balanced put is after ringing the DB */ - mhi_cntrl->runtime_get(mhi_cntrl); + pm_runtime_get(mhi_cntrl->cntrl_dev); /* Assert dev_wake (to exit/prevent M1/M2)*/ mhi_cntrl->wake_toggle(mhi_cntrl); @@ -1166,8 +1167,10 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info, if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) mhi_ring_chan_db(mhi_cntrl, mhi_chan); - if (dir == DMA_FROM_DEVICE) - mhi_cntrl->runtime_put(mhi_cntrl); + if (dir == DMA_FROM_DEVICE) { + pm_runtime_mark_last_busy(mhi_cntrl->cntrl_dev); + pm_runtime_put(mhi_cntrl->cntrl_dev); + } read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags); @@ -1374,7 +1377,7 @@ static int mhi_update_channel_state(struct mhi_controller *mhi_cntrl, ret = mhi_device_get_sync(mhi_cntrl->mhi_dev); if (ret) return ret; - mhi_cntrl->runtime_get(mhi_cntrl); + pm_runtime_get(mhi_cntrl->cntrl_dev); reinit_completion(&mhi_chan->completion); ret = mhi_send_cmd(mhi_cntrl, mhi_chan, cmd); @@ -1405,7 +1408,8 @@ static int mhi_update_channel_state(struct mhi_controller *mhi_cntrl, trace_mhi_channel_command_end(mhi_cntrl, mhi_chan, to_state, TPS("Updated")); exit_channel_update: - mhi_cntrl->runtime_put(mhi_cntrl); + 
pm_runtime_mark_last_busy(mhi_cntrl->cntrl_dev); + pm_runtime_put(mhi_cntrl->cntrl_dev); mhi_device_put(mhi_cntrl->mhi_dev); return ret; @@ -1591,7 +1595,8 @@ static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl, if (mhi_chan->dir == DMA_TO_DEVICE) { atomic_dec(&mhi_cntrl->pending_pkts); /* Release the reference got from mhi_queue() */ - mhi_cntrl->runtime_put(mhi_cntrl); + pm_runtime_mark_last_busy(mhi_cntrl->cntrl_dev); + pm_runtime_put(mhi_cntrl->cntrl_dev); } if (!buf_info->pre_mapped) -- 2.34.1 Remove the runtime_get and runtime_put function pointers from the struct mhi_controller interface and all associated usage across the MHI host stack. These callbacks were previously required by MHI drivers to abstract runtime PM handling, but are now redundant. The MHI core has been updated to directly use standard pm_runtime_* APIs, eliminating the need for driver-specific indirection. Signed-off-by: Krishna Chaitanya Chundru --- drivers/accel/qaic/mhi_controller.c | 11 ----------- drivers/bus/mhi/host/pci_generic.c | 24 +++--------------------- drivers/net/wireless/ath/ath11k/mhi.c | 10 ---------- drivers/net/wireless/ath/ath12k/mhi.c | 11 ----------- include/linux/mhi.h | 4 ---- 5 files changed, 3 insertions(+), 57 deletions(-) diff --git a/drivers/accel/qaic/mhi_controller.c b/drivers/accel/qaic/mhi_controller.c index 13a14c6c61689fa4af47dade6d62b3cb1a148354..319344be658b38656f6e85e92be4b5473f43c897 100644 --- a/drivers/accel/qaic/mhi_controller.c +++ b/drivers/accel/qaic/mhi_controller.c @@ -820,15 +820,6 @@ static void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *addr, writel_relaxed(val, addr); } -static int mhi_runtime_get(struct mhi_controller *mhi_cntrl) -{ - return 0; -} - -static void mhi_runtime_put(struct mhi_controller *mhi_cntrl) -{ -} - static void mhi_status_cb(struct mhi_controller *mhi_cntrl, enum mhi_callback reason) { struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(mhi_cntrl->cntrl_dev)); @@ -889,8 +880,6 @@ 
struct mhi_controller *qaic_mhi_register_controller(struct pci_dev *pci_dev, voi mhi_cntrl->iova_start = 0; mhi_cntrl->iova_stop = PHYS_ADDR_MAX - 1; mhi_cntrl->status_cb = mhi_status_cb; - mhi_cntrl->runtime_get = mhi_runtime_get; - mhi_cntrl->runtime_put = mhi_runtime_put; mhi_cntrl->read_reg = mhi_read_reg; mhi_cntrl->write_reg = mhi_write_reg; mhi_cntrl->regs = mhi_bar; diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c index b188bbf7de042d8b9aa0dde1217d2c86558c3caf..7036b1654c550a79e53fb449b944d67b68aad677 100644 --- a/drivers/bus/mhi/host/pci_generic.c +++ b/drivers/bus/mhi/host/pci_generic.c @@ -1173,23 +1173,6 @@ static int mhi_pci_get_irqs(struct mhi_controller *mhi_cntrl, return 0; } -static int mhi_pci_runtime_get(struct mhi_controller *mhi_cntrl) -{ - /* The runtime_get() MHI callback means: - * Do whatever is requested to leave M3. - */ - return pm_runtime_get(mhi_cntrl->cntrl_dev); -} - -static void mhi_pci_runtime_put(struct mhi_controller *mhi_cntrl) -{ - /* The runtime_put() MHI callback means: - * Device can be moved in M3 state. 
- */ - pm_runtime_mark_last_busy(mhi_cntrl->cntrl_dev); - pm_runtime_put(mhi_cntrl->cntrl_dev); -} - static void mhi_pci_recovery_work(struct work_struct *work) { struct mhi_pci_device *mhi_pdev = container_of(work, struct mhi_pci_device, @@ -1277,7 +1260,7 @@ static int mhi_pci_generic_edl_trigger(struct mhi_controller *mhi_cntrl) } pm_wakeup_event(&mhi_cntrl->mhi_dev->dev, 0); - mhi_cntrl->runtime_get(mhi_cntrl); + pm_runtime_get(mhi_cntrl->cntrl_dev); ret = mhi_get_channel_doorbell_offset(mhi_cntrl, &val); if (ret) @@ -1291,7 +1274,8 @@ static int mhi_pci_generic_edl_trigger(struct mhi_controller *mhi_cntrl) mhi_soc_reset(mhi_cntrl); err_get_chdb: - mhi_cntrl->runtime_put(mhi_cntrl); + pm_runtime_mark_last_busy(mhi_cntrl->cntrl_dev); + pm_runtime_put(mhi_cntrl->cntrl_dev); mhi_device_put(mhi_cntrl->mhi_dev); return ret; @@ -1338,8 +1322,6 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) mhi_cntrl->read_reg = mhi_pci_read_reg; mhi_cntrl->write_reg = mhi_pci_write_reg; mhi_cntrl->status_cb = mhi_pci_status_cb; - mhi_cntrl->runtime_get = mhi_pci_runtime_get; - mhi_cntrl->runtime_put = mhi_pci_runtime_put; mhi_cntrl->mru = info->mru_default; mhi_cntrl->name = info->name; diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c index acd76e9392d31192aca6776319ef0829a1c69628..18bac9e4bc35bffabef05171b88bd5515e7df925 100644 --- a/drivers/net/wireless/ath/ath11k/mhi.c +++ b/drivers/net/wireless/ath/ath11k/mhi.c @@ -230,14 +230,6 @@ static int ath11k_mhi_get_msi(struct ath11k_pci *ab_pci) return 0; } -static int ath11k_mhi_op_runtime_get(struct mhi_controller *mhi_cntrl) -{ - return 0; -} - -static void ath11k_mhi_op_runtime_put(struct mhi_controller *mhi_cntrl) -{ -} static char *ath11k_mhi_op_callback_to_str(enum mhi_callback reason) { @@ -384,8 +376,6 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci) mhi_ctrl->sbl_size = SZ_512K; mhi_ctrl->seg_len = SZ_512K; mhi_ctrl->fbc_download = true; - 
mhi_ctrl->runtime_get = ath11k_mhi_op_runtime_get; - mhi_ctrl->runtime_put = ath11k_mhi_op_runtime_put; mhi_ctrl->status_cb = ath11k_mhi_op_status_cb; mhi_ctrl->read_reg = ath11k_mhi_op_read_reg; mhi_ctrl->write_reg = ath11k_mhi_op_write_reg; diff --git a/drivers/net/wireless/ath/ath12k/mhi.c b/drivers/net/wireless/ath/ath12k/mhi.c index 08f44baf182a5e34651e8c117fe279942f8ad8f4..99d8d9a8944cefa2561cd47d83bbeb53ef13044d 100644 --- a/drivers/net/wireless/ath/ath12k/mhi.c +++ b/drivers/net/wireless/ath/ath12k/mhi.c @@ -230,15 +230,6 @@ static int ath12k_mhi_get_msi(struct ath12k_pci *ab_pci) return 0; } -static int ath12k_mhi_op_runtime_get(struct mhi_controller *mhi_cntrl) -{ - return 0; -} - -static void ath12k_mhi_op_runtime_put(struct mhi_controller *mhi_cntrl) -{ -} - static char *ath12k_mhi_op_callback_to_str(enum mhi_callback reason) { switch (reason) { @@ -386,8 +377,6 @@ int ath12k_mhi_register(struct ath12k_pci *ab_pci) mhi_ctrl->sbl_size = SZ_512K; mhi_ctrl->seg_len = SZ_512K; mhi_ctrl->fbc_download = true; - mhi_ctrl->runtime_get = ath12k_mhi_op_runtime_get; - mhi_ctrl->runtime_put = ath12k_mhi_op_runtime_put; mhi_ctrl->status_cb = ath12k_mhi_op_status_cb; mhi_ctrl->read_reg = ath12k_mhi_op_read_reg; mhi_ctrl->write_reg = ath12k_mhi_op_write_reg; diff --git a/include/linux/mhi.h b/include/linux/mhi.h index dd372b0123a6da5107b807ff8fe940c567eb2030..312e5c4b9cf8a46ffb20e2afc70441a11ecf659c 100644 --- a/include/linux/mhi.h +++ b/include/linux/mhi.h @@ -347,8 +347,6 @@ struct mhi_controller_config { * @wake_get: CB function to assert device wake (optional) * @wake_put: CB function to de-assert device wake (optional) * @wake_toggle: CB function to assert and de-assert device wake (optional) - * @runtime_get: CB function to controller runtime resume (required) - * @runtime_put: CB function to decrement pm usage (required) * @map_single: CB function to create TRE buffer * @unmap_single: CB function to destroy TRE buffer * @read_reg: Read a MHI register via the 
physical link (required) @@ -427,8 +425,6 @@ struct mhi_controller { void (*wake_get)(struct mhi_controller *mhi_cntrl, bool override); void (*wake_put)(struct mhi_controller *mhi_cntrl, bool override); void (*wake_toggle)(struct mhi_controller *mhi_cntrl); - int (*runtime_get)(struct mhi_controller *mhi_cntrl); - void (*runtime_put)(struct mhi_controller *mhi_cntrl); int (*map_single)(struct mhi_controller *mhi_cntrl, struct mhi_buf_info *buf); void (*unmap_single)(struct mhi_controller *mhi_cntrl, -- 2.34.1 Add runtime power management support to the mhi_net driver to align with the updated MHI framework, which expects runtime PM to be enabled by client drivers. This ensures the controller remains active during data transfers and can autosuspend when idle. The driver now uses pm_runtime_get() and pm_runtime_put() around transmit, receive, and buffer refill operations. Runtime PM is initialized during probe with autosuspend enabled and a 100ms delay. The device is marked with pm_runtime_no_callbacks() to notify the PM framework that there are no callbacks for this driver. 
Signed-off-by: Krishna Chaitanya Chundru --- drivers/net/mhi_net.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/drivers/net/mhi_net.c b/drivers/net/mhi_net.c index ae169929a9d8e449b5a427993abf68e8d032fae2..c5c697f29e69e9bc874b6cfff4699de12a4af114 100644 --- a/drivers/net/mhi_net.c +++ b/drivers/net/mhi_net.c @@ -9,6 +9,7 @@ #include #include #include +#include <linux/pm_runtime.h> #include #include @@ -76,6 +77,7 @@ static netdev_tx_t mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev) struct mhi_device *mdev = mhi_netdev->mdev; int err; + pm_runtime_get(&mdev->dev); err = mhi_queue_skb(mdev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT); if (unlikely(err)) { net_err_ratelimited("%s: Failed to queue TX buf (%d)\n", @@ -94,6 +96,7 @@ static netdev_tx_t mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev) u64_stats_inc(&mhi_netdev->stats.tx_dropped); u64_stats_update_end(&mhi_netdev->stats.tx_syncp); + pm_runtime_put_autosuspend(&mdev->dev); return NETDEV_TX_OK; } @@ -261,6 +264,7 @@ static void mhi_net_ul_callback(struct mhi_device *mhi_dev, } u64_stats_update_end(&mhi_netdev->stats.tx_syncp); + pm_runtime_put_autosuspend(&mdev->dev); if (netif_queue_stopped(ndev) && !mhi_queue_is_full(mdev, DMA_TO_DEVICE)) netif_wake_queue(ndev); } @@ -277,6 +281,7 @@ static void mhi_net_rx_refill_work(struct work_struct *work) size = mhi_netdev->mru ? 
mhi_netdev->mru : READ_ONCE(ndev->mtu); + pm_runtime_get_sync(&mdev->dev); while (!mhi_queue_is_full(mdev, DMA_FROM_DEVICE)) { skb = netdev_alloc_skb(ndev, size); if (unlikely(!skb)) @@ -296,6 +301,7 @@ cond_resched(); } + pm_runtime_put_autosuspend(&mdev->dev); /* If we're still starved of rx buffers, reschedule later */ if (mhi_get_free_desc_count(mdev, DMA_FROM_DEVICE) == mhi_netdev->rx_queue_sz) schedule_delayed_work(&mhi_netdev->rx_refill, HZ / 2); @@ -362,12 +368,19 @@ static int mhi_net_probe(struct mhi_device *mhi_dev, SET_NETDEV_DEV(ndev, &mhi_dev->dev); + pm_runtime_no_callbacks(&mhi_dev->dev); + devm_pm_runtime_set_active_enabled(&mhi_dev->dev); + pm_runtime_set_autosuspend_delay(&mhi_dev->dev, 100); + pm_runtime_use_autosuspend(&mhi_dev->dev); + pm_runtime_get(&mhi_dev->dev); err = mhi_net_newlink(mhi_dev, ndev); if (err) { free_netdev(ndev); + pm_runtime_put_autosuspend(&mhi_dev->dev); return err; } + pm_runtime_put_autosuspend(&mhi_dev->dev); return 0; } -- 2.34.1 The current MHI runtime PM design is flawed, as the MHI core attempts to manage power references internally via mhi_queue() and related paths. This is problematic because the controller drivers do not have the knowledge of the client PM status due to the broken PM topology. So when they runtime suspend the controller, the client drivers could no longer function. To address this, in the new design, the client drivers report their own runtime PM status now and the PM framework makes sure that the parent (controller driver) and other components up in the chain remain active. This leverages the standard parent-child PM relationship. Since MHI creates a mhi_dev device without an associated driver, we explicitly enable runtime PM on it and mark it with pm_runtime_no_callbacks() to indicate to the PM core that no callbacks exist for this device. 
This is only needed for the MHI controller, since the controller driver uses the bus device just like a PCI device. Also update the MHI core to explicitly manage runtime PM references in __mhi_device_get_sync() and mhi_device_put() to ensure the controller does not enter suspend while a client device is active. Signed-off-by: Krishna Chaitanya Chundru --- drivers/bus/mhi/host/internal.h | 6 +++--- drivers/bus/mhi/host/main.c | 28 ++++------------------------ drivers/bus/mhi/host/pm.c | 18 ++++++++---------- 3 files changed, 15 insertions(+), 37 deletions(-) diff --git a/drivers/bus/mhi/host/internal.h b/drivers/bus/mhi/host/internal.h index 61e03298e898e6dd02d2a977cddc4c87b21e3a6c..d6a3168bb3ecc34eab1036c0e74f8d70cf422fed 100644 --- a/drivers/bus/mhi/host/internal.h +++ b/drivers/bus/mhi/host/internal.h @@ -355,9 +355,9 @@ static inline bool mhi_is_active(struct mhi_controller *mhi_cntrl) static inline void mhi_trigger_resume(struct mhi_controller *mhi_cntrl) { pm_wakeup_event(&mhi_cntrl->mhi_dev->dev, 0); - pm_runtime_get(mhi_cntrl->cntrl_dev); - pm_runtime_mark_last_busy(mhi_cntrl->cntrl_dev); - pm_runtime_put(mhi_cntrl->cntrl_dev); + pm_runtime_get(&mhi_cntrl->mhi_dev->dev); + pm_runtime_mark_last_busy(&mhi_cntrl->mhi_dev->dev); + pm_runtime_put(&mhi_cntrl->mhi_dev->dev); } /* Register access methods */ diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c index 7ac1162a0a81ae11245a2bbd9bf6fd6c0f86fbc1..85a9a5a62a6d3f92b0e9dc35b13fd867db89dd95 100644 --- a/drivers/bus/mhi/host/main.c +++ b/drivers/bus/mhi/host/main.c @@ -427,6 +427,8 @@ void mhi_create_devices(struct mhi_controller *mhi_cntrl) if (ret) put_device(&mhi_dev->dev); } + pm_runtime_no_callbacks(&mhi_cntrl->mhi_dev->dev); + devm_pm_runtime_set_active_enabled(&mhi_cntrl->mhi_dev->dev); } irqreturn_t mhi_irq_handler(int irq_number, void *dev) @@ -658,12 +660,8 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl, /* notify client */ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); - 
if (mhi_chan->dir == DMA_TO_DEVICE) { + if (mhi_chan->dir == DMA_TO_DEVICE) atomic_dec(&mhi_cntrl->pending_pkts); - /* Release the reference got from mhi_queue() */ - pm_runtime_mark_last_busy(mhi_cntrl->cntrl_dev); - pm_runtime_put(mhi_cntrl->cntrl_dev); - } /* * Recycle the buffer if buffer is pre-allocated, @@ -1152,12 +1150,6 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info, read_lock_irqsave(&mhi_cntrl->pm_lock, flags); - /* Packet is queued, take a usage ref to exit M3 if necessary - * for host->device buffer, balanced put is done on buffer completion - * for device->host buffer, balanced put is after ringing the DB - */ - pm_runtime_get(mhi_cntrl->cntrl_dev); - /* Assert dev_wake (to exit/prevent M1/M2)*/ mhi_cntrl->wake_toggle(mhi_cntrl); @@ -1167,11 +1159,6 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info, if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) mhi_ring_chan_db(mhi_cntrl, mhi_chan); - if (dir == DMA_FROM_DEVICE) { - pm_runtime_mark_last_busy(mhi_cntrl->cntrl_dev); - pm_runtime_put(mhi_cntrl->cntrl_dev); - } - read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags); return ret; @@ -1377,7 +1364,6 @@ static int mhi_update_channel_state(struct mhi_controller *mhi_cntrl, ret = mhi_device_get_sync(mhi_cntrl->mhi_dev); if (ret) return ret; - pm_runtime_get(mhi_cntrl->cntrl_dev); reinit_completion(&mhi_chan->completion); ret = mhi_send_cmd(mhi_cntrl, mhi_chan, cmd); @@ -1408,8 +1394,6 @@ static int mhi_update_channel_state(struct mhi_controller *mhi_cntrl, trace_mhi_channel_command_end(mhi_cntrl, mhi_chan, to_state, TPS("Updated")); exit_channel_update: - pm_runtime_mark_last_busy(mhi_cntrl->cntrl_dev); - pm_runtime_put(mhi_cntrl->cntrl_dev); mhi_device_put(mhi_cntrl->mhi_dev); return ret; @@ -1592,12 +1576,8 @@ static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl, while (tre_ring->rp != tre_ring->wp) { struct mhi_buf_info *buf_info = buf_ring->rp; - if (mhi_chan->dir == DMA_TO_DEVICE) 
{ + if (mhi_chan->dir == DMA_TO_DEVICE) atomic_dec(&mhi_cntrl->pending_pkts); - /* Release the reference got from mhi_queue() */ - pm_runtime_mark_last_busy(mhi_cntrl->cntrl_dev); - pm_runtime_put(mhi_cntrl->cntrl_dev); - } if (!buf_info->pre_mapped) mhi_cntrl->unmap_single(mhi_cntrl, buf_info); diff --git a/drivers/bus/mhi/host/pm.c b/drivers/bus/mhi/host/pm.c index b4ef115189b505c3450ff0949ad2d09f3ed53386..fd690e8af693109ed8c55248db0ea153f9e69423 100644 --- a/drivers/bus/mhi/host/pm.c +++ b/drivers/bus/mhi/host/pm.c @@ -429,6 +429,7 @@ static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl) if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { ret = -EIO; + read_unlock_bh(&mhi_cntrl->pm_lock); goto error_mission_mode; } @@ -459,11 +460,9 @@ static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl) */ mhi_create_devices(mhi_cntrl); - read_lock_bh(&mhi_cntrl->pm_lock); error_mission_mode: - mhi_cntrl->wake_put(mhi_cntrl, false); - read_unlock_bh(&mhi_cntrl->pm_lock); + mhi_device_put(mhi_cntrl->mhi_dev); return ret; } @@ -1038,9 +1037,11 @@ int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl) read_unlock_bh(&mhi_cntrl->pm_lock); return -EIO; } + read_unlock_bh(&mhi_cntrl->pm_lock); + + pm_runtime_get_sync(&mhi_cntrl->mhi_dev->dev); + read_lock_bh(&mhi_cntrl->pm_lock); mhi_cntrl->wake_get(mhi_cntrl, true); - if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) - mhi_trigger_resume(mhi_cntrl); read_unlock_bh(&mhi_cntrl->pm_lock); ret = wait_event_timeout(mhi_cntrl->state_event, @@ -1049,9 +1050,7 @@ int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl) msecs_to_jiffies(mhi_cntrl->timeout_ms)); if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { - read_lock_bh(&mhi_cntrl->pm_lock); - mhi_cntrl->wake_put(mhi_cntrl, false); - read_unlock_bh(&mhi_cntrl->pm_lock); + mhi_device_put(mhi_cntrl->mhi_dev); return -EIO; } @@ -1339,11 +1338,10 @@ void mhi_device_put(struct mhi_device *mhi_dev) mhi_dev->dev_wake--; 
read_lock_bh(&mhi_cntrl->pm_lock); - if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) - mhi_trigger_resume(mhi_cntrl); mhi_cntrl->wake_put(mhi_cntrl, false); read_unlock_bh(&mhi_cntrl->pm_lock); + pm_runtime_put(&mhi_cntrl->mhi_dev->dev); } EXPORT_SYMBOL_GPL(mhi_device_put); -- 2.34.1