anolis: Revert "dmaengine: Remove the last, used parameters in dma_async_is_tx_complete"
ANBZ: #8716

This reverts commit bbb7037dd0.

The commit bbb7037dd0 ("dmaengine: Remove the last, used parameters in
dma_async_is_tx_complete") introduces a minor change but unfortunately breaks
compatibility with upstream, rendering out-of-tree (OOT) drivers such as knem
unable to compile. Revert it to fix this issue. Additionally, adapt the
dma_async_is_tx_complete() callers in prezero and page copy.

Signed-off-by: Guanjun <guanjun@linux.alibaba.com>
Reviewed-by: Artie Ding <artie.ding@linux.alibaba.com>
Link: https://gitee.com/anolis/cloud-kernel/pulls/3305
parent edb1ce9255
commit 6927322e43
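For context, what the revert restores is the upstream four-parameter prototype that out-of-tree users such as knem are built against. Below is a minimal, hypothetical sketch of such a caller (the channel and cookie are assumed to come from the driver's own prep/submit path); against the two-parameter variant introduced by bbb7037dd0 it no longer compiles, which is the breakage being fixed:

#include <linux/dmaengine.h>
#include <linux/printk.h>

/* Hypothetical out-of-tree style poll; chan and cookie are set up elsewhere. */
static enum dma_status oot_poll_copy(struct dma_chan *chan, dma_cookie_t cookie)
{
        dma_cookie_t last = 0, used = 0;
        enum dma_status status;

        /* Upstream prototype takes four arguments; NULL is accepted for both. */
        status = dma_async_is_tx_complete(chan, cookie, &last, &used);
        pr_debug("status=%d last=%d used=%d\n", status, last, used);
        return status;
}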
@@ -257,8 +257,8 @@ The details of these operations are:
       dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc)
 
-   This returns a cookie that can be used to check the progress of a transaction
-   via dma_async_is_tx_complete().
+   This returns a cookie can be used to check the progress of DMA engine
+   activity via other DMA engine calls not covered in this document.
 
    dmaengine_submit() will not start the DMA operation, it merely adds
    it to the pending queue. For this, see step 5, dma_async_issue_pending.
@@ -338,11 +338,22 @@ Further APIs
    .. code-block:: c
 
       enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
-               dma_cookie_t cookie)
+               dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
 
    This can be used to check the status of the channel. Please see
    the documentation in include/linux/dmaengine.h for a more complete
    description of this API.
 
+   This can be used with the cookie returned from dmaengine_submit()
+   to check for completion of a specific DMA transaction.
+
+   .. note::
+
+      Not all DMA engine drivers can return reliable information for
+      a running DMA channel. It is recommended that DMA engine users
+      pause or stop (via dmaengine_terminate_all()) the channel before
+      using this API.
+
 5. Synchronize termination API
 
    .. code-block:: c
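The restored text above describes the usual client flow: obtain a cookie from dmaengine_submit(), start the work with dma_async_issue_pending(), then check the cookie with dma_async_is_tx_complete(). A compressed, hypothetical sketch of that flow follows (error paths trimmed, dma_dst/dma_src assumed to be already DMA-mapped); it is essentially what dma_sync_wait() in a later hunk does as well:

#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/processor.h>

/* Hypothetical helper: submit one memcpy and poll its cookie to completion. */
static int example_copy_poll(struct dma_chan *chan, dma_addr_t dma_dst,
                             dma_addr_t dma_src, size_t len)
{
        struct dma_async_tx_descriptor *tx;
        dma_cookie_t cookie;

        tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len, 0);
        if (!tx)
                return -ENOMEM;

        cookie = dmaengine_submit(tx);          /* returns the cookie to poll  */
        if (dma_submit_error(cookie))
                return -EIO;

        dma_async_issue_pending(chan);          /* actually start the transfer */

        for (;;) {                              /* busy-poll for illustration  */
                enum dma_status status;

                status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
                if (status == DMA_COMPLETE)
                        return 0;
                if (status == DMA_ERROR)
                        return -EIO;
                cpu_relax();
        }
}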
@@ -535,6 +535,13 @@ where to put them)
   - Makes sure that dependent operations are run before marking it
     as complete.
 
+- dma_cookie_t
+
+  - it's a DMA transaction ID that will increment over time.
+
+  - Not really relevant any more since the introduction of ``virt-dma``
+    that abstracts it away.
+
 - DMA_CTRL_ACK
 
   - If clear, the descriptor cannot be reused by provider until the
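To make the "increment over time" point concrete: on the provider side, cookies are normally handed out from the tx_submit() callback via the dma_cookie_assign() helper in the driver-private drivers/dma/dmaengine.h header, and drivers built on virt-dma get the same behaviour through vchan_tx_submit(). A hypothetical minimal sketch (locking and descriptor queueing omitted):

/* Inside a driver under drivers/dma/, so the private helpers are local. */
#include <linux/dmaengine.h>
#include "dmaengine.h"

static dma_cookie_t foo_tx_submit(struct dma_async_tx_descriptor *tx)
{
        /*
         * dma_cookie_assign() bumps chan->cookie, stores the new value in
         * tx->cookie and returns it, so every submitted descriptor gets a
         * monotonically increasing transaction ID.
         */
        return dma_cookie_assign(tx);
}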
@@ -452,7 +452,8 @@ static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
                                      msecs_to_jiffies(100)))
                err = -ETIMEDOUT;
 
-       if (dma_async_is_tx_complete(hdev->dma_lch, cookie) != DMA_COMPLETE)
+       if (dma_async_is_tx_complete(hdev->dma_lch, cookie,
+                                    NULL, NULL) != DMA_COMPLETE)
                err = -ETIMEDOUT;
 
        if (err) {
@@ -523,7 +523,7 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
 
        dma_async_issue_pending(chan);
        do {
-               status = dma_async_is_tx_complete(chan, cookie);
+               status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
                if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
                        dev_err(chan->device->dev, "%s: timeout!\n", __func__);
                        return DMA_ERROR;
@@ -829,7 +829,8 @@ static int dmatest_func(void *data)
                                            done->done,
                                            msecs_to_jiffies(params->timeout));
 
-               status = dma_async_is_tx_complete(chan, cookie);
+               status = dma_async_is_tx_complete(chan, cookie, NULL,
+                                                 NULL);
        }
 
        if (!done->done) {
@@ -289,7 +289,7 @@ int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
                                 vout->vrfb_dma_tx.tx_status == 1,
                                 VRFB_TX_TIMEOUT);
 
-       status = dma_async_is_tx_complete(chan, cookie);
+       status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 
        if (vout->vrfb_dma_tx.tx_status == 0) {
                pr_err("%s: Timeout while waiting for DMA\n", __func__);
@@ -1053,17 +1053,8 @@ static void pxa_camera_dma_irq(struct pxa_camera_dev *pcdev,
                last_buf = list_entry(pcdev->capture.prev,
                                      struct pxa_buffer, queue);
                last_status = dma_async_is_tx_complete(pcdev->dma_chans[chan],
-                                                      last_buf->cookie[chan]);
-               /*
-                * Peek into the channel and read the last cookie that was issued.
-                * This is a layering violation - the dmaengine API does not officially
-                * provide this information. Since this camera driver is tightly coupled
-                * with a specific DMA device we know exactly how this cookie value will
-                * behave. Otherwise, this wouldn't be safe.
-                */
-               last_issued = pcdev->dma_chans[chan]->cookie;
-               barrier();
-
+                                                      last_buf->cookie[chan],
+                                                      NULL, &last_issued);
                if (camera_status & overrun &&
                    last_status != DMA_COMPLETE) {
                        dev_dbg(pcdev_to_dev(pcdev), "FIFO overrun! CISR: %x\n",
@@ -597,7 +597,8 @@ static void dma_xfer_callback(void *param)
        struct mport_dma_req *req = (struct mport_dma_req *)param;
        struct mport_cdev_priv *priv = req->priv;
 
-       req->status = dma_async_is_tx_complete(priv->dmach, req->cookie);
+       req->status = dma_async_is_tx_complete(priv->dmach, req->cookie,
+                                              NULL, NULL);
        complete(&req->req_comp);
        kref_put(&req->refcount, dma_req_free);
 }
@@ -1431,13 +1431,25 @@ static inline void dma_async_issue_pending(struct dma_chan *chan)
  * dma_async_is_tx_complete - poll for transaction completion
  * @chan: DMA channel
  * @cookie: transaction identifier to check status of
+ * @last: returns last completed cookie, can be NULL
+ * @used: returns last issued cookie, can be NULL
+ *
+ * If @last and @used are passed in, upon return they reflect the most
+ * recently submitted (used) cookie and the most recently completed
+ * cookie.
  */
 static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
-       dma_cookie_t cookie)
+       dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
 {
        struct dma_tx_state state;
+       enum dma_status status;
 
-       return chan->device->device_tx_status(chan, cookie, &state);
+       status = chan->device->device_tx_status(chan, cookie, &state);
+       if (last)
+               *last = state.last;
+       if (used)
+               *used = state.used;
+       return status;
 }
 
 #ifdef CONFIG_DMA_ENGINE
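The @last/@used out parameters restored here are what make cheap batched checks possible: a single call captures the channel's last-completed and last-issued cookies, and dma_async_is_complete() (also in include/linux/dmaengine.h) can then classify further cookies without querying the hardware again. A hypothetical sketch, assuming the caller still holds an array of cookies it submitted earlier:

#include <linux/dmaengine.h>

/* Hypothetical batch check: one hardware query, then pure cookie arithmetic. */
static unsigned int count_done(struct dma_chan *chan,
                               const dma_cookie_t *cookies, unsigned int n)
{
        dma_cookie_t last = 0, used = 0;
        unsigned int i, done = 0;

        if (!n)
                return 0;

        /* Any known cookie works as the probe; we only want last/used here. */
        dma_async_is_tx_complete(chan, cookies[0], &last, &used);

        for (i = 0; i < n; i++)
                if (dma_async_is_complete(cookies[i], last, used) == DMA_COMPLETE)
                        done++;

        return done;
}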
@@ -103,7 +103,7 @@ static int __dma_page_copy_sg(struct scatterlist *src, struct scatterlist *dst,
                        err = -EIO;
                        goto unmap_sg;
                }
-               status = dma_async_is_tx_complete(dma_copy_chan, cookie);
+               status = dma_async_is_tx_complete(dma_copy_chan, cookie, NULL, NULL);
                if (status != DMA_COMPLETE)
                        err = -EIO;
        }
@@ -297,7 +297,7 @@ static int clear_page_hw(struct page *page, int order, int node)
                        ret = -EIO;
                        goto err_prep;
                }
-               status = dma_async_is_tx_complete(dma_chan, cookie);
+               status = dma_async_is_tx_complete(dma_chan, cookie, NULL, NULL);
                if (status != DMA_COMPLETE) {
                        pr_info("Failed to check DMA completion status on node %d\n", node);
                        ret = -EIO;