@@ -270,38 +270,6 @@ static int ath6kl_sdio_scat_rw(struct ath6kl_sdio *ar_sdio,
 
 	return status;
 }
-
-/* scatter gather read write request */
-static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
-					struct hif_scatter_req *scat_req)
-{
-	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
-	struct hif_scatter_req_priv *req_priv = scat_req->req_priv;
-	u32 request = scat_req->req;
-	int status = 0;
-	unsigned long flags;
-
-	if (!scat_req->len)
-		return -EINVAL;
-
-	ath6kl_dbg(ATH6KL_DBG_SCATTER,
-		   "hif-scatter: total len: %d scatter entries: %d\n",
-		   scat_req->len, scat_req->scat_entries);
-
-	if (request & HIF_SYNCHRONOUS) {
-		sdio_claim_host(ar_sdio->func);
-		status = ath6kl_sdio_scat_rw(ar_sdio, req_priv->busrequest);
-		sdio_release_host(ar_sdio->func);
-	} else {
-		spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
-		list_add_tail(&req_priv->busrequest->list, &ar_sdio->wr_asyncq);
-		spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
-		queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
-	}
-
-	return status;
-}
-
 /* clean up scatter support */
 static void ath6kl_sdio_cleanup_scat_resource(struct ath6kl_sdio *ar_sdio)
 {
@@ -654,6 +622,37 @@ static int ath6kl_sdio_enable_scatter(struct ath6kl *ar,
 	return ret;
 }
 
+/* scatter gather read write request */
+static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
+					struct hif_scatter_req *scat_req)
+{
+	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+	struct hif_scatter_req_priv *req_priv = scat_req->req_priv;
+	u32 request = scat_req->req;
+	int status = 0;
+	unsigned long flags;
+
+	if (!scat_req->len)
+		return -EINVAL;
+
+	ath6kl_dbg(ATH6KL_DBG_SCATTER,
+		   "hif-scatter: total len: %d scatter entries: %d\n",
+		   scat_req->len, scat_req->scat_entries);
+
+	if (request & HIF_SYNCHRONOUS) {
+		sdio_claim_host(ar_sdio->func);
+		status = ath6kl_sdio_scat_rw(ar_sdio, req_priv->busrequest);
+		sdio_release_host(ar_sdio->func);
+	} else {
+		spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
+		list_add_tail(&req_priv->busrequest->list, &ar_sdio->wr_asyncq);
+		spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
+		queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
+	}
+
+	return status;
+}
+
 static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar)
 {
 	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);