@@ -852,6 +852,148 @@ out_err:
 	return;
 }
 
+void
+nfsd4_set_statp(struct svc_rqst *rqstp, __be32 *statp)
+{
+	struct nfsd4_compoundres *resp = rqstp->rq_resp;
+
+	resp->cstate.statp = statp;
+}
+
+/*
+ * Dereference the result pages.
+ */
+static void
+nfsd4_release_respages(struct page **respages, short resused)
+{
+	int i;
+
+	dprintk("--> %s\n", __func__);
+	for (i = 0; i < resused; i++) {
+		if (!respages[i])
+			continue;
+		put_page(respages[i]);
+		respages[i] = NULL;
+	}
+}
+
+static void
+nfsd4_copy_pages(struct page **topages, struct page **frompages, short count)
+{
+	int i;
+
+	for (i = 0; i < count; i++) {
+		topages[i] = frompages[i];
+		if (!topages[i])
+			continue;
+		get_page(topages[i]);
+	}
+}
+
+/*
+ * Cache the reply pages up to NFSD_PAGES_PER_SLOT + 1, clearing the previous
+ * pages. We add a page to NFSD_PAGES_PER_SLOT for the case where the total
+ * length of the XDR response is less than se_fmaxresp_cached
+ * (NFSD_PAGES_PER_SLOT * PAGE_SIZE) but the xdr_buf pages are used for a
+ * larger portion of the reply (e.g. readdir).
+ *
+ * Store the base and length of the rq_res.head[0] page
+ * of the NFSv4.1 data, just past the rpc header.
+ */
+void
+nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
+{
+	struct nfsd4_cache_entry *entry = &resp->cstate.slot->sl_cache_entry;
+	struct svc_rqst *rqstp = resp->rqstp;
+	struct nfsd4_compoundargs *args = rqstp->rq_argp;
+	struct nfsd4_op *op = &args->ops[resp->opcnt];
+	struct kvec *resv = &rqstp->rq_res.head[0];
+
+	dprintk("--> %s entry %p\n", __func__, entry);
+
+	/* Don't cache a failed OP_SEQUENCE. */
+	if (resp->opcnt == 1 && op->opnum == OP_SEQUENCE && resp->cstate.status)
+		return;
+	nfsd4_release_respages(entry->ce_respages, entry->ce_resused);
+	entry->ce_resused = rqstp->rq_resused;
+	if (entry->ce_resused > NFSD_PAGES_PER_SLOT + 1)
+		entry->ce_resused = NFSD_PAGES_PER_SLOT + 1;
+	nfsd4_copy_pages(entry->ce_respages, rqstp->rq_respages,
+			 entry->ce_resused);
+	entry->ce_status = resp->cstate.status;
+	entry->ce_datav.iov_base = resp->cstate.statp;
+	entry->ce_datav.iov_len = resv->iov_len - ((char *)resp->cstate.statp -
+				(char *)page_address(rqstp->rq_respages[0]));
+	entry->ce_opcnt = resp->opcnt;
+	/* Current request rpc header length */
+	entry->ce_rpchdrlen = (char *)resp->cstate.statp -
+				(char *)page_address(rqstp->rq_respages[0]);
+}
+
+/*
+ * We keep the rpc header, but take the nfs reply from the replycache.
+ */
+static int
+nfsd41_copy_replay_data(struct nfsd4_compoundres *resp,
+			struct nfsd4_cache_entry *entry)
+{
+	struct svc_rqst *rqstp = resp->rqstp;
+	struct kvec *resv = &resp->rqstp->rq_res.head[0];
+	int len;
+
+	/* Current request rpc header length */
+	len = (char *)resp->cstate.statp -
+			(char *)page_address(rqstp->rq_respages[0]);
+	if (entry->ce_datav.iov_len + len > PAGE_SIZE) {
+		dprintk("%s v41 cached reply too large (%Zd).\n", __func__,
+			entry->ce_datav.iov_len);
+		return 0;
+	}
+	/* copy the cached reply nfsd data past the current rpc header */
+	memcpy((char *)resv->iov_base + len, entry->ce_datav.iov_base,
+		entry->ce_datav.iov_len);
+	resv->iov_len = len + entry->ce_datav.iov_len;
+	return 1;
+}
+
+/*
+ * Keep the first page of the replay. Copy the NFSv4.1 data from the first
+ * cached page. Replace any further replay pages from the cache.
+ */
+__be32
+nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp)
+{
+	struct nfsd4_cache_entry *entry = &resp->cstate.slot->sl_cache_entry;
+	__be32 status;
+
+	dprintk("--> %s entry %p\n", __func__, entry);
+
+
+	if (!nfsd41_copy_replay_data(resp, entry)) {
+		/*
+		 * Not enough room to use the replay rpc header, send the
+		 * cached header. Release all the allocated result pages.
+		 */
+		svc_free_res_pages(resp->rqstp);
+		nfsd4_copy_pages(resp->rqstp->rq_respages, entry->ce_respages,
+			entry->ce_resused);
+	} else {
+		/* Release all but the first allocated result page */
+
+		resp->rqstp->rq_resused--;
+		svc_free_res_pages(resp->rqstp);
+
+		nfsd4_copy_pages(&resp->rqstp->rq_respages[1],
+				 &entry->ce_respages[1],
+				 entry->ce_resused - 1);
+	}
+
+	resp->rqstp->rq_resused = entry->ce_resused;
+	status = entry->ce_status;
+
+	return status;
+}
+
 /*
  * Set the exchange_id flags returned by the server.
  */