@@ -86,7 +86,31 @@ static struct {
#endif /* CONFIG_CIFS_WEAK_PW_HASH */
#endif /* CIFS_POSIX */

-/* Forward declarations */
+#ifdef CONFIG_HIGHMEM
+/*
+ * On arches that have high memory, kmap address space is limited. By
+ * serializing the kmap operations on those arches, we ensure that we don't
+ * end up with a bunch of threads in writeback with partially mapped page
+ * arrays, stuck waiting for kmap to come back. That situation prevents
+ * progress and can deadlock.
+ */
+static DEFINE_MUTEX(cifs_kmap_mutex);
+
+static inline void
+cifs_kmap_lock(void)
+{
+ mutex_lock(&cifs_kmap_mutex);
+}
+
+static inline void
+cifs_kmap_unlock(void)
+{
+ mutex_unlock(&cifs_kmap_mutex);
+}
+#else /* !CONFIG_HIGHMEM */
+#define cifs_kmap_lock() do { ; } while(0)
+#define cifs_kmap_unlock() do { ; } while(0)
+#endif /* CONFIG_HIGHMEM */

/* Mark as invalid, all open files on tree connections since they
were closed when session to server was lost */
@@ -1503,7 +1527,9 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
}

/* marshal up the page array */
+ cifs_kmap_lock();
len = rdata->marshal_iov(rdata, data_len);
+ cifs_kmap_unlock();
data_len -= len;

/* issue the read if we have any iovecs left to fill */
@@ -2069,7 +2095,9 @@ cifs_async_writev(struct cifs_writedata *wdata)
* and set the iov_len properly for each one. It may also set
* wdata->bytes too.
*/
+ cifs_kmap_lock();
wdata->marshal_iov(iov, wdata);
+ cifs_kmap_unlock();

cFYI(1, "async write at %llu %u bytes", wdata->offset, wdata->bytes);
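
For reference, the locking pattern the new helpers are meant to enforce looks roughly like the sketch below. It is not part of the patch: the demo_* names and DEMO_NPAGES are hypothetical stand-ins for the real cifs_readdata/cifs_writedata requests and their marshal_iov callbacks, and the sketch simply assumes the cifs_kmap_lock()/cifs_kmap_unlock() helpers added in the first hunk are in scope.

/*
 * Illustrative sketch only, not part of the patch. demo_req,
 * demo_marshal_pages() and demo_issue_write() are hypothetical stand-ins
 * for the cifs_readdata/cifs_writedata structures and their marshal_iov
 * callbacks; the sketch assumes the cifs_kmap_lock()/cifs_kmap_unlock()
 * helpers from the first hunk are visible.
 */
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/uio.h>

#define DEMO_NPAGES 16                  /* hypothetical pages per request */

struct demo_req {
        struct page *pages[DEMO_NPAGES];
        struct kvec iov[DEMO_NPAGES];
        unsigned int npages;
};

/* kmap the whole page array into the request's iovec */
static void demo_marshal_pages(struct demo_req *req)
{
        unsigned int i;

        for (i = 0; i < req->npages; i++) {
                req->iov[i].iov_base = kmap(req->pages[i]);
                req->iov[i].iov_len = PAGE_SIZE;
        }
}

static void demo_issue_write(struct demo_req *req)
{
        /*
         * Hold the mutex across the whole marshal step, as the hunks above
         * do around marshal_iov(). A writeback thread then maps either its
         * entire page array or none of it, so two threads can no longer sit
         * blocked in kmap() while each pins mappings the other is waiting
         * for.
         */
        cifs_kmap_lock();
        demo_marshal_pages(req);
        cifs_kmap_unlock();

        /* ... hand req->iov to the transport; kunmap each page on completion ... */
}

On builds without CONFIG_HIGHMEM the two helpers expand to empty do-while statements, so the serialization adds no cost where kmap address space is not a limited resource.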