@@ -7,4 +7,20 @@
 #define L1_CACHE_SHIFT 5
 #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
 
+/*
+ * Memory returned by kmalloc() may be used for DMA, so we must make
+ * sure that all such allocations are cache aligned. Otherwise,
+ * unrelated code may cause parts of the buffer to be read into the
+ * cache before the transfer is done, causing old data to be seen by
+ * the CPU.
+ */
+#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+
+/*
+ * With EABI on ARMv5 and above we must have 64-bit aligned slab pointers.
+ */
+#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
+#define ARCH_SLAB_MINALIGN 8
+#endif
+
 #endif
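
For illustration only, not part of the patch: a minimal userspace sketch of the hazard the first comment describes, assuming the same 32-byte line size (L1_CACHE_SHIFT = 5). The 20-byte object size, the addresses, and the line_of() helper are hypothetical, chosen only to show how cache-line-sized alignment keeps a kmalloc()ed DMA buffer from sharing a cache line with an unrelated object.

#include <stdio.h>
#include <stdint.h>

/* Userspace model of the patch's constants; not kernel code. */
#define L1_CACHE_SHIFT 5
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES

/* Index of the cache line a given address falls into. */
static unsigned long line_of(uintptr_t addr)
{
    return (unsigned long)(addr >> L1_CACHE_SHIFT);
}

int main(void)
{
    /*
     * Two 20-byte objects placed back to back, as an allocator with
     * only word alignment might lay them out.  The tail of the DMA
     * buffer and the head of its neighbour land in the same cache
     * line, so a CPU access to the neighbour can pull that line into
     * the cache while the transfer is still in flight, and the CPU
     * later sees stale data instead of what the device wrote.
     */
    uintptr_t dma_buf = 0x1000;               /* hypothetical addresses */
    uintptr_t unaligned_neighbour = dma_buf + 20;

    printf("word-aligned neighbour shares a line: %s\n",
           line_of(dma_buf + 19) == line_of(unaligned_neighbour)
           ? "yes" : "no");

    /*
     * With ARCH_KMALLOC_MINALIGN, object size and placement are
     * rounded up to a full cache line, so no unrelated object can
     * share a line with the buffer.
     */
    uintptr_t aligned_neighbour = dma_buf +
        ((20 + ARCH_KMALLOC_MINALIGN - 1) & ~(ARCH_KMALLOC_MINALIGN - 1));

    printf("cache-aligned neighbour shares a line: %s\n",
           line_of(dma_buf + 19) == line_of(aligned_neighbour)
           ? "yes" : "no");
    return 0;
}

The ARCH_SLAB_MINALIGN hunk is a separate concern: under the ARM EABI, 64-bit types are 8-byte aligned and the compiler may access them with ldrd/strd on ARMv5 and later, so slab objects must start on 8-byte boundaries for those accesses to be safe.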