@@ -284,7 +284,7 @@ struct kmem_list3 {
  * Need this for bootstrapping a per node allocator.
  */
 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
-struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
+static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
 #define CACHE_CACHE 0
 #define SIZE_AC MAX_NUMNODES
 #define SIZE_L3 (2 * MAX_NUMNODES)
@@ -4053,7 +4053,7 @@ static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
  * necessary. Note that the l3 listlock also protects the array_cache
  * if drain_array() is used on the shared array.
  */
-void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
+static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
 			 struct array_cache *ac, int force, int node)
 {
 	int tofree;
@@ -4317,7 +4317,7 @@ static const struct seq_operations slabinfo_op = {
  * @count: data length
  * @ppos: unused
  */
-ssize_t slabinfo_write(struct file *file, const char __user * buffer,
+static ssize_t slabinfo_write(struct file *file, const char __user *buffer,
 		       size_t count, loff_t *ppos)
 {
 	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;