|
@@ -27,6 +27,7 @@
|
|
|
#include <linux/memory.h>
|
|
|
#include <linux/math64.h>
|
|
|
#include <linux/fault-inject.h>
|
|
|
+#include <linux/stacktrace.h>
|
|
|
|
|
|
#include <trace/events/kmem.h>
|
|
|
|
|
@@ -191,8 +192,12 @@ static LIST_HEAD(slab_caches);
|
|
|
/*
|
|
|
* Tracking user of a slab.
|
|
|
*/
|
|
|
+#define TRACK_ADDRS_COUNT 16
|
|
|
struct track {
|
|
|
unsigned long addr; /* Called from address */
|
|
|
+#ifdef CONFIG_STACKTRACE
|
|
|
+ unsigned long addrs[TRACK_ADDRS_COUNT]; /* Called from address */
|
|
|
+#endif
|
|
|
int cpu; /* Was running on cpu */
|
|
|
int pid; /* Pid context */
|
|
|
unsigned long when; /* When did the operation occur */
|
|
@@ -420,6 +425,24 @@ static void set_track(struct kmem_cache *s, void *object,
|
|
|
struct track *p = get_track(s, object, alloc);
|
|
|
|
|
|
if (addr) {
|
|
|
+#ifdef CONFIG_STACKTRACE
|
|
|
+ struct stack_trace trace;
|
|
|
+ int i;
|
|
|
+
|
|
|
+ trace.nr_entries = 0;
|
|
|
+ trace.max_entries = TRACK_ADDRS_COUNT;
|
|
|
+ trace.entries = p->addrs;
|
|
|
+ trace.skip = 3;
|
|
|
+ save_stack_trace(&trace);
|
|
|
+
|
|
|
+ /* See rant in lockdep.c */
|
|
|
+ if (trace.nr_entries != 0 &&
|
|
|
+ trace.entries[trace.nr_entries - 1] == ULONG_MAX)
|
|
|
+ trace.nr_entries--;
|
|
|
+
|
|
|
+ for (i = trace.nr_entries; i < TRACK_ADDRS_COUNT; i++)
|
|
|
+ p->addrs[i] = 0;
|
|
|
+#endif
|
|
|
p->addr = addr;
|
|
|
p->cpu = smp_processor_id();
|
|
|
p->pid = current->pid;
|
|
@@ -444,6 +467,16 @@ static void print_track(const char *s, struct track *t)
|
|
|
|
|
|
printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
|
|
|
s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
|
|
|
+#ifdef CONFIG_STACKTRACE
|
|
|
+ {
|
|
|
+ int i;
|
|
|
+ for (i = 0; i < TRACK_ADDRS_COUNT; i++)
|
|
|
+ if (t->addrs[i])
|
|
|
+ printk(KERN_ERR "\t%pS\n", (void *)t->addrs[i]);
|
|
|
+ else
|
|
|
+ break;
|
|
|
+ }
|
|
|
+#endif
|
|
|
}
|
|
|
|
|
|
static void print_tracking(struct kmem_cache *s, void *object)
|
|
@@ -557,10 +590,10 @@ static void init_object(struct kmem_cache *s, void *object, u8 val)
|
|
|
memset(p + s->objsize, val, s->inuse - s->objsize);
|
|
|
}
|
|
|
|
|
|
-static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes)
|
|
|
+static u8 *check_bytes8(u8 *start, u8 value, unsigned int bytes)
|
|
|
{
|
|
|
while (bytes) {
|
|
|
- if (*start != (u8)value)
|
|
|
+ if (*start != value)
|
|
|
return start;
|
|
|
start++;
|
|
|
bytes--;
|
|
@@ -568,6 +601,38 @@ static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes)
|
|
|
return NULL;
|
|
|
}
|
|
|
|
|
|
+static u8 *check_bytes(u8 *start, u8 value, unsigned int bytes)
|
|
|
+{
|
|
|
+ u64 value64;
|
|
|
+ unsigned int words, prefix;
|
|
|
+
|
|
|
+ if (bytes <= 16)
|
|
|
+ return check_bytes8(start, value, bytes);
|
|
|
+
|
|
|
+ value64 = value | value << 8 | value << 16 | value << 24;
|
|
|
+ value64 = value64 | value64 << 32;
|
|
|
+ prefix = 8 - ((unsigned long)start) % 8;
|
|
|
+
|
|
|
+ if (prefix) {
|
|
|
+ u8 *r = check_bytes8(start, value, prefix);
|
|
|
+ if (r)
|
|
|
+ return r;
|
|
|
+ start += prefix;
|
|
|
+ bytes -= prefix;
|
|
|
+ }
|
|
|
+
|
|
|
+ words = bytes / 8;
|
|
|
+
|
|
|
+ while (words) {
|
|
|
+ if (*(u64 *)start != value64)
|
|
|
+ return check_bytes8(start, value, 8);
|
|
|
+ start += 8;
|
|
|
+ words--;
|
|
|
+ }
|
|
|
+
|
|
|
+ return check_bytes8(start, value, bytes % 8);
|
|
|
+}
|
|
|
+
|
|
|
static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
|
|
|
void *from, void *to)
|
|
|
{
|
|
@@ -2928,6 +2993,42 @@ size_t ksize(const void *object)
|
|
|
}
|
|
|
EXPORT_SYMBOL(ksize);
|
|
|
|
|
|
#ifdef CONFIG_SLUB_DEBUG
/*
 * verify_mem_not_deleted - best-effort check that a pointer does not
 * refer to an already-freed slab object.
 * @x: pointer to check
 *
 * Returns false if @x is NULL/ZERO_SIZE_PTR or sits on its slab's
 * free list (an error is also reported via object_err() in that
 * case); returns true otherwise, including for non-slab memory such
 * as stack addresses, which cannot be validated here.
 */
bool verify_mem_not_deleted(const void *x)
{
	struct page *page;
	unsigned long flags;
	void *object = (void *)x;
	bool ret = true;

	if (unlikely(ZERO_OR_NULL_PTR(x)))
		return false;

	/* Disable interrupts so the slab cannot change under us. */
	local_irq_save(flags);

	page = virt_to_head_page(x);
	if (likely(PageSlab(page))) {
		slab_lock(page);
		if (on_freelist(page->slab, page, object)) {
			object_err(page->slab, page, object,
					"Object is on free-list");
			ret = false;
		}
		slab_unlock(page);
	} else {
		/* maybe it was from stack? assume it is still live */
		ret = true;
	}

	local_irq_restore(flags);
	return ret;
}
EXPORT_SYMBOL(verify_mem_not_deleted);
#endif
|
|
|
+
|
|
|
void kfree(const void *x)
|
|
|
{
|
|
|
struct page *page;
|