|
@@ -1021,6 +1021,109 @@ void __release_region(struct resource *parent, resource_size_t start,
|
|
|
}
|
|
|
EXPORT_SYMBOL(__release_region);
|
|
|
|
|
|
#ifdef CONFIG_MEMORY_HOTREMOVE
/**
 * release_mem_region_adjustable - release a previously reserved memory region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @size: resource region size
 *
 * This interface is intended for memory hot-delete. The requested region
 * is released from a currently busy memory resource. The requested region
 * must either match exactly or fit into a single busy resource entry. In
 * the latter case, the remaining resource is adjusted accordingly.
 * Existing children of the busy memory resource must be immutable in the
 * request.
 *
 * Return: 0 on success; -EINVAL if the region does not fit a single busy
 * IORESOURCE_MEM entry under @parent; -ENOMEM if a split was required but
 * the spare entry could not be allocated.
 *
 * Note:
 * - Additional release conditions, such as overlapping region, can be
 *   supported after they are confirmed as valid cases.
 * - When a busy memory resource gets split into two entries, the code
 *   assumes that all children remain in the lower address entry for
 *   simplicity. Enhance this logic when necessary.
 */
int release_mem_region_adjustable(struct resource *parent,
			resource_size_t start, resource_size_t size)
{
	struct resource **p;		/* link (parent->child or ->sibling) being walked */
	struct resource *res;		/* candidate entry under inspection */
	struct resource *new_res;	/* spare entry, consumed only by the split case */
	resource_size_t end;		/* inclusive end of the requested region */
	int ret = -EINVAL;

	end = start + size - 1;
	if ((start < parent->start) || (end > parent->end))
		return ret;

	/*
	 * The kzalloc() result gets checked later: allocation must happen
	 * up front because GFP_KERNEL may sleep, which is not allowed once
	 * resource_lock is held below.  If no split turns out to be needed,
	 * the unused entry is freed after the lock is dropped.
	 */
	new_res = kzalloc(sizeof(struct resource), GFP_KERNEL);

	p = &parent->child;
	write_lock(&resource_lock);

	while ((res = *p)) {
		/*
		 * Entries appear to be kept in ascending address order
		 * (TODO(review): confirm against the insertion paths), so
		 * once an entry starts at or beyond @end no later sibling
		 * can contain the requested region; give up with -EINVAL.
		 */
		if (res->start >= end)
			break;

		/* look for the next resource if it does not fit into */
		if (res->start > start || res->end < end) {
			p = &res->sibling;
			continue;
		}

		/* region fits, but only memory resources may be released here */
		if (!(res->flags & IORESOURCE_MEM))
			break;

		/* non-busy container: descend into its children */
		if (!(res->flags & IORESOURCE_BUSY)) {
			p = &res->child;
			continue;
		}

		/* found the target resource; let's adjust accordingly */
		if (res->start == start && res->end == end) {
			/* free the whole entry: unlink from the sibling list */
			*p = res->sibling;
			kfree(res);
			ret = 0;
		} else if (res->start == start && res->end != end) {
			/* adjust the start: keep the upper remainder */
			ret = __adjust_resource(res, end + 1,
						res->end - end);
		} else if (res->start != start && res->end == end) {
			/* adjust the end: keep the lower remainder */
			ret = __adjust_resource(res, res->start,
						start - res->start);
		} else {
			/* split into two entries */
			if (!new_res) {
				/* pre-lock allocation failed and a split is unavoidable */
				ret = -ENOMEM;
				break;
			}
			/*
			 * Upper remainder inherits name/flags/parent and takes
			 * over res's sibling link; children (if any) stay on
			 * the lower entry per the Note in the header comment.
			 */
			new_res->name = res->name;
			new_res->start = end + 1;
			new_res->end = res->end;
			new_res->flags = res->flags;
			new_res->parent = res->parent;
			new_res->sibling = res->sibling;
			new_res->child = NULL;

			/* shrink res to the lower remainder first ... */
			ret = __adjust_resource(res, res->start,
						start - res->start);
			if (ret)
				break;
			/* ... then link the upper remainder after it */
			res->sibling = new_res;
			new_res = NULL;	/* ownership transferred to the tree */
		}

		break;
	}

	write_unlock(&resource_lock);
	kfree(new_res);	/* no-op if the split consumed it (or alloc failed) */
	return ret;
}
#endif	/* CONFIG_MEMORY_HOTREMOVE */
|
|
|
/*
|
|
|
* Managed region resource
|
|
|
*/
|