
xen/blkback: Union the blkif_request request specific fields

Following in the steps of the patch
"xen: Union the blkif_request request specific fields", this patch
makes the corresponding change in blkback. Per the original patch:

"Prepare for extending the block device ring to allow request
specific fields, by moving the request specific fields for
reads, writes and barrier requests to a union member."

Cc: Owen Smith <owen.smith@citrix.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Author: Konrad Rzeszutek Wilk (14 years ago)
Commit: c35950bfa9
2 changed files with 11 additions and 11 deletions
  1. drivers/xen/blkback/blkback.c  (+7 -7)
  2. include/xen/blkif.h            (+4 -4)
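
For context, the request layout introduced by the original frontend patch looks roughly like the sketch below. It is an approximation of the upstream blkif ring interface rather than the exact definition in this tree, but it shows where the u.rw member that the hunks below dereference comes from.

    /*
     * Rough sketch of the shared ring request after the union change,
     * approximated from the upstream Xen blkif interface; the exact
     * types (blkif_sector_t, grant_ref_t, blkif_vdev_t) and the
     * BLKIF_MAX_SEGMENTS_PER_REQUEST constant come from the existing
     * headers in this tree.
     */
    struct blkif_request_rw {
            blkif_sector_t sector_number;          /* start sector on the vbd      */
            struct blkif_request_segment {
                    grant_ref_t gref;              /* grant ref of the I/O buffer  */
                    uint8_t first_sect, last_sect; /* sector span within the frame */
            } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    };

    struct blkif_request {
            uint8_t      operation;                /* BLKIF_OP_*                   */
            uint8_t      nr_segments;              /* number of segments           */
            blkif_vdev_t handle;                   /* virtual device handle        */
            uint64_t     id;                       /* echoed back in the response  */
            union {
                    struct blkif_request_rw rw;    /* read/write/barrier fields    */
            } u;
    };

With this layout the backend reaches request-specific fields through the union member, e.g. req->u.rw.sector_number instead of req->sector_number, which is exactly what the hunks below change.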

drivers/xen/blkback/blkback.c: +7 -7

@@ -426,7 +426,7 @@ static void dispatch_rw_block_io(blkif_t *blkif,
 	}
 
 	preq.dev           = req->handle;
-	preq.sector_number = req->sector_number;
+	preq.sector_number = req->u.rw.sector_number;
 	preq.nr_sects      = 0;
 
 	pending_req->blkif     = blkif;
@@ -438,11 +438,11 @@ static void dispatch_rw_block_io(blkif_t *blkif,
 	for (i = 0; i < nseg; i++) {
 		uint32_t flags;
 
-		seg[i].nsec = req->seg[i].last_sect -
-			req->seg[i].first_sect + 1;
+		seg[i].nsec = req->u.rw.seg[i].last_sect -
+			req->u.rw.seg[i].first_sect + 1;
 
-		if ((req->seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
-		    (req->seg[i].last_sect < req->seg[i].first_sect))
+		if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
+		    (req->u.rw.seg[i].last_sect < req->u.rw.seg[i].first_sect))
 			goto fail_response;
 		preq.nr_sects += seg[i].nsec;
 
@@ -450,7 +450,7 @@ static void dispatch_rw_block_io(blkif_t *blkif,
 		if (operation != READ)
 			flags |= GNTMAP_readonly;
 		gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
-				  req->seg[i].gref, blkif->domid);
+				  req->u.rw.seg[i].gref, blkif->domid);
 	}
 
 	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
@@ -472,7 +472,7 @@ static void dispatch_rw_block_io(blkif_t *blkif,
 			page_to_pfn(blkbk->pending_page(pending_req, i)),
 			FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));
 		seg[i].buf  = map[i].dev_bus_addr |
-			(req->seg[i].first_sect << 9);
+			(req->u.rw.seg[i].first_sect << 9);
 	}
 
 	if (ret)

include/xen/blkif.h: +4 -4

@@ -96,12 +96,12 @@ static void inline blkif_get_x86_32_req(struct blkif_request *dst, struct blkif_
 	dst->nr_segments = src->nr_segments;
 	dst->handle = src->handle;
 	dst->id = src->id;
-	dst->sector_number = src->sector_number;
+	dst->u.rw.sector_number = src->sector_number;
 	barrier();
 	if (n > dst->nr_segments)
 		n = dst->nr_segments;
 	for (i = 0; i < n; i++)
-		dst->seg[i] = src->seg[i];
+		dst->u.rw.seg[i] = src->seg[i];
 }
 
 static void inline blkif_get_x86_64_req(struct blkif_request *dst, struct blkif_x86_64_request *src)
@@ -111,12 +111,12 @@ static void inline blkif_get_x86_64_req(struct blkif_request *dst, struct blkif_
 	dst->nr_segments = src->nr_segments;
 	dst->handle = src->handle;
 	dst->id = src->id;
-	dst->sector_number = src->sector_number;
+	dst->u.rw.sector_number = src->sector_number;
 	barrier();
 	if (n > dst->nr_segments)
 		n = dst->nr_segments;
 	for (i = 0; i < n; i++)
-		dst->seg[i] = src->seg[i];
+		dst->u.rw.seg[i] = src->seg[i];
 }
 
 #endif /* __XEN_BLKIF_H__ */