@@ -88,7 +88,7 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
 		return;
 	prev->next = next;
 	if (link_trbs) {
-		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr[0] = next->dma;
+		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = next->dma;
 
 		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
 		val = prev->trbs[TRBS_PER_SEGMENT-1].link.control;
@@ -200,8 +200,7 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
 		return;
 
 	dev = xhci->devs[slot_id];
-	xhci->dcbaa->dev_context_ptrs[2*slot_id] = 0;
-	xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0;
+	xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
 	if (!dev)
 		return;
 
@@ -265,13 +264,12 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 	 * Point to output device context in dcbaa; skip the output control
 	 * context, which is eight 32 bit fields (or 32 bytes long)
 	 */
-	xhci->dcbaa->dev_context_ptrs[2*slot_id] =
+	xhci->dcbaa->dev_context_ptrs[slot_id] =
 		(u32) dev->out_ctx_dma + (32);
 	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
 			slot_id,
-			&xhci->dcbaa->dev_context_ptrs[2*slot_id],
+			&xhci->dcbaa->dev_context_ptrs[slot_id],
 			(unsigned long long)dev->out_ctx_dma);
-	xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0;
 
 	return 1;
 fail:
@@ -360,10 +358,9 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 	ep0_ctx->ep_info2 |= MAX_BURST(0);
 	ep0_ctx->ep_info2 |= ERROR_COUNT(3);
 
-	ep0_ctx->deq[0] =
+	ep0_ctx->deq =
 		dev->ep_rings[0]->first_seg->dma;
-	ep0_ctx->deq[0] |= dev->ep_rings[0]->cycle_state;
-	ep0_ctx->deq[1] = 0;
+	ep0_ctx->deq |= dev->ep_rings[0]->cycle_state;
 
 	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */
 
@@ -477,8 +474,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 	if (!virt_dev->new_ep_rings[ep_index])
 		return -ENOMEM;
 	ep_ring = virt_dev->new_ep_rings[ep_index];
-	ep_ctx->deq[0] = ep_ring->first_seg->dma | ep_ring->cycle_state;
-	ep_ctx->deq[1] = 0;
+	ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;
 
 	ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);
 
@@ -535,8 +531,7 @@ void xhci_endpoint_zero(struct xhci_hcd *xhci,
 
 	ep_ctx->ep_info = 0;
 	ep_ctx->ep_info2 = 0;
-	ep_ctx->deq[0] = 0;
-	ep_ctx->deq[1] = 0;
+	ep_ctx->deq = 0;
 	ep_ctx->tx_info = 0;
 	/* Don't free the endpoint ring until the set interface or configuration
 	 * request succeeds.
@@ -551,10 +546,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 
 	/* Free the Event Ring Segment Table and the actual Event Ring */
 	xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
-	xhci_writel(xhci, 0, &xhci->ir_set->erst_base[0]);
-	xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
-	xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[0]);
-	xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]);
+	xhci_write_64(xhci, 0, &xhci->ir_set->erst_base);
+	xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue);
 	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
 	if (xhci->erst.entries)
 		pci_free_consistent(pdev, size,
@@ -566,8 +559,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 	xhci->event_ring = NULL;
 	xhci_dbg(xhci, "Freed event ring\n");
 
-	xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[0]);
-	xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[1]);
+	xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
 	if (xhci->cmd_ring)
 		xhci_ring_free(xhci, xhci->cmd_ring);
 	xhci->cmd_ring = NULL;
@@ -586,8 +578,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 	xhci->device_pool = NULL;
 	xhci_dbg(xhci, "Freed device context pool\n");
 
-	xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[0]);
-	xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[1]);
+	xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
 	if (xhci->dcbaa)
 		pci_free_consistent(pdev, sizeof(*xhci->dcbaa),
 				xhci->dcbaa, xhci->dcbaa->dma);
@@ -602,6 +593,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	dma_addr_t	dma;
 	struct device	*dev = xhci_to_hcd(xhci)->self.controller;
 	unsigned int	val, val2;
+	u64		val_64;
 	struct xhci_segment	*seg;
 	u32 page_size;
 	int i;
@@ -647,8 +639,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	xhci->dcbaa->dma = dma;
 	xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
 			(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
-	xhci_writel(xhci, dma, &xhci->op_regs->dcbaa_ptr[0]);
-	xhci_writel(xhci, (u32) 0, &xhci->op_regs->dcbaa_ptr[1]);
+	xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
 
 	/*
 	 * Initialize the ring segment pool. The ring must be a contiguous
@@ -675,14 +666,12 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 			(unsigned long long)xhci->cmd_ring->first_seg->dma);
 
 	/* Set the address in the Command Ring Control register */
-	val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]);
-	val = (val & ~CMD_RING_ADDR_MASK) |
-		(xhci->cmd_ring->first_seg->dma & CMD_RING_ADDR_MASK) |
+	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
+		(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
 		xhci->cmd_ring->cycle_state;
-	xhci_dbg(xhci, "// Setting command ring address low bits to 0x%x\n", val);
-	xhci_writel(xhci, val, &xhci->op_regs->cmd_ring[0]);
-	xhci_dbg(xhci, "// Setting command ring address high bits to 0x0\n");
-	xhci_writel(xhci, (u32) 0, &xhci->op_regs->cmd_ring[1]);
+	xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n", (unsigned long long) val_64);
+	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
 	xhci_dbg_cmd_ptrs(xhci);
 
 	val = xhci_readl(xhci, &xhci->cap_regs->db_off);
@@ -722,8 +711,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	/* set ring base address and size for each segment table entry */
 	for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
 		struct xhci_erst_entry *entry = &xhci->erst.entries[val];
-		entry->seg_addr[0] = seg->dma;
-		entry->seg_addr[1] = 0;
+		entry->seg_addr = seg->dma;
 		entry->seg_size = TRBS_PER_SEGMENT;
 		entry->rsvd = 0;
 		seg = seg->next;
@@ -741,11 +729,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	/* set the segment table base address */
 	xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
 			(unsigned long long)xhci->erst.erst_dma_addr);
-	val = xhci_readl(xhci, &xhci->ir_set->erst_base[0]);
-	val &= ERST_PTR_MASK;
-	val |= (xhci->erst.erst_dma_addr & ~ERST_PTR_MASK);
-	xhci_writel(xhci, val, &xhci->ir_set->erst_base[0]);
-	xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
+	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
+	val_64 &= ERST_PTR_MASK;
+	val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
+	xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
 
 	/* Set the event ring dequeue address */
 	xhci_set_hc_event_deq(xhci);
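
Note: the xhci_read_64()/xhci_write_64() accessors called throughout these hunks are defined outside this excerpt (alongside the register structs, whose paired u32 fields become single u64 fields here, which is why the [0]/[1] indexing disappears). As a minimal sketch of what they must do: the xHCI specification permits splitting a 64-bit register access into two 32-bit accesses, low dword first. The signatures below are inferred from the call sites above and are an assumption, not part of this diff:

/* Hypothetical 64-bit MMIO accessors matching the call sites above.
 * xHCI allows a 64-bit register to be accessed as two 32-bit accesses,
 * low dword first; these wrappers split the access accordingly.
 */
static inline u64 xhci_read_64(const struct xhci_hcd *xhci,
		__u64 __iomem *regs)
{
	__u32 __iomem *ptr = (__u32 __iomem *) regs;
	u64 val_lo = readl(ptr);		/* low dword first */
	u64 val_hi = readl(ptr + 1);		/* then high dword */

	return val_lo + (val_hi << 32);
}

static inline void xhci_write_64(struct xhci_hcd *xhci,
		const u64 val, __u64 __iomem *regs)
{
	__u32 __iomem *ptr = (__u32 __iomem *) regs;
	u32 val_lo = lower_32_bits(val);
	u32 val_hi = upper_32_bits(val);

	writel(val_lo, ptr);			/* low dword first */
	writel(val_hi, ptr + 1);		/* then high dword */
}

With accessors of this shape, each pair of xhci_writel() calls in the old code collapses into one xhci_write_64() call, and the read-modify-write of the Command Ring Control and ERST base registers can mask and set all 64 bits in one expression, as the hunks above show.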