USB: xHCI: Add stack support for xHCI
This adds stack layer for eXtensible Host Controller Interface which
facilitates use of USB 3.0 in host mode. Adapting xHCI host controller
driver in linux-kernel by Sarah Sharp to needs in u-boot.

Initial porting from Linux kernel version 3.4, with following top commit
history of drivers/usb/host/xhci*:

cf84055 xHCI: Cleanup isoc transfer ring when TD length mismatch found

This adds the basic xHCI host controller driver with bare minimum
features:
- Control/Bulk transfer support has been added with required
  infrastructure for necessary xHC data structures.
- Stream protocol hasn't been supported yet.
- No support for quirky devices has been added.

Signed-off-by: Vikas C Sajjan <vikas.sajjan@samsung.com>
Signed-off-by: Julius Werner <jwerner@chromium.org>
Signed-off-by: Vivek Gautam <gautam.vivek@samsung.com>
Cc: Simon Glass <sjg@chromium.org>
Cc: Minkyu Kang <mk7.kang@samsung.com>
Cc: Dan Murphy <dmurphy@ti.com>
Cc: Marek Vasut <marex@denx.de>
This commit is contained in:
parent e3d7440c22
commit 5853e1335c

common/usb.c | 33
@@ -854,6 +854,16 @@ void usb_free_device(void)
 	usb_dev[dev_index].devnum = -1;
 }
 
+/*
+ * XHCI issues Enable Slot command and thereafter
+ * allocates device contexts. Provide a weak alias
+ * function for the purpose, so that XHCI overrides it
+ * and EHCI/OHCI just work out of the box.
+ */
+__weak int usb_alloc_device(struct usb_device *udev)
+{
+	return 0;
+}
 /*
  * By the time we get here, the device has gotten a new device ID
  * and is in the default state. We need to identify the thing and
@@ -867,6 +877,17 @@ int usb_new_device(struct usb_device *dev)
 	int tmp;
 	ALLOC_CACHE_ALIGN_BUFFER(unsigned char, tmpbuf, USB_BUFSIZ);
+
+	/*
+	 * Allocate usb 3.0 device context.
+	 * USB 3.0 (xHCI) protocol tries to allocate device slot
+	 * and related data structures first. This call does that.
+	 * Refer to sec 4.3.2 in xHCI spec rev1.0
+	 */
+	if (usb_alloc_device(dev)) {
+		printf("Cannot allocate device context to get SLOT_ID\n");
+		return -1;
+	}
 
 	/* We still haven't set the Address yet */
 	addr = dev->devnum;
 	dev->devnum = 0;
@@ -897,7 +918,7 @@ int usb_new_device(struct usb_device *dev)
 	 * http://sourceforge.net/mailarchive/forum.php?
 	 * thread_id=5729457&forum_id=5398
 	 */
-	struct usb_device_descriptor *desc;
+	__maybe_unused struct usb_device_descriptor *desc;
 	int port = -1;
 	struct usb_device *parent = dev->parent;
 	unsigned short portstatus;
@@ -914,6 +935,13 @@ int usb_new_device(struct usb_device *dev)
 	dev->epmaxpacketin[0] = 64;
 	dev->epmaxpacketout[0] = 64;
 
+	/*
+	 * XHCI needs to issue an Address Device command to set up
+	 * proper device context structures, before it can interact
+	 * with the device. So a get_descriptor will fail before any
+	 * of that is done for XHCI unlike EHCI.
+	 */
+#ifndef CONFIG_USB_XHCI
 	err = usb_get_descriptor(dev, USB_DT_DEVICE, 0, desc, 64);
 	if (err < 0) {
 		debug("usb_new_device: usb_get_descriptor() failed\n");
@@ -926,11 +954,12 @@ int usb_new_device(struct usb_device *dev)
 	 * to differentiate between HUB and DEVICE.
 	 */
 	dev->descriptor.bDeviceClass = desc->bDeviceClass;
+#endif
 
+	/* find the port number we're at */
 	if (parent) {
 		int j;
 
-		/* find the port number we're at */
 		for (j = 0; j < parent->maxchild; j++) {
 			if (parent->children[j] == dev) {
 				port = j;
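The weak alias added above is the whole portability trick: EHCI/OHCI link
against the default usb_alloc_device() that simply returns 0, while the xHCI
stack provides a strong definition that issues the Enable Slot command and
allocates the device contexts. A minimal sketch of such an override follows;
the real implementation lives in the suppressed drivers/usb/host/xhci.c, so
the body here is illustrative only, reusing names defined elsewhere in this
patch:

int usb_alloc_device(struct usb_device *udev)
{
	/* Strong symbol: overrides the __weak stub in common/usb.c */
	struct xhci_ctrl *ctrl = udev->controller;

	/* Issue an Enable Slot command (xHCI spec rev1.0, sec 4.3.2) */
	xhci_queue_command(ctrl, NULL, 0, 0, TRB_ENABLE_SLOT);
	/*
	 * The real code then waits for the command completion event and
	 * reads the slot ID the controller picked out of the event TRB.
	 */
	udev->slot_id = 1;	/* placeholder: parsed from the event TRB */

	/* Build the input/output contexts and the EP0 ring for that slot */
	return xhci_alloc_virt_device(udev);
}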
drivers/usb/host/Makefile
@@ -42,6 +42,9 @@ COBJS-$(CONFIG_USB_EHCI_SPEAR) += ehci-spear.o
 COBJS-$(CONFIG_USB_EHCI_TEGRA) += ehci-tegra.o
 COBJS-$(CONFIG_USB_EHCI_VCT) += ehci-vct.o
 
+# xhci
+COBJS-$(CONFIG_USB_XHCI) += xhci.o xhci-mem.o xhci-ring.o
+
 COBJS := $(COBJS-y)
 SRCS := $(COBJS:.o=.c)
 OBJS := $(addprefix $(obj),$(COBJS))
drivers/usb/host/xhci-mem.c | 720 (new file)
@@ -0,0 +1,720 @@
/*
 * USB HOST XHCI Controller stack
 *
 * Based on xHCI host controller driver in linux-kernel
 * by Sarah Sharp.
 *
 * Copyright (C) 2008 Intel Corp.
 * Author: Sarah Sharp
 *
 * Copyright (C) 2013 Samsung Electronics Co.Ltd
 * Authors: Vivek Gautam <gautam.vivek@samsung.com>
 *	    Vikas Sajjan <vikas.sajjan@samsung.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <asm/byteorder.h>
#include <usb.h>
#include <malloc.h>
#include <asm/cache.h>
#include <asm-generic/errno.h>

#include "xhci.h"

#define CACHELINE_SIZE		CONFIG_SYS_CACHELINE_SIZE
/**
 * flushes the address passed till the length
 *
 * @param addr	pointer to memory region to be flushed
 * @param len	the length of the cache line to be flushed
 * @return none
 */
void xhci_flush_cache(uint32_t addr, u32 len)
{
	BUG_ON((void *)addr == NULL || len == 0);

	flush_dcache_range(addr & ~(CACHELINE_SIZE - 1),
				ALIGN(addr + len, CACHELINE_SIZE));
}

/**
 * invalidates the address passed till the length
 *
 * @param addr	pointer to memory region to be invalidated
 * @param len	the length of the cache line to be invalidated
 * @return none
 */
void xhci_inval_cache(uint32_t addr, u32 len)
{
	BUG_ON((void *)addr == NULL || len == 0);

	invalidate_dcache_range(addr & ~(CACHELINE_SIZE - 1),
				ALIGN(addr + len, CACHELINE_SIZE));
}


/**
 * frees the "segment" pointer passed
 *
 * @param ptr	pointer to "segment" to be freed
 * @return none
 */
static void xhci_segment_free(struct xhci_segment *seg)
{
	free(seg->trbs);
	seg->trbs = NULL;

	free(seg);
}

/**
 * frees the "ring" pointer passed
 *
 * @param ptr	pointer to "ring" to be freed
 * @return none
 */
static void xhci_ring_free(struct xhci_ring *ring)
{
	struct xhci_segment *seg;
	struct xhci_segment *first_seg;

	BUG_ON(!ring);

	first_seg = ring->first_seg;
	seg = first_seg->next;
	while (seg != first_seg) {
		struct xhci_segment *next = seg->next;
		xhci_segment_free(seg);
		seg = next;
	}
	xhci_segment_free(first_seg);

	free(ring);
}

/**
 * frees the "xhci_container_ctx" pointer passed
 *
 * @param ptr	pointer to "xhci_container_ctx" to be freed
 * @return none
 */
static void xhci_free_container_ctx(struct xhci_container_ctx *ctx)
{
	free(ctx->bytes);
	free(ctx);
}

/**
 * frees the virtual devices for "xhci_ctrl" pointer passed
 *
 * @param ptr	pointer to "xhci_ctrl" whose virtual devices are to be freed
 * @return none
 */
static void xhci_free_virt_devices(struct xhci_ctrl *ctrl)
{
	int i;
	int slot_id;
	struct xhci_virt_device *virt_dev;

	/*
	 * refactored here to loop through all virt_dev
	 * Slot ID 0 is reserved
	 */
	for (slot_id = 0; slot_id < MAX_HC_SLOTS; slot_id++) {
		virt_dev = ctrl->devs[slot_id];
		if (!virt_dev)
			continue;

		ctrl->dcbaa->dev_context_ptrs[slot_id] = 0;

		for (i = 0; i < 31; ++i)
			if (virt_dev->eps[i].ring)
				xhci_ring_free(virt_dev->eps[i].ring);

		if (virt_dev->in_ctx)
			xhci_free_container_ctx(virt_dev->in_ctx);
		if (virt_dev->out_ctx)
			xhci_free_container_ctx(virt_dev->out_ctx);

		free(virt_dev);
		/* make sure we are pointing to NULL */
		ctrl->devs[slot_id] = NULL;
	}
}

/**
 * frees all the memory allocated
 *
 * @param ptr	pointer to "xhci_ctrl" to be cleaned up
 * @return none
 */
void xhci_cleanup(struct xhci_ctrl *ctrl)
{
	xhci_ring_free(ctrl->event_ring);
	xhci_ring_free(ctrl->cmd_ring);
	xhci_free_virt_devices(ctrl);
	free(ctrl->erst.entries);
	free(ctrl->dcbaa);
	memset(ctrl, '\0', sizeof(struct xhci_ctrl));
}

/**
 * Malloc the aligned memory
 *
 * @param size	size of memory to be allocated
 * @return allocates the memory and returns the aligned pointer
 */
static void *xhci_malloc(unsigned int size)
{
	void *ptr;
	size_t cacheline_size = max(XHCI_ALIGNMENT, CACHELINE_SIZE);

	ptr = memalign(cacheline_size, ALIGN(size, cacheline_size));
	BUG_ON(!ptr);
	memset(ptr, '\0', size);

	xhci_flush_cache((uint32_t)ptr, size);

	return ptr;
}

/**
 * Make the prev segment point to the next segment.
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * address of the next segment. The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 *
 * @param prev	pointer to the previous segment
 * @param next	pointer to the next segment
 * @param link_trbs	flag to indicate whether to link the trbs or NOT
 * @return none
 */
static void xhci_link_segments(struct xhci_segment *prev,
				struct xhci_segment *next, bool link_trbs)
{
	u32 val;
	u64 val_64 = 0;

	if (!prev || !next)
		return;
	prev->next = next;
	if (link_trbs) {
		val_64 = (uintptr_t)next->trbs;
		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = val_64;

		/*
		 * Set the last TRB in the segment to
		 * have a TRB type ID of Link TRB
		 */
		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
		val &= ~TRB_TYPE_BITMASK;
		val |= (TRB_LINK << TRB_TYPE_SHIFT);

		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
	}
}

/**
 * Initialises the Ring's enqueue, dequeue, enq_seg pointers
 *
 * @param ring	pointer to the RING to be initialised
 * @return none
 */
static void xhci_initialize_ring_info(struct xhci_ring *ring)
{
	/*
	 * The ring is empty, so the enqueue pointer == dequeue pointer
	 */
	ring->enqueue = ring->first_seg->trbs;
	ring->enq_seg = ring->first_seg;
	ring->dequeue = ring->enqueue;
	ring->deq_seg = ring->first_seg;

	/*
	 * The ring is initialized to 0. The producer must write 1 to the
	 * cycle bit to handover ownership of the TRB, so PCS = 1.
	 * The consumer must compare CCS to the cycle bit to
	 * check ownership, so CCS = 1.
	 */
	ring->cycle_state = 1;
}
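The PCS/CCS comment above is the core of the lockless ring protocol: the
producer writes each TRB with its cycle bit equal to PCS, and the consumer
only accepts a TRB whose cycle bit matches CCS, flipping its state each time
it wraps past the end of the ring. A one-line illustration of the consumer
side, which is essentially what event_ready() in xhci-ring.c does:

	/* TRB is ours only if its cycle bit matches our cycle state */
	if ((le32_to_cpu(trb->event_cmd.flags) & TRB_CYCLE) !=
						ring->cycle_state)
		return 0;	/* still owned by the hardware */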

/**
 * Allocates a generic ring segment from the ring pool, sets the dma address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 * Section 4.11.1.1:
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 *
 * @param	none
 * @return pointer to the newly allocated SEGMENT
 */
static struct xhci_segment *xhci_segment_alloc(void)
{
	struct xhci_segment *seg;

	seg = (struct xhci_segment *)malloc(sizeof(struct xhci_segment));
	BUG_ON(!seg);

	seg->trbs = (union xhci_trb *)xhci_malloc(SEGMENT_SIZE);

	seg->next = NULL;

	return seg;
}

/**
 * Create a new ring with zero or more segments.
 * TODO: current code only uses one-time-allocated single-segment rings
 * of 1KB anyway, so we might as well get rid of all the segment and
 * linking code (and maybe increase the size a bit, e.g. 4KB).
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.2 and figures 15 and 16 of XHCI spec rev1.0.
 *
 * @param num_segs	number of segments in the ring
 * @param link_trbs	flag to indicate whether to link the trbs or NOT
 * @return pointer to the newly created RING
 */
struct xhci_ring *xhci_ring_alloc(unsigned int num_segs, bool link_trbs)
{
	struct xhci_ring *ring;
	struct xhci_segment *prev;

	ring = (struct xhci_ring *)malloc(sizeof(struct xhci_ring));
	BUG_ON(!ring);

	if (num_segs == 0)
		return ring;

	ring->first_seg = xhci_segment_alloc();
	BUG_ON(!ring->first_seg);

	num_segs--;

	prev = ring->first_seg;
	while (num_segs > 0) {
		struct xhci_segment *next;

		next = xhci_segment_alloc();
		BUG_ON(!next);

		xhci_link_segments(prev, next, link_trbs);

		prev = next;
		num_segs--;
	}
	xhci_link_segments(prev, ring->first_seg, link_trbs);
	if (link_trbs) {
		/* See section 4.9.2.1 and 6.4.4.1 */
		prev->trbs[TRBS_PER_SEGMENT-1].link.control |=
					cpu_to_le32(LINK_TOGGLE);
	}
	xhci_initialize_ring_info(ring);

	return ring;
}

/**
 * Allocates the Container context
 *
 * @param ctrl	Host controller data structure
 * @param type	type of XHCI Container Context
 * @return NULL if failed else pointer to the context on success
 */
static struct xhci_container_ctx
		*xhci_alloc_container_ctx(struct xhci_ctrl *ctrl, int type)
{
	struct xhci_container_ctx *ctx;

	ctx = (struct xhci_container_ctx *)
		malloc(sizeof(struct xhci_container_ctx));
	BUG_ON(!ctx);

	BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT));
	ctx->type = type;
	ctx->size = (MAX_EP_CTX_NUM + 1) *
			CTX_SIZE(readl(&ctrl->hccr->cr_hccparams));
	if (type == XHCI_CTX_TYPE_INPUT)
		ctx->size += CTX_SIZE(readl(&ctrl->hccr->cr_hccparams));

	ctx->bytes = (u8 *)xhci_malloc(ctx->size);

	return ctx;
}

/**
 * Allocates a virtual device
 *
 * @param udev	pointer to USB device structure
 * @return 0 on success else negative error code on failure
 */
int xhci_alloc_virt_device(struct usb_device *udev)
{
	u64 byte_64 = 0;
	unsigned int slot_id = udev->slot_id;
	struct xhci_virt_device *virt_dev;
	struct xhci_ctrl *ctrl = udev->controller;

	/* Slot ID 0 is reserved */
	if (ctrl->devs[slot_id]) {
		printf("Virt dev for slot[%d] already allocated\n", slot_id);
		return -EEXIST;
	}

	ctrl->devs[slot_id] = (struct xhci_virt_device *)
					malloc(sizeof(struct xhci_virt_device));

	if (!ctrl->devs[slot_id]) {
		puts("Failed to allocate virtual device\n");
		return -ENOMEM;
	}

	memset(ctrl->devs[slot_id], 0, sizeof(struct xhci_virt_device));
	virt_dev = ctrl->devs[slot_id];

	/* Allocate the (output) device context that will be used in the HC. */
	virt_dev->out_ctx = xhci_alloc_container_ctx(ctrl,
					XHCI_CTX_TYPE_DEVICE);
	if (!virt_dev->out_ctx) {
		puts("Failed to allocate out context for virt dev\n");
		return -ENOMEM;
	}

	/* Allocate the (input) device context for address device command */
	virt_dev->in_ctx = xhci_alloc_container_ctx(ctrl,
					XHCI_CTX_TYPE_INPUT);
	if (!virt_dev->in_ctx) {
		puts("Failed to allocate in context for virt dev\n");
		return -ENOMEM;
	}

	/* Allocate endpoint 0 ring */
	virt_dev->eps[0].ring = xhci_ring_alloc(1, true);

	byte_64 = (uintptr_t)(virt_dev->out_ctx->bytes);

	/* Point to output device context in dcbaa. */
	ctrl->dcbaa->dev_context_ptrs[slot_id] = byte_64;

	xhci_flush_cache((uint32_t)&ctrl->dcbaa->dev_context_ptrs[slot_id],
			sizeof(__le64));
	return 0;
}

/**
 * Allocates the necessary data structures
 * for XHCI host controller
 *
 * @param ctrl	Host controller data structure
 * @param hccr	pointer to HOST Controller Control Registers
 * @param hcor	pointer to HOST Controller Operational Registers
 * @return 0 if successful else negative error code on failure
 */
int xhci_mem_init(struct xhci_ctrl *ctrl, struct xhci_hccr *hccr,
					struct xhci_hcor *hcor)
{
	uint64_t val_64;
	uint64_t trb_64;
	uint32_t val;
	unsigned long deq;
	int i;
	struct xhci_segment *seg;

	/* DCBAA initialization */
	ctrl->dcbaa = (struct xhci_device_context_array *)
			xhci_malloc(sizeof(struct xhci_device_context_array));
	if (ctrl->dcbaa == NULL) {
		puts("unable to allocate DCBA\n");
		return -ENOMEM;
	}

	val_64 = (uintptr_t)ctrl->dcbaa;
	/* Set the pointer in DCBAA register */
	xhci_writeq(&hcor->or_dcbaap, val_64);

	/* Command ring control pointer register initialization */
	ctrl->cmd_ring = xhci_ring_alloc(1, true);

	/* Set the address in the Command Ring Control register */
	trb_64 = (uintptr_t)ctrl->cmd_ring->first_seg->trbs;
	val_64 = xhci_readq(&hcor->or_crcr);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(trb_64 & (u64) ~CMD_RING_RSVD_BITS) |
		ctrl->cmd_ring->cycle_state;
	xhci_writeq(&hcor->or_crcr, val_64);

	/* write the address of db register */
	val = xhci_readl(&hccr->cr_dboff);
	val &= DBOFF_MASK;
	ctrl->dba = (struct xhci_doorbell_array *)((char *)hccr + val);

	/* write the address of runtime register */
	val = xhci_readl(&hccr->cr_rtsoff);
	val &= RTSOFF_MASK;
	ctrl->run_regs = (struct xhci_run_regs *)((char *)hccr + val);

	/* writing the address of ir_set structure */
	ctrl->ir_set = &ctrl->run_regs->ir_set[0];

	/* Event ring does not maintain link TRB */
	ctrl->event_ring = xhci_ring_alloc(ERST_NUM_SEGS, false);
	ctrl->erst.entries = (struct xhci_erst_entry *)
		xhci_malloc(sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS);

	ctrl->erst.num_entries = ERST_NUM_SEGS;

	for (val = 0, seg = ctrl->event_ring->first_seg;
			val < ERST_NUM_SEGS;
			val++) {
		trb_64 = 0;
		trb_64 = (uintptr_t)seg->trbs;
		struct xhci_erst_entry *entry = &ctrl->erst.entries[val];
		xhci_writeq(&entry->seg_addr, trb_64);
		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
		entry->rsvd = 0;
		seg = seg->next;
	}
	xhci_flush_cache((uint32_t)ctrl->erst.entries,
			ERST_NUM_SEGS * sizeof(struct xhci_erst_entry));

	deq = (unsigned long)ctrl->event_ring->dequeue;

	/* Update HC event ring dequeue pointer */
	xhci_writeq(&ctrl->ir_set->erst_dequeue,
			(u64)deq & (u64)~ERST_PTR_MASK);

	/* set ERST count with the number of entries in the segment table */
	val = xhci_readl(&ctrl->ir_set->erst_size);
	val &= ERST_SIZE_MASK;
	val |= ERST_NUM_SEGS;
	xhci_writel(&ctrl->ir_set->erst_size, val);

	/* this is the event ring segment table pointer */
	val_64 = xhci_readq(&ctrl->ir_set->erst_base);
	val_64 &= ERST_PTR_MASK;
	val_64 |= ((u32)(ctrl->erst.entries) & ~ERST_PTR_MASK);

	xhci_writeq(&ctrl->ir_set->erst_base, val_64);

	/* initializing the virtual devices to NULL */
	for (i = 0; i < MAX_HC_SLOTS; ++i)
		ctrl->devs[i] = NULL;

	/*
	 * Just Zero'ing this register completely,
	 * or some spurious Device Notification Events
	 * might screw things up here.
	 */
	xhci_writel(&hcor->or_dnctrl, 0x0);

	return 0;
}

/**
 * Gives the input control context for the passed container context
 *
 * @param ctx	pointer to the context
 * @return pointer to the Input control context data
 */
struct xhci_input_control_ctx
		*xhci_get_input_control_ctx(struct xhci_container_ctx *ctx)
{
	BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT);
	return (struct xhci_input_control_ctx *)ctx->bytes;
}

/**
 * Gives the slot context for the passed container context
 *
 * @param ctrl	Host controller data structure
 * @param ctx	pointer to the context
 * @return pointer to the slot control context data
 */
struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_ctrl *ctrl,
				struct xhci_container_ctx *ctx)
{
	if (ctx->type == XHCI_CTX_TYPE_DEVICE)
		return (struct xhci_slot_ctx *)ctx->bytes;

	return (struct xhci_slot_ctx *)
		(ctx->bytes + CTX_SIZE(readl(&ctrl->hccr->cr_hccparams)));
}

/**
 * Gets the EP context based on the ep_index
 *
 * @param ctrl	Host controller data structure
 * @param ctx	context container
 * @param ep_index	index of the endpoint
 * @return pointer to the End point context
 */
struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_ctrl *ctrl,
				    struct xhci_container_ctx *ctx,
				    unsigned int ep_index)
{
	/* increment ep index by offset of start of ep ctx array */
	ep_index++;
	if (ctx->type == XHCI_CTX_TYPE_INPUT)
		ep_index++;

	return (struct xhci_ep_ctx *)
		(ctx->bytes +
		(ep_index * CTX_SIZE(readl(&ctrl->hccr->cr_hccparams))));
}

/**
 * Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
 * Useful when you want to change one particular aspect of the endpoint
 * and then issue a configure endpoint command.
 *
 * @param ctrl	Host controller data structure
 * @param in_ctx contains the input context
 * @param out_ctx contains the output context
 * @param ep_index index of the end point
 * @return none
 */
void xhci_endpoint_copy(struct xhci_ctrl *ctrl,
			struct xhci_container_ctx *in_ctx,
			struct xhci_container_ctx *out_ctx,
			unsigned int ep_index)
{
	struct xhci_ep_ctx *out_ep_ctx;
	struct xhci_ep_ctx *in_ep_ctx;

	out_ep_ctx = xhci_get_ep_ctx(ctrl, out_ctx, ep_index);
	in_ep_ctx = xhci_get_ep_ctx(ctrl, in_ctx, ep_index);

	in_ep_ctx->ep_info = out_ep_ctx->ep_info;
	in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
	in_ep_ctx->deq = out_ep_ctx->deq;
	in_ep_ctx->tx_info = out_ep_ctx->tx_info;
}

/**
 * Copy output xhci_slot_ctx to the input xhci_slot_ctx.
 * Useful when you want to change one particular aspect of the endpoint
 * and then issue a configure endpoint command.
 * Only the context entries field matters, but
 * we'll copy the whole thing anyway.
 *
 * @param ctrl	Host controller data structure
 * @param in_ctx contains the input context
 * @param out_ctx contains the output context
 * @return none
 */
void xhci_slot_copy(struct xhci_ctrl *ctrl, struct xhci_container_ctx *in_ctx,
			struct xhci_container_ctx *out_ctx)
{
	struct xhci_slot_ctx *in_slot_ctx;
	struct xhci_slot_ctx *out_slot_ctx;

	in_slot_ctx = xhci_get_slot_ctx(ctrl, in_ctx);
	out_slot_ctx = xhci_get_slot_ctx(ctrl, out_ctx);

	in_slot_ctx->dev_info = out_slot_ctx->dev_info;
	in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
	in_slot_ctx->tt_info = out_slot_ctx->tt_info;
	in_slot_ctx->dev_state = out_slot_ctx->dev_state;
}

/**
 * Setup an xHCI virtual device for a Set Address command
 *
 * @param udev pointer to the Device Data Structure
 * @return none
 */
void xhci_setup_addressable_virt_dev(struct usb_device *udev)
{
	struct usb_device *hop = udev;
	struct xhci_virt_device *virt_dev;
	struct xhci_ep_ctx *ep0_ctx;
	struct xhci_slot_ctx *slot_ctx;
	u32 port_num = 0;
	u64 trb_64 = 0;
	struct xhci_ctrl *ctrl = udev->controller;

	virt_dev = ctrl->devs[udev->slot_id];

	BUG_ON(!virt_dev);

	/* Extract the EP0 and Slot Ctrl */
	ep0_ctx = xhci_get_ep_ctx(ctrl, virt_dev->in_ctx, 0);
	slot_ctx = xhci_get_slot_ctx(ctrl, virt_dev->in_ctx);

	/* Only the control endpoint is valid - one endpoint context */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | 0);

	switch (udev->speed) {
	case USB_SPEED_SUPER:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
		break;
	case USB_SPEED_HIGH:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
		break;
	case USB_SPEED_FULL:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
		break;
	case USB_SPEED_LOW:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
		break;
	default:
		/* Speed was set earlier, this shouldn't happen. */
		BUG();
	}

	/* Extract the root hub port number */
	if (hop->parent)
		while (hop->parent->parent)
			hop = hop->parent;
	port_num = hop->portnr;
	debug("port_num = %d\n", port_num);

	slot_ctx->dev_info2 |=
		cpu_to_le32(((port_num & ROOT_HUB_PORT_MASK) <<
				ROOT_HUB_PORT_SHIFT));

	/* Step 4 - ring already allocated */
	/* Step 5 */
	ep0_ctx->ep_info2 = cpu_to_le32(CTRL_EP << EP_TYPE_SHIFT);
	debug("SPEED = %d\n", udev->speed);

	switch (udev->speed) {
	case USB_SPEED_SUPER:
		ep0_ctx->ep_info2 |= cpu_to_le32(((512 & MAX_PACKET_MASK) <<
					MAX_PACKET_SHIFT));
		debug("Setting Packet size = 512bytes\n");
		break;
	case USB_SPEED_HIGH:
	/* USB core guesses at a 64-byte max packet first for FS devices */
	case USB_SPEED_FULL:
		ep0_ctx->ep_info2 |= cpu_to_le32(((64 & MAX_PACKET_MASK) <<
					MAX_PACKET_SHIFT));
		debug("Setting Packet size = 64bytes\n");
		break;
	case USB_SPEED_LOW:
		ep0_ctx->ep_info2 |= cpu_to_le32(((8 & MAX_PACKET_MASK) <<
					MAX_PACKET_SHIFT));
		debug("Setting Packet size = 8bytes\n");
		break;
	default:
		/* New speed? */
		BUG();
	}

	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
	ep0_ctx->ep_info2 |=
		cpu_to_le32(((0 & MAX_BURST_MASK) << MAX_BURST_SHIFT) |
		((3 & ERROR_COUNT_MASK) << ERROR_COUNT_SHIFT));

	trb_64 = (uintptr_t)virt_dev->eps[0].ring->first_seg->trbs;
	ep0_ctx->deq = cpu_to_le64(trb_64 | virt_dev->eps[0].ring->cycle_state);

	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */

	xhci_flush_cache((uint32_t)ep0_ctx, sizeof(struct xhci_ep_ctx));
	xhci_flush_cache((uint32_t)slot_ctx, sizeof(struct xhci_slot_ctx));
}
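To make the container-context arithmetic in xhci_get_slot_ctx() and
xhci_get_ep_ctx() concrete, here is the layout those functions imply,
assuming the 32-byte context size (CSZ = 0 in HCCPARAMS); 64-byte contexts
simply double every offset:

	/*
	 * Device context (XHCI_CTX_TYPE_DEVICE):
	 *   0x00  slot context
	 *   0x20  ep0 context        (ep_index 0 -> slot + 1)
	 *   0x40  ep1 OUT context    (ep_index 1)
	 *   0x60  ep1 IN context     (ep_index 2)
	 *   ...
	 * Input context (XHCI_CTX_TYPE_INPUT), one extra entry up front:
	 *   0x00  input control context
	 *   0x20  slot context
	 *   0x40  ep0 context        (hence the second ep_index++)
	 *   ...
	 */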
drivers/usb/host/xhci-ring.c | 939 (new file)
@@ -0,0 +1,939 @@
/*
 * USB HOST XHCI Controller stack
 *
 * Based on xHCI host controller driver in linux-kernel
 * by Sarah Sharp.
 *
 * Copyright (C) 2008 Intel Corp.
 * Author: Sarah Sharp
 *
 * Copyright (C) 2013 Samsung Electronics Co.Ltd
 * Authors: Vivek Gautam <gautam.vivek@samsung.com>
 *	    Vikas Sajjan <vikas.sajjan@samsung.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <asm/byteorder.h>
#include <usb.h>
#include <asm/unaligned.h>
#include <asm-generic/errno.h>

#include "xhci.h"

/**
 * Is this TRB a link TRB or was the last TRB the last TRB in this event ring
 * segment? I.e. would the updated event TRB pointer step off the end of the
 * event seg?
 *
 * @param ctrl	Host controller data structure
 * @param ring	pointer to the ring
 * @param seg	pointer to the segment to which TRB belongs
 * @param trb	pointer to the ring trb
 * @return 1 if this TRB a link TRB else 0
 */
static int last_trb(struct xhci_ctrl *ctrl, struct xhci_ring *ring,
			struct xhci_segment *seg, union xhci_trb *trb)
{
	if (ring == ctrl->event_ring)
		return trb == &seg->trbs[TRBS_PER_SEGMENT];
	else
		return TRB_TYPE_LINK_LE32(trb->link.control);
}

/**
 * Does this link TRB point to the first segment in a ring,
 * or was the previous TRB the last TRB on the last segment in the ERST?
 *
 * @param ctrl	Host controller data structure
 * @param ring	pointer to the ring
 * @param seg	pointer to the segment to which TRB belongs
 * @param trb	pointer to the ring trb
 * @return 1 if this TRB is the last TRB on the last segment else 0
 */
static bool last_trb_on_last_seg(struct xhci_ctrl *ctrl,
				struct xhci_ring *ring,
				struct xhci_segment *seg,
				union xhci_trb *trb)
{
	if (ring == ctrl->event_ring)
		return ((trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
			(seg->next == ring->first_seg));
	else
		return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
}

/**
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs. That would be dumb and this would loop.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 * set, but other sections talk about dealing with the chain bit set. This was
 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 *
 * @param ctrl	Host controller data structure
 * @param ring	pointer to the ring
 * @param more_trbs_coming	flag to indicate whether more trbs
 *				are expected or NOT.
 *				Will you enqueue more TRBs before calling
 *				prepare_ring()?
 * @return none
 */
static void inc_enq(struct xhci_ctrl *ctrl, struct xhci_ring *ring,
						bool more_trbs_coming)
{
	u32 chain;
	union xhci_trb *next;

	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
	next = ++(ring->enqueue);

	/*
	 * Update the dequeue pointer further if that was a link TRB or we're at
	 * the end of an event ring segment (which doesn't have link TRBS)
	 */
	while (last_trb(ctrl, ring, ring->enq_seg, next)) {
		if (ring != ctrl->event_ring) {
			/*
			 * If the caller doesn't plan on enqueueing more
			 * TDs before ringing the doorbell, then we
			 * don't want to give the link TRB to the
			 * hardware just yet. We'll give the link TRB
			 * back in prepare_ring() just before we enqueue
			 * the TD at the top of the ring.
			 */
			if (!chain && !more_trbs_coming)
				break;

			/*
			 * If we're not dealing with 0.95 hardware or
			 * isoc rings on AMD 0.96 host,
			 * carry over the chain bit of the previous TRB
			 * (which may mean the chain bit is cleared).
			 */
			next->link.control &= cpu_to_le32(~TRB_CHAIN);
			next->link.control |= cpu_to_le32(chain);

			next->link.control ^= cpu_to_le32(TRB_CYCLE);
			xhci_flush_cache((uint32_t)next,
					sizeof(union xhci_trb));
		}
		/* Toggle the cycle bit after the last ring segment. */
		if (last_trb_on_last_seg(ctrl, ring,
					ring->enq_seg, next))
			ring->cycle_state = (ring->cycle_state ? 0 : 1);

		ring->enq_seg = ring->enq_seg->next;
		ring->enqueue = ring->enq_seg->trbs;
		next = ring->enqueue;
	}
}

/**
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs. That would be dumb and this would loop.
 *
 * @param ctrl	Host controller data structure
 * @param ring	Ring whose Dequeue TRB pointer needs to be incremented.
 * @return none
 */
static void inc_deq(struct xhci_ctrl *ctrl, struct xhci_ring *ring)
{
	do {
		/*
		 * Update the dequeue pointer further if that was a link TRB or
		 * we're at the end of an event ring segment (which doesn't have
		 * link TRBS)
		 */
		if (last_trb(ctrl, ring, ring->deq_seg, ring->dequeue)) {
			if (ring == ctrl->event_ring &&
					last_trb_on_last_seg(ctrl, ring,
						ring->deq_seg, ring->dequeue)) {
				ring->cycle_state = (ring->cycle_state ? 0 : 1);
			}
			ring->deq_seg = ring->deq_seg->next;
			ring->dequeue = ring->deq_seg->trbs;
		} else {
			ring->dequeue++;
		}
	} while (last_trb(ctrl, ring, ring->deq_seg, ring->dequeue));
}

/**
 * Generic function for queueing a TRB on a ring.
 * The caller must have checked to make sure there's room on the ring.
 *
 * @param ctrl	Host controller data structure
 * @param ring	pointer to the ring
 * @param more_trbs_coming	flag to indicate whether more trbs are expected;
 *				will you enqueue more TRBs before calling
 *				prepare_ring()?
 * @param trb_fields	pointer to trb field array containing TRB contents
 * @return pointer to the enqueued trb
 */
static struct xhci_generic_trb *queue_trb(struct xhci_ctrl *ctrl,
					struct xhci_ring *ring,
					bool more_trbs_coming,
					unsigned int *trb_fields)
{
	struct xhci_generic_trb *trb;
	int i;

	trb = &ring->enqueue->generic;

	for (i = 0; i < 4; i++)
		trb->field[i] = cpu_to_le32(trb_fields[i]);

	xhci_flush_cache((uint32_t)trb, sizeof(struct xhci_generic_trb));

	inc_enq(ctrl, ring, more_trbs_coming);

	return trb;
}

/**
 * Does various checks on the endpoint ring, and makes it ready
 * to queue num_trbs.
 *
 * @param ctrl	Host controller data structure
 * @param ep_ring	pointer to the EP Transfer Ring
 * @param ep_state	State of the End Point
 * @return error code in case of invalid ep_state, 0 on success
 */
static int prepare_ring(struct xhci_ctrl *ctrl, struct xhci_ring *ep_ring,
						u32 ep_state)
{
	union xhci_trb *next = ep_ring->enqueue;

	/* Make sure the endpoint has been added to xHC schedule */
	switch (ep_state) {
	case EP_STATE_DISABLED:
		/*
		 * USB core changed config/interfaces without notifying us,
		 * or hardware is reporting the wrong state.
		 */
		puts("WARN urb submitted to disabled ep\n");
		return -ENOENT;
	case EP_STATE_ERROR:
		puts("WARN waiting for error on ep to be cleared\n");
		return -EINVAL;
	case EP_STATE_HALTED:
		puts("WARN halted endpoint, queueing URB anyway.\n");
	case EP_STATE_STOPPED:
	case EP_STATE_RUNNING:
		debug("EP STATE RUNNING.\n");
		break;
	default:
		puts("ERROR unknown endpoint state for ep\n");
		return -EINVAL;
	}

	while (last_trb(ctrl, ep_ring, ep_ring->enq_seg, next)) {
		/*
		 * If we're not dealing with 0.95 hardware or isoc rings
		 * on AMD 0.96 host, clear the chain bit.
		 */
		next->link.control &= cpu_to_le32(~TRB_CHAIN);

		next->link.control ^= cpu_to_le32(TRB_CYCLE);

		xhci_flush_cache((uint32_t)next, sizeof(union xhci_trb));

		/* Toggle the cycle bit after the last ring segment. */
		if (last_trb_on_last_seg(ctrl, ep_ring,
					ep_ring->enq_seg, next))
			ep_ring->cycle_state = (ep_ring->cycle_state ? 0 : 1);
		ep_ring->enq_seg = ep_ring->enq_seg->next;
		ep_ring->enqueue = ep_ring->enq_seg->trbs;
		next = ep_ring->enqueue;
	}

	return 0;
}

/**
 * Generic function for queueing a command TRB on the command ring.
 * Check to make sure there's room on the command ring for one command TRB.
 *
 * @param ctrl		Host controller data structure
 * @param ptr		Pointer address to write in the first two fields (opt.)
 * @param slot_id	Slot ID to encode in the flags field (opt.)
 * @param ep_index	Endpoint index to encode in the flags field (opt.)
 * @param cmd		Command type to enqueue
 * @return none
 */
void xhci_queue_command(struct xhci_ctrl *ctrl, u8 *ptr, u32 slot_id,
			u32 ep_index, trb_type cmd)
{
	u32 fields[4];
	u64 val_64 = (uintptr_t)ptr;

	BUG_ON(prepare_ring(ctrl, ctrl->cmd_ring, EP_STATE_RUNNING));

	fields[0] = lower_32_bits(val_64);
	fields[1] = upper_32_bits(val_64);
	fields[2] = 0;
	fields[3] = TRB_TYPE(cmd) | EP_ID_FOR_TRB(ep_index) |
		SLOT_ID_FOR_TRB(slot_id) | ctrl->cmd_ring->cycle_state;

	queue_trb(ctrl, ctrl->cmd_ring, false, fields);

	/* Ring the command ring doorbell */
	xhci_writel(&ctrl->dba->doorbell[0], DB_VALUE_HOST);
}

/**
 * The TD size is the number of bytes remaining in the TD (including this TRB),
 * right shifted by 10.
 * It must fit in bits 21:17, so it can't be bigger than 31.
 *
 * @param remainder	remaining packets to be sent
 * @return remainder if remainder is less than max else max
 */
static u32 xhci_td_remainder(unsigned int remainder)
{
	u32 max = (1 << (21 - 17 + 1)) - 1;

	if ((remainder >> 10) >= max)
		return max << 17;
	else
		return (remainder >> 10) << 17;
}
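A quick worked check of xhci_td_remainder(), using only the numbers the code
above implies: max = (1 << 5) - 1 = 31. With 96KB (0x18000 bytes) remaining,
0x18000 >> 10 = 96 exceeds 31, so the function saturates and returns 31 << 17;
with 8KB remaining, 8192 >> 10 = 8 fits, so it returns 8 << 17. Either way the
TD size lands in bits 21:17, as the comment requires.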

/**
 * Finds out the remaining packets to be sent
 *
 * @param running_total	total size sent so far
 * @param trb_buff_len	length of the TRB Buffer
 * @param total_packet_count	total packet count
 * @param maxpacketsize	max packet size of current pipe
 * @param num_trbs_left	number of TRBs left to be processed
 * @return 0 if running_total or trb_buff_len is 0, else remainder
 */
static u32 xhci_v1_0_td_remainder(int running_total,
				int trb_buff_len,
				unsigned int total_packet_count,
				int maxpacketsize,
				unsigned int num_trbs_left)
{
	int packets_transferred;

	/* One TRB with a zero-length data packet. */
	if (num_trbs_left == 0 || (running_total == 0 && trb_buff_len == 0))
		return 0;

	/*
	 * All the TRB queueing functions don't count the current TRB in
	 * running_total.
	 */
	packets_transferred = (running_total + trb_buff_len) / maxpacketsize;

	if ((total_packet_count - packets_transferred) > 31)
		return 31 << 17;
	return (total_packet_count - packets_transferred) << 17;
}

/**
 * Ring the doorbell of the End Point
 *
 * @param udev		pointer to the USB device structure
 * @param ep_index	index of the endpoint
 * @param start_cycle	cycle flag of the first TRB
 * @param start_trb	pointer to the first TRB
 * @return none
 */
static void giveback_first_trb(struct usb_device *udev, int ep_index,
				int start_cycle,
				struct xhci_generic_trb *start_trb)
{
	struct xhci_ctrl *ctrl = udev->controller;

	/*
	 * Pass all the TRBs to the hardware at once and make sure this write
	 * isn't reordered.
	 */
	if (start_cycle)
		start_trb->field[3] |= cpu_to_le32(start_cycle);
	else
		start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);

	xhci_flush_cache((uint32_t)start_trb, sizeof(struct xhci_generic_trb));

	/* Ringing EP doorbell here */
	xhci_writel(&ctrl->dba->doorbell[udev->slot_id],
				DB_VALUE(ep_index, 0));

	return;
}

/**** POLLING mechanism for XHCI ****/

/**
 * Finalizes a handled event TRB by advancing our dequeue pointer and giving
 * the TRB back to the hardware for recycling. Must call this exactly once at
 * the end of each event handler, and not touch the TRB again afterwards.
 *
 * @param ctrl	Host controller data structure
 * @return none
 */
void xhci_acknowledge_event(struct xhci_ctrl *ctrl)
{
	/* Advance our dequeue pointer to the next event */
	inc_deq(ctrl, ctrl->event_ring);

	/* Inform the hardware */
	xhci_writeq(&ctrl->ir_set->erst_dequeue,
		(uintptr_t)ctrl->event_ring->dequeue | ERST_EHB);
}

/**
 * Checks if there is a new event to handle on the event ring.
 *
 * @param ctrl	Host controller data structure
 * @return 0 if failure else 1 on success
 */
static int event_ready(struct xhci_ctrl *ctrl)
{
	union xhci_trb *event;

	xhci_inval_cache((uint32_t)ctrl->event_ring->dequeue,
			sizeof(union xhci_trb));

	event = ctrl->event_ring->dequeue;

	/* Does the HC or OS own the TRB? */
	if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
			ctrl->event_ring->cycle_state)
		return 0;

	return 1;
}

/**
 * Waits for a specific type of event and returns it. Discards unexpected
 * events. Caller *must* call xhci_acknowledge_event() after it is finished
 * processing the event, and must not access the returned pointer afterwards.
 *
 * @param ctrl		Host controller data structure
 * @param expected	TRB type expected from Event TRB
 * @return pointer to event trb
 */
union xhci_trb *xhci_wait_for_event(struct xhci_ctrl *ctrl, trb_type expected)
{
	trb_type type;
	unsigned long ts = get_timer(0);

	do {
		union xhci_trb *event = ctrl->event_ring->dequeue;

		if (!event_ready(ctrl))
			continue;

		type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->event_cmd.flags));
		if (type == expected)
			return event;

		if (type == TRB_PORT_STATUS)
		/* TODO: remove this once enumeration has been reworked */
			/*
			 * Port status change events always have a
			 * successful completion code
			 */
			BUG_ON(GET_COMP_CODE(
				le32_to_cpu(event->generic.field[2])) !=
								COMP_SUCCESS);
		else
			printf("Unexpected XHCI event TRB, skipping... "
				"(%08x %08x %08x %08x)\n",
				le32_to_cpu(event->generic.field[0]),
				le32_to_cpu(event->generic.field[1]),
				le32_to_cpu(event->generic.field[2]),
				le32_to_cpu(event->generic.field[3]));

		xhci_acknowledge_event(ctrl);
	} while (get_timer(ts) < XHCI_TIMEOUT);

	if (expected == TRB_TRANSFER)
		return NULL;

	printf("XHCI timeout on event type %d... cannot recover.\n", expected);
	BUG();
}

/*
 * Stops transfer processing for an endpoint and throws away all unprocessed
 * TRBs by setting the xHC's dequeue pointer to our enqueue pointer. The next
 * xhci_bulk_tx/xhci_ctrl_tx on this endpoint will add new transfers there and
 * ring the doorbell, causing this endpoint to start working again.
 * (Careful: This will BUG() when there was no transfer in progress. Shouldn't
 * happen in practice for current uses and is too complicated to fix right now.)
 */
static void abort_td(struct usb_device *udev, int ep_index)
{
	struct xhci_ctrl *ctrl = udev->controller;
	struct xhci_ring *ring = ctrl->devs[udev->slot_id]->eps[ep_index].ring;
	union xhci_trb *event;
	u32 field;

	xhci_queue_command(ctrl, NULL, udev->slot_id, ep_index, TRB_STOP_RING);

	event = xhci_wait_for_event(ctrl, TRB_TRANSFER);
	field = le32_to_cpu(event->trans_event.flags);
	BUG_ON(TRB_TO_SLOT_ID(field) != udev->slot_id);
	BUG_ON(TRB_TO_EP_INDEX(field) != ep_index);
	BUG_ON(GET_COMP_CODE(le32_to_cpu(event->trans_event.transfer_len))
		!= COMP_STOP);
	xhci_acknowledge_event(ctrl);

	event = xhci_wait_for_event(ctrl, TRB_COMPLETION);
	BUG_ON(TRB_TO_SLOT_ID(le32_to_cpu(event->event_cmd.flags))
		!= udev->slot_id || GET_COMP_CODE(le32_to_cpu(
		event->event_cmd.status)) != COMP_SUCCESS);
	xhci_acknowledge_event(ctrl);

	xhci_queue_command(ctrl, (void *)((uintptr_t)ring->enqueue |
		ring->cycle_state), udev->slot_id, ep_index, TRB_SET_DEQ);
	event = xhci_wait_for_event(ctrl, TRB_COMPLETION);
	BUG_ON(TRB_TO_SLOT_ID(le32_to_cpu(event->event_cmd.flags))
		!= udev->slot_id || GET_COMP_CODE(le32_to_cpu(
		event->event_cmd.status)) != COMP_SUCCESS);
	xhci_acknowledge_event(ctrl);
}

static void record_transfer_result(struct usb_device *udev,
				union xhci_trb *event, int length)
{
	udev->act_len = min(length, length -
		EVENT_TRB_LEN(le32_to_cpu(event->trans_event.transfer_len)));

	switch (GET_COMP_CODE(le32_to_cpu(event->trans_event.transfer_len))) {
	case COMP_SUCCESS:
		BUG_ON(udev->act_len != length);
		/* fallthrough */
	case COMP_SHORT_TX:
		udev->status = 0;
		break;
	case COMP_STALL:
		udev->status = USB_ST_STALLED;
		break;
	case COMP_DB_ERR:
	case COMP_TRB_ERR:
		udev->status = USB_ST_BUF_ERR;
		break;
	case COMP_BABBLE:
		udev->status = USB_ST_BABBLE_DET;
		break;
	default:
		udev->status = 0x80;  /* USB_ST_TOO_LAZY_TO_MAKE_A_NEW_MACRO */
	}
}

/**** Bulk and Control transfer methods ****/
/**
 * Queues up the BULK Request
 *
 * @param udev		pointer to the USB device structure
 * @param pipe		contains the DIR_IN or OUT, devnum
 * @param length	length of the buffer
 * @param buffer	buffer to be read/written based on the request
 * @return returns 0 if successful else -1 on failure
 */
int xhci_bulk_tx(struct usb_device *udev, unsigned long pipe,
			int length, void *buffer)
{
	int num_trbs = 0;
	struct xhci_generic_trb *start_trb;
	bool first_trb = false;
	int start_cycle;
	u32 field = 0;
	u32 length_field = 0;
	struct xhci_ctrl *ctrl = udev->controller;
	int slot_id = udev->slot_id;
	int ep_index;
	struct xhci_virt_device *virt_dev;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_ring *ring;		/* EP transfer ring */
	union xhci_trb *event;

	int running_total, trb_buff_len;
	unsigned int total_packet_count;
	int maxpacketsize;
	u64 addr;
	int ret;
	u32 trb_fields[4];
	u64 val_64 = (uintptr_t)buffer;

	debug("dev=%p, pipe=%lx, buffer=%p, length=%d\n",
		udev, pipe, buffer, length);

	ep_index = usb_pipe_ep_index(pipe);
	virt_dev = ctrl->devs[slot_id];

	xhci_inval_cache((uint32_t)virt_dev->out_ctx->bytes,
			virt_dev->out_ctx->size);

	ep_ctx = xhci_get_ep_ctx(ctrl, virt_dev->out_ctx, ep_index);

	ring = virt_dev->eps[ep_index].ring;
	/*
	 * How much data is (potentially) left before the 64KB boundary?
	 * XHCI Spec puts a restriction (TABLE 49 and section 6.4.1 of the
	 * XHCI Spec) that the buffer should not span a 64KB boundary.
	 * If so, we send the request in more than one TRB by chaining them.
	 */
	running_total = TRB_MAX_BUFF_SIZE -
			(lower_32_bits(val_64) & (TRB_MAX_BUFF_SIZE - 1));
	trb_buff_len = running_total;
	running_total &= TRB_MAX_BUFF_SIZE - 1;

	/*
	 * If there's some data on this 64KB chunk, or we have to send a
	 * zero-length transfer, we need at least one TRB
	 */
	if (running_total != 0 || length == 0)
		num_trbs++;

	/* How many more 64KB chunks to transfer, how many more TRBs? */
	while (running_total < length) {
		num_trbs++;
		running_total += TRB_MAX_BUFF_SIZE;
	}

	/*
	 * XXX: Calling routine prepare_ring() called in place of
	 * prepare_transfer() as there in 'Linux' since we are not
	 * maintaining multiple TDs/transfer at the same time.
	 */
	ret = prepare_ring(ctrl, ring,
			le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK);
	if (ret < 0)
		return ret;

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs. The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ring->enqueue->generic;
	start_cycle = ring->cycle_state;

	running_total = 0;
	maxpacketsize = usb_maxpacket(udev, pipe);

	total_packet_count = DIV_ROUND_UP(length, maxpacketsize);

	/* How much data is in the first TRB? */
	/*
	 * How much data is (potentially) left before the 64KB boundary?
	 * XHCI Spec puts a restriction (TABLE 49 and section 6.4.1 of the
	 * XHCI Spec) that the buffer should not span a 64KB boundary.
	 * If so, we send the request in more than one TRB by chaining them.
	 */
	addr = val_64;

	if (trb_buff_len > length)
		trb_buff_len = length;

	first_trb = true;

	/* flush the buffer before use */
	xhci_flush_cache((uint32_t)buffer, length);

	/* Queue the first TRB, even if it's zero-length */
	do {
		u32 remainder = 0;
		field = 0;
		/* Don't change the cycle bit of the first TRB until later */
		if (first_trb) {
			first_trb = false;
			if (start_cycle == 0)
				field |= TRB_CYCLE;
		} else {
			field |= ring->cycle_state;
		}

		/*
		 * Chain all the TRBs together; clear the chain bit in the last
		 * TRB to indicate it's the last TRB in the chain.
		 */
		if (num_trbs > 1)
			field |= TRB_CHAIN;
		else
			field |= TRB_IOC;

		/* Only set interrupt on short packet for IN endpoints */
		if (usb_pipein(pipe))
			field |= TRB_ISP;

		/* Set the TRB length, TD size, and interrupter fields. */
		if (HC_VERSION(xhci_readl(&ctrl->hccr->cr_capbase)) < 0x100)
			remainder = xhci_td_remainder(length - running_total);
		else
			remainder = xhci_v1_0_td_remainder(running_total,
							trb_buff_len,
							total_packet_count,
							maxpacketsize,
							num_trbs - 1);

		length_field = ((trb_buff_len & TRB_LEN_MASK) |
				remainder |
				((0 & TRB_INTR_TARGET_MASK) <<
				TRB_INTR_TARGET_SHIFT));

		trb_fields[0] = lower_32_bits(addr);
		trb_fields[1] = upper_32_bits(addr);
		trb_fields[2] = length_field;
		trb_fields[3] = field | (TRB_NORMAL << TRB_TYPE_SHIFT);

		queue_trb(ctrl, ring, (num_trbs > 1), trb_fields);

		--num_trbs;

		running_total += trb_buff_len;

		/* Calculate length for next transfer */
		addr += trb_buff_len;
		trb_buff_len = min((length - running_total), TRB_MAX_BUFF_SIZE);
	} while (running_total < length);

	giveback_first_trb(udev, ep_index, start_cycle, start_trb);

	event = xhci_wait_for_event(ctrl, TRB_TRANSFER);
	if (!event) {
		debug("XHCI bulk transfer timed out, aborting...\n");
		abort_td(udev, ep_index);
		udev->status = USB_ST_NAK_REC;  /* closest thing to a timeout */
		udev->act_len = 0;
		return -ETIMEDOUT;
	}
	field = le32_to_cpu(event->trans_event.flags);

	BUG_ON(TRB_TO_SLOT_ID(field) != slot_id);
	BUG_ON(TRB_TO_EP_INDEX(field) != ep_index);
	BUG_ON(*(void **)(uintptr_t)le64_to_cpu(event->trans_event.buffer) -
		buffer > (size_t)length);

	record_transfer_result(udev, event, length);
	xhci_acknowledge_event(ctrl);
	xhci_inval_cache((uint32_t)buffer, length);

	return (udev->status != USB_ST_NOT_PROC) ? 0 : -1;
}

/**
 * Queues up the Control Transfer Request
 *
 * @param udev		pointer to the USB device structure
 * @param pipe		contains the DIR_IN or OUT, devnum
 * @param req		request type
 * @param length	length of the buffer
 * @param buffer	buffer to be read/written based on the request
 * @return returns 0 if successful else error code on failure
 */
int xhci_ctrl_tx(struct usb_device *udev, unsigned long pipe,
			struct devrequest *req, int length,
			void *buffer)
{
	int ret;
	int start_cycle;
	int num_trbs;
	u32 field;
	u32 length_field;
	u64 buf_64 = 0;
	struct xhci_generic_trb *start_trb;
	struct xhci_ctrl *ctrl = udev->controller;
	int slot_id = udev->slot_id;
	int ep_index;
	u32 trb_fields[4];
	struct xhci_virt_device *virt_dev = ctrl->devs[slot_id];
	struct xhci_ring *ep_ring;
	union xhci_trb *event;

	debug("req=%u (%#x), type=%u (%#x), value=%u (%#x), index=%u\n",
		req->request, req->request,
		req->requesttype, req->requesttype,
		le16_to_cpu(req->value), le16_to_cpu(req->value),
		le16_to_cpu(req->index));

	ep_index = usb_pipe_ep_index(pipe);

	ep_ring = virt_dev->eps[ep_index].ring;

	/*
	 * Check to see if the max packet size for the default control
	 * endpoint changed during FS device enumeration
	 */
	if (udev->speed == USB_SPEED_FULL) {
		ret = xhci_check_maxpacket(udev);
		if (ret < 0)
			return ret;
	}

	xhci_inval_cache((uint32_t)virt_dev->out_ctx->bytes,
				virt_dev->out_ctx->size);

	struct xhci_ep_ctx *ep_ctx = NULL;
	ep_ctx = xhci_get_ep_ctx(ctrl, virt_dev->out_ctx, ep_index);

	/* 1 TRB for setup, 1 for status */
	num_trbs = 2;
	/*
	 * Don't need to check if we need additional event data and normal TRBs,
	 * since data in control transfers will never get bigger than 16MB
	 * XXX: can we get a buffer that crosses 64KB boundaries?
	 */

	if (length > 0)
		num_trbs++;
	/*
	 * XXX: Calling routine prepare_ring() called in place of
	 * prepare_transfer() as there in 'Linux' since we are not
	 * maintaining multiple TDs/transfer at the same time.
	 */
	ret = prepare_ring(ctrl, ep_ring,
				le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK);

	if (ret < 0)
		return ret;

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs. The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	debug("start_trb %p, start_cycle %d\n", start_trb, start_cycle);

	/* Queue setup TRB - see section 6.4.1.2.1 */
	/* FIXME better way to translate setup_packet into two u32 fields? */
	field = 0;
	field |= TRB_IDT | (TRB_SETUP << TRB_TYPE_SHIFT);
	if (start_cycle == 0)
		field |= 0x1;

	/* xHCI 1.0 6.4.1.2.1: Transfer Type field */
	if (HC_VERSION(xhci_readl(&ctrl->hccr->cr_capbase)) == 0x100) {
		if (length > 0) {
			if (req->requesttype & USB_DIR_IN)
				field |= (TRB_DATA_IN << TRB_TX_TYPE_SHIFT);
			else
				field |= (TRB_DATA_OUT << TRB_TX_TYPE_SHIFT);
		}
	}

	debug("req->requesttype = %d, req->request = %d,"
		"le16_to_cpu(req->value) = %d,"
		"le16_to_cpu(req->index) = %d,"
		"le16_to_cpu(req->length) = %d\n",
		req->requesttype, req->request, le16_to_cpu(req->value),
		le16_to_cpu(req->index), le16_to_cpu(req->length));

	trb_fields[0] = req->requesttype | req->request << 8 |
				le16_to_cpu(req->value) << 16;
	trb_fields[1] = le16_to_cpu(req->index) |
			le16_to_cpu(req->length) << 16;
	/* TRB_LEN | (TRB_INTR_TARGET) */
	trb_fields[2] = (8 | ((0 & TRB_INTR_TARGET_MASK) <<
			TRB_INTR_TARGET_SHIFT));
	/* Immediate data in pointer */
	trb_fields[3] = field;
	queue_trb(ctrl, ep_ring, true, trb_fields);

	/* Re-initializing field to zero */
	field = 0;
	/* If there's data, queue data TRBs */
	/* Only set interrupt on short packet for IN endpoints */
	if (usb_pipein(pipe))
		field = TRB_ISP | (TRB_DATA << TRB_TYPE_SHIFT);
	else
		field = (TRB_DATA << TRB_TYPE_SHIFT);

	length_field = (length & TRB_LEN_MASK) | xhci_td_remainder(length) |
			((0 & TRB_INTR_TARGET_MASK) << TRB_INTR_TARGET_SHIFT);
	debug("length_field = %d, length = %d,"
		"xhci_td_remainder(length) = %d , TRB_INTR_TARGET(0) = %d\n",
		length_field, (length & TRB_LEN_MASK),
		xhci_td_remainder(length), 0);

	if (length > 0) {
		if (req->requesttype & USB_DIR_IN)
			field |= TRB_DIR_IN;
		buf_64 = (uintptr_t)buffer;

		trb_fields[0] = lower_32_bits(buf_64);
		trb_fields[1] = upper_32_bits(buf_64);
		trb_fields[2] = length_field;
		trb_fields[3] = field | ep_ring->cycle_state;

		xhci_flush_cache((uint32_t)buffer, length);
		queue_trb(ctrl, ep_ring, true, trb_fields);
	}

	/*
	 * Queue status TRB -
	 * see Table 7 and sections 4.11.2.2 and 6.4.1.2.3
	 */

	/* If the device sent data, the status stage is an OUT transfer */
	field = 0;
	if (length > 0 && req->requesttype & USB_DIR_IN)
		field = 0;
	else
		field = TRB_DIR_IN;

	trb_fields[0] = 0;
	trb_fields[1] = 0;
	trb_fields[2] = ((0 & TRB_INTR_TARGET_MASK) << TRB_INTR_TARGET_SHIFT);
	/* Event on completion */
	trb_fields[3] = field | TRB_IOC |
			(TRB_STATUS << TRB_TYPE_SHIFT) |
			ep_ring->cycle_state;

	queue_trb(ctrl, ep_ring, false, trb_fields);

	giveback_first_trb(udev, ep_index, start_cycle, start_trb);

	event = xhci_wait_for_event(ctrl, TRB_TRANSFER);
	if (!event)
		goto abort;
	field = le32_to_cpu(event->trans_event.flags);

	BUG_ON(TRB_TO_SLOT_ID(field) != slot_id);
	BUG_ON(TRB_TO_EP_INDEX(field) != ep_index);

	record_transfer_result(udev, event, length);
	xhci_acknowledge_event(ctrl);

	/* Invalidate buffer to make it available to usb-core */
	if (length > 0)
		xhci_inval_cache((uint32_t)buffer, length);

	if (GET_COMP_CODE(le32_to_cpu(event->trans_event.transfer_len))
			== COMP_SHORT_TX) {
		/* Short data stage, clear up additional status stage event */
		event = xhci_wait_for_event(ctrl, TRB_TRANSFER);
		if (!event)
			goto abort;
		BUG_ON(TRB_TO_SLOT_ID(field) != slot_id);
		BUG_ON(TRB_TO_EP_INDEX(field) != ep_index);
		xhci_acknowledge_event(ctrl);
	}

	return (udev->status != USB_ST_NOT_PROC) ? 0 : -1;

abort:
	debug("XHCI control transfer timed out, aborting...\n");
	abort_td(udev, ep_index);
	udev->status = USB_ST_NAK_REC;
	udev->act_len = 0;
	return -ETIMEDOUT;
}
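To make the TRB-count logic at the top of xhci_bulk_tx() concrete, here is
the same computation as a small standalone program, with hypothetical input
numbers (the buffer address and length are not from the patch); TRB_MAX_BUFF_SIZE
is the 64KB limit a single TRB buffer may not cross:

/* Hypothetical illustration of the xhci_bulk_tx() TRB-count math */
#include <stdio.h>

int main(void)
{
	const unsigned int TRB_MAX_BUFF_SIZE = 1 << 16;	/* 64KB, as in xhci.h */
	unsigned int addr = 0x2000F000;		/* example buffer address */
	int length = 0x18000;			/* example 96KB transfer */
	int num_trbs = 0;

	/* Bytes left before the next 64KB boundary of the buffer */
	int running_total = TRB_MAX_BUFF_SIZE -
			(addr & (TRB_MAX_BUFF_SIZE - 1));	/* 0x1000 */
	running_total &= TRB_MAX_BUFF_SIZE - 1;

	/* First TRB covers the partial chunk (or a zero-length transfer) */
	if (running_total != 0 || length == 0)
		num_trbs++;
	/* One more TRB per additional 64KB chunk */
	while (running_total < length) {
		num_trbs++;
		running_total += TRB_MAX_BUFF_SIZE;
	}

	printf("num_trbs = %d\n", num_trbs);	/* prints 3: 4KB + 64KB + 28KB */
	return 0;
}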
drivers/usb/host/xhci.c | 1030 (new file)
File diff suppressed because it is too large

drivers/usb/host/xhci.h | 1255 (new file)
File diff suppressed because it is too large
include/usb.h
@@ -125,6 +125,8 @@ struct usb_device {
 	struct usb_device *children[USB_MAXCHILDREN];
 
 	void *controller;		/* hardware controller private data */
+	/* slot_id - for xHCI enabled devices */
+	unsigned int slot_id;
 };
 
 /**********************************************************************
@@ -138,7 +140,7 @@ struct usb_device {
 	defined(CONFIG_USB_OMAP3) || defined(CONFIG_USB_DA8XX) || \
 	defined(CONFIG_USB_BLACKFIN) || defined(CONFIG_USB_AM35X) || \
 	defined(CONFIG_USB_MUSB_DSPS) || defined(CONFIG_USB_MUSB_AM35X) || \
-	defined(CONFIG_USB_MUSB_OMAP2PLUS)
+	defined(CONFIG_USB_MUSB_OMAP2PLUS) || defined(CONFIG_USB_XHCI)
 
 int usb_lowlevel_init(int index, void **controller);
 int usb_lowlevel_stop(int index);
@@ -338,6 +340,10 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate);
 #define usb_pipecontrol(pipe)	(usb_pipetype((pipe)) == PIPE_CONTROL)
 #define usb_pipebulk(pipe)	(usb_pipetype((pipe)) == PIPE_BULK)
 
+#define usb_pipe_ep_index(pipe)	\
+		usb_pipecontrol(pipe) ? (usb_pipeendpoint(pipe) * 2) : \
+			((usb_pipeendpoint(pipe) * 2) - \
+			(usb_pipein(pipe) ? 0 : 1))
 
 /*************************************************************************
  * Hub Stuff
@@ -382,5 +388,6 @@ struct usb_device *usb_alloc_new_device(void *controller);
 
 int usb_new_device(struct usb_device *dev);
 void usb_free_device(void);
+int usb_alloc_device(struct usb_device *dev);
 
 #endif /*_USB_H_ */
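The usb_pipe_ep_index() macro added above maps an (endpoint, direction) pair
onto the endpoint index space used by the xHC device contexts, where ep0 is
bidirectional and every other endpoint comes as an OUT/IN pair. A few worked
values, derived directly from the macro:

/* usb_pipe_ep_index() examples:
 *   control ep0               -> 0 * 2       = 0
 *   bulk ep1 OUT              -> (1 * 2) - 1 = 1
 *   bulk ep1 IN  (usb_pipein) -> (1 * 2) - 0 = 2
 *   bulk ep2 IN               -> (2 * 2) - 0 = 4
 */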