Merge git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile
Pull arch/tile updates from Chris Metcalf:
 "These changes provide support for PCIe root complex and USB host mode
  for tilegx's on-chip I/Os.

  In addition, this pull provides the required underpinning for the
  on-chip networking support that was pulled into 3.5.  The changes
  have all been through LKML (with several rounds for PCIe RC) and on
  linux-next."

* git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile:
  tile: updates to pci root complex from community feedback
  bounce: allow use of bounce pool via config option
  usb: add host support for the tilegx architecture
  arch/tile: provide kernel support for the tilegx USB shim
  tile pci: enable IOMMU to support DMA for legacy devices
  arch/tile: enable ZONE_DMA for tilegx
  tilegx pci: support I/O to arbitrarily-cached pages
  tile: remove unused header
  arch/tile: tilegx PCI root complex support
  arch/tile: provide kernel support for the tilegx TRIO shim
  arch/tile: break out the "csum a long" function to <asm/checksum.h>
  arch/tile: provide kernel support for the tilegx mPIPE shim
  arch/tile: common DMA code for the GXIO IORPC subsystem
  arch/tile: support MMIO-based readb/writeb etc.
  arch/tile: introduce GXIO IORPC framework for tilegx
This commit is contained in:
		
						commit
						f0a08fcb59
					
				| @ -3,6 +3,8 @@ | ||||
| 
 | ||||
| config TILE | ||||
| 	def_bool y | ||||
| 	select HAVE_DMA_ATTRS | ||||
| 	select HAVE_DMA_API_DEBUG | ||||
| 	select HAVE_KVM if !TILEGX | ||||
| 	select GENERIC_FIND_FIRST_BIT | ||||
| 	select USE_GENERIC_SMP_HELPERS | ||||
| @ -79,6 +81,9 @@ config ARCH_DMA_ADDR_T_64BIT | ||||
| config NEED_DMA_MAP_STATE | ||||
| 	def_bool y | ||||
| 
 | ||||
| config ARCH_HAS_DMA_SET_COHERENT_MASK | ||||
| 	bool | ||||
| 
 | ||||
| config LOCKDEP_SUPPORT | ||||
| 	def_bool y | ||||
| 
 | ||||
| @ -212,6 +217,22 @@ config HIGHMEM | ||||
| 
 | ||||
| 	  If unsure, say "true". | ||||
| 
 | ||||
| config ZONE_DMA | ||||
| 	def_bool y | ||||
| 
 | ||||
| config IOMMU_HELPER | ||||
| 	bool | ||||
| 
 | ||||
| config NEED_SG_DMA_LENGTH | ||||
| 	bool | ||||
| 
 | ||||
| config SWIOTLB | ||||
| 	bool | ||||
| 	default TILEGX | ||||
| 	select IOMMU_HELPER | ||||
| 	select NEED_SG_DMA_LENGTH | ||||
| 	select ARCH_HAS_DMA_SET_COHERENT_MASK | ||||
| 
 | ||||
| # We do not currently support disabling NUMA. | ||||
| config NUMA | ||||
| 	bool # "NUMA Memory Allocation and Scheduler Support" | ||||
| @ -345,6 +366,8 @@ config KERNEL_PL | ||||
| 	  kernel will be built to run at.  Generally you should use | ||||
| 	  the default value here. | ||||
| 
 | ||||
| source "arch/tile/gxio/Kconfig" | ||||
| 
 | ||||
| endmenu  # Tilera-specific configuration | ||||
| 
 | ||||
| menu "Bus options" | ||||
| @ -354,6 +377,9 @@ config PCI | ||||
| 	default y | ||||
| 	select PCI_DOMAINS | ||||
| 	select GENERIC_PCI_IOMAP | ||||
| 	select TILE_GXIO_TRIO if TILEGX | ||||
| 	select ARCH_SUPPORTS_MSI if TILEGX | ||||
| 	select PCI_MSI if TILEGX | ||||
| 	---help--- | ||||
| 	  Enable PCI root complex support, so PCIe endpoint devices can | ||||
| 	  be attached to the Tile chip.  Many, but not all, PCI devices | ||||
| @ -370,6 +396,22 @@ config NO_IOPORT | ||||
| 
 | ||||
| source "drivers/pci/Kconfig" | ||||
| 
 | ||||
| config TILE_USB | ||||
| 	tristate "Tilera USB host adapter support" | ||||
| 	default y | ||||
| 	depends on USB | ||||
| 	depends on TILEGX | ||||
| 	select TILE_GXIO_USB_HOST | ||||
| 	---help--- | ||||
| 	  Provides USB host adapter support for the built-in EHCI and OHCI | ||||
| 	  interfaces on TILE-Gx chips. | ||||
| 
 | ||||
| # USB OHCI needs the bounce pool since tilegx will often have more | ||||
| # than 4GB of memory, but we don't currently use the IOTLB to present | ||||
| # a 32-bit address to OHCI.  So we need to use a bounce pool instead. | ||||
| config NEED_BOUNCE_POOL | ||||
| 	def_bool USB_OHCI_HCD | ||||
| 
 | ||||
| config HOTPLUG | ||||
| 	bool "Support for hot-pluggable devices" | ||||
| 	---help--- | ||||
|  | ||||
| @ -59,6 +59,8 @@ libs-y		+= $(LIBGCC_PATH) | ||||
| # See arch/tile/Kbuild for content of core part of the kernel
 | ||||
| core-y		+= arch/tile/ | ||||
| 
 | ||||
| core-$(CONFIG_TILE_GXIO) += arch/tile/gxio/ | ||||
| 
 | ||||
| ifdef TILERA_ROOT | ||||
| INSTALL_PATH ?= $(TILERA_ROOT)/tile/boot | ||||
| endif | ||||
|  | ||||
							
								
								
									
										28
									
								
								arch/tile/gxio/Kconfig
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										28
									
								
								arch/tile/gxio/Kconfig
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,28 @@ | ||||
# GXIO support symbols.  None of these are user-visible; they are
# selected by the drivers that need them (PCI, TILE_USB, networking).

# Support direct access to TILE-Gx hardware from user space, via the
# gxio library, or from kernel space, via kernel IORPC support.
config TILE_GXIO
	bool
	depends on TILEGX

# Support direct access to the common I/O DMA facility within the
# TILE-Gx mPIPE and Trio hardware from kernel space.
config TILE_GXIO_DMA
	bool
	select TILE_GXIO

# Support direct access to the TILE-Gx mPIPE hardware from kernel space.
config TILE_GXIO_MPIPE
	bool
	select TILE_GXIO
	select TILE_GXIO_DMA

# Support direct access to the TILE-Gx TRIO hardware from kernel space.
config TILE_GXIO_TRIO
	bool
	select TILE_GXIO
	select TILE_GXIO_DMA

# Support direct access to the TILE-Gx USB hardware from kernel space.
config TILE_GXIO_USB_HOST
	bool
	select TILE_GXIO
							
								
								
									
										9
									
								
								arch/tile/gxio/Makefile
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										9
									
								
								arch/tile/gxio/Makefile
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,9 @@ | ||||
| #
 | ||||
| # Makefile for the Tile-Gx device access support.
 | ||||
| #
 | ||||
| 
 | ||||
| obj-$(CONFIG_TILE_GXIO) += iorpc_globals.o kiorpc.o | ||||
| obj-$(CONFIG_TILE_GXIO_DMA) += dma_queue.o | ||||
| obj-$(CONFIG_TILE_GXIO_MPIPE) += mpipe.o iorpc_mpipe.o iorpc_mpipe_info.o | ||||
| obj-$(CONFIG_TILE_GXIO_TRIO) += trio.o iorpc_trio.o | ||||
| obj-$(CONFIG_TILE_GXIO_USB_HOST) += usb_host.o iorpc_usb_host.o | ||||
							
								
								
									
										176
									
								
								arch/tile/gxio/dma_queue.c
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										176
									
								
								arch/tile/gxio/dma_queue.c
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,176 @@ | ||||
| /*
 | ||||
|  * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||||
|  * | ||||
|  *   This program is free software; you can redistribute it and/or | ||||
|  *   modify it under the terms of the GNU General Public License | ||||
|  *   as published by the Free Software Foundation, version 2. | ||||
|  * | ||||
|  *   This program is distributed in the hope that it will be useful, but | ||||
|  *   WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||||
|  *   NON INFRINGEMENT.  See the GNU General Public License for | ||||
|  *   more details. | ||||
|  */ | ||||
| 
 | ||||
| #include <linux/io.h> | ||||
| #include <linux/atomic.h> | ||||
| #include <linux/module.h> | ||||
| #include <gxio/dma_queue.h> | ||||
| 
 | ||||
| /* Wait for a memory read to complete. */ | ||||
| #define wait_for_value(val)                             \ | ||||
|   __asm__ __volatile__("move %0, %0" :: "r"(val)) | ||||
| 
 | ||||
| /* The index is in the low 16. */ | ||||
| #define DMA_QUEUE_INDEX_MASK ((1 << 16) - 1) | ||||
| 
 | ||||
| /*
 | ||||
|  * The hardware descriptor-ring type. | ||||
|  * This matches the types used by mpipe (MPIPE_EDMA_POST_REGION_VAL_t) | ||||
|  * and trio (TRIO_PUSH_DMA_REGION_VAL_t or TRIO_PULL_DMA_REGION_VAL_t). | ||||
|  * See those types for more documentation on the individual fields. | ||||
|  */ | ||||
| typedef union { | ||||
| 	struct { | ||||
| #ifndef __BIG_ENDIAN__ | ||||
| 		uint64_t ring_idx:16; | ||||
| 		uint64_t count:16; | ||||
| 		uint64_t gen:1; | ||||
| 		uint64_t __reserved:31; | ||||
| #else | ||||
| 		uint64_t __reserved:31; | ||||
| 		uint64_t gen:1; | ||||
| 		uint64_t count:16; | ||||
| 		uint64_t ring_idx:16; | ||||
| #endif | ||||
| 	}; | ||||
| 	uint64_t word; | ||||
| } __gxio_ring_t; | ||||
| 
 | ||||
| void __gxio_dma_queue_init(__gxio_dma_queue_t *dma_queue, | ||||
| 			   void *post_region_addr, unsigned int num_entries) | ||||
| { | ||||
| 	/*
 | ||||
| 	 * Limit 65536 entry rings to 65535 credits because we only have a | ||||
| 	 * 16 bit completion counter. | ||||
| 	 */ | ||||
| 	int64_t credits = (num_entries < 65536) ? num_entries : 65535; | ||||
| 
 | ||||
| 	memset(dma_queue, 0, sizeof(*dma_queue)); | ||||
| 
 | ||||
| 	dma_queue->post_region_addr = post_region_addr; | ||||
| 	dma_queue->hw_complete_count = 0; | ||||
| 	dma_queue->credits_and_next_index = credits << DMA_QUEUE_CREDIT_SHIFT; | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL_GPL(__gxio_dma_queue_init); | ||||
| 
 | ||||
/*
 * Fold newly-completed hardware descriptors back into the credit pool.
 *
 * Lock-free: a no-op cmpxchg(-1, -1) samples the 64-bit completion
 * count, the 16-bit hardware counter supplies the delta, and a real
 * cmpxchg publishes the advanced count.  Losing the race to another
 * thread is harmless — the winner adds the credits instead.
 */
void __gxio_dma_queue_update_credits(__gxio_dma_queue_t *dma_queue)
{
	__gxio_ring_t val;
	uint64_t count;
	uint64_t delta;
	uint64_t new_count;

	/*
	 * Read the 64-bit completion count without touching the cache, so
	 * we later avoid having to evict any sharers of this cache line
	 * when we update it below.
	 */
	uint64_t orig_hw_complete_count =
		cmpxchg(&dma_queue->hw_complete_count,
			-1, -1);

	/* Make sure the load completes before we access the hardware. */
	wait_for_value(orig_hw_complete_count);

	/* Read the 16-bit count of how many packets it has completed. */
	val.word = __gxio_mmio_read(dma_queue->post_region_addr);
	count = val.count;

	/*
	 * Calculate the number of completions since we last updated the
	 * 64-bit counter.  It's safe to ignore the high bits because the
	 * maximum credit value is 65535.
	 */
	delta = (count - orig_hw_complete_count) & 0xffff;
	if (delta == 0)
		return;

	/*
	 * Try to write back the count, advanced by delta.  If we race with
	 * another thread, this might fail, in which case we return
	 * immediately on the assumption that some credits are (or at least
	 * were) available.
	 */
	new_count = orig_hw_complete_count + delta;
	if (cmpxchg(&dma_queue->hw_complete_count,
		    orig_hw_complete_count,
		    new_count) != orig_hw_complete_count)
		return;

	/*
	 * We succeeded in advancing the completion count; add back the
	 * corresponding number of egress credits.
	 */
	__insn_fetchadd(&dma_queue->credits_and_next_index,
			(delta << DMA_QUEUE_CREDIT_SHIFT));
}

EXPORT_SYMBOL_GPL(__gxio_dma_queue_update_credits);
| 
 | ||||
| /*
 | ||||
|  * A separate 'blocked' method for put() so that backtraces and | ||||
|  * profiles will clearly indicate that we're wasting time spinning on | ||||
|  * egress availability rather than actually posting commands. | ||||
|  */ | ||||
| int64_t __gxio_dma_queue_wait_for_credits(__gxio_dma_queue_t *dma_queue, | ||||
| 					  int64_t modifier) | ||||
| { | ||||
| 	int backoff = 16; | ||||
| 	int64_t old; | ||||
| 
 | ||||
| 	do { | ||||
| 		int i; | ||||
| 		/* Back off to avoid spamming memory networks. */ | ||||
| 		for (i = backoff; i > 0; i--) | ||||
| 			__insn_mfspr(SPR_PASS); | ||||
| 
 | ||||
| 		/* Check credits again. */ | ||||
| 		__gxio_dma_queue_update_credits(dma_queue); | ||||
| 		old = __insn_fetchaddgez(&dma_queue->credits_and_next_index, | ||||
| 					 modifier); | ||||
| 
 | ||||
| 		/* Calculate bounded exponential backoff for next iteration. */ | ||||
| 		if (backoff < 256) | ||||
| 			backoff *= 2; | ||||
| 	} while (old + modifier < 0); | ||||
| 
 | ||||
| 	return old; | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL_GPL(__gxio_dma_queue_wait_for_credits); | ||||
| 
 | ||||
| int64_t __gxio_dma_queue_reserve_aux(__gxio_dma_queue_t *dma_queue, | ||||
| 				     unsigned int num, int wait) | ||||
| { | ||||
| 	return __gxio_dma_queue_reserve(dma_queue, num, wait != 0, true); | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL_GPL(__gxio_dma_queue_reserve_aux); | ||||
| 
 | ||||
| int __gxio_dma_queue_is_complete(__gxio_dma_queue_t *dma_queue, | ||||
| 				 int64_t completion_slot, int update) | ||||
| { | ||||
| 	if (update) { | ||||
| 		if (ACCESS_ONCE(dma_queue->hw_complete_count) > | ||||
| 		    completion_slot) | ||||
| 			return 1; | ||||
| 
 | ||||
| 		__gxio_dma_queue_update_credits(dma_queue); | ||||
| 	} | ||||
| 
 | ||||
| 	return ACCESS_ONCE(dma_queue->hw_complete_count) > completion_slot; | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL_GPL(__gxio_dma_queue_is_complete); | ||||
							
								
								
									
										89
									
								
								arch/tile/gxio/iorpc_globals.c
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										89
									
								
								arch/tile/gxio/iorpc_globals.c
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,89 @@ | ||||
| /*
 | ||||
|  * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||||
|  * | ||||
|  *   This program is free software; you can redistribute it and/or | ||||
|  *   modify it under the terms of the GNU General Public License | ||||
|  *   as published by the Free Software Foundation, version 2. | ||||
|  * | ||||
|  *   This program is distributed in the hope that it will be useful, but | ||||
|  *   WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||||
|  *   NON INFRINGEMENT.  See the GNU General Public License for | ||||
|  *   more details. | ||||
|  */ | ||||
| 
 | ||||
| /* This file is machine-generated; DO NOT EDIT! */ | ||||
| #include "gxio/iorpc_globals.h" | ||||
| 
 | ||||
| struct arm_pollfd_param { | ||||
| 	union iorpc_pollfd pollfd; | ||||
| }; | ||||
| 
 | ||||
| int __iorpc_arm_pollfd(int fd, int pollfd_cookie) | ||||
| { | ||||
| 	struct arm_pollfd_param temp; | ||||
| 	struct arm_pollfd_param *params = &temp; | ||||
| 
 | ||||
| 	params->pollfd.kernel.cookie = pollfd_cookie; | ||||
| 
 | ||||
| 	return hv_dev_pwrite(fd, 0, (HV_VirtAddr) params, sizeof(*params), | ||||
| 			     IORPC_OP_ARM_POLLFD); | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL(__iorpc_arm_pollfd); | ||||
| 
 | ||||
| struct close_pollfd_param { | ||||
| 	union iorpc_pollfd pollfd; | ||||
| }; | ||||
| 
 | ||||
| int __iorpc_close_pollfd(int fd, int pollfd_cookie) | ||||
| { | ||||
| 	struct close_pollfd_param temp; | ||||
| 	struct close_pollfd_param *params = &temp; | ||||
| 
 | ||||
| 	params->pollfd.kernel.cookie = pollfd_cookie; | ||||
| 
 | ||||
| 	return hv_dev_pwrite(fd, 0, (HV_VirtAddr) params, sizeof(*params), | ||||
| 			     IORPC_OP_CLOSE_POLLFD); | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL(__iorpc_close_pollfd); | ||||
| 
 | ||||
| struct get_mmio_base_param { | ||||
| 	HV_PTE base; | ||||
| }; | ||||
| 
 | ||||
| int __iorpc_get_mmio_base(int fd, HV_PTE *base) | ||||
| { | ||||
| 	int __result; | ||||
| 	struct get_mmio_base_param temp; | ||||
| 	struct get_mmio_base_param *params = &temp; | ||||
| 
 | ||||
| 	__result = | ||||
| 	    hv_dev_pread(fd, 0, (HV_VirtAddr) params, sizeof(*params), | ||||
| 			 IORPC_OP_GET_MMIO_BASE); | ||||
| 	*base = params->base; | ||||
| 
 | ||||
| 	return __result; | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL(__iorpc_get_mmio_base); | ||||
| 
 | ||||
| struct check_mmio_offset_param { | ||||
| 	unsigned long offset; | ||||
| 	unsigned long size; | ||||
| }; | ||||
| 
 | ||||
| int __iorpc_check_mmio_offset(int fd, unsigned long offset, unsigned long size) | ||||
| { | ||||
| 	struct check_mmio_offset_param temp; | ||||
| 	struct check_mmio_offset_param *params = &temp; | ||||
| 
 | ||||
| 	params->offset = offset; | ||||
| 	params->size = size; | ||||
| 
 | ||||
| 	return hv_dev_pwrite(fd, 0, (HV_VirtAddr) params, sizeof(*params), | ||||
| 			     IORPC_OP_CHECK_MMIO_OFFSET); | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL(__iorpc_check_mmio_offset); | ||||
							
								
								
									
										529
									
								
								arch/tile/gxio/iorpc_mpipe.c
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										529
									
								
								arch/tile/gxio/iorpc_mpipe.c
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,529 @@ | ||||
| /*
 | ||||
|  * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||||
|  * | ||||
|  *   This program is free software; you can redistribute it and/or | ||||
|  *   modify it under the terms of the GNU General Public License | ||||
|  *   as published by the Free Software Foundation, version 2. | ||||
|  * | ||||
|  *   This program is distributed in the hope that it will be useful, but | ||||
|  *   WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||||
|  *   NON INFRINGEMENT.  See the GNU General Public License for | ||||
|  *   more details. | ||||
|  */ | ||||
| 
 | ||||
| /* This file is machine-generated; DO NOT EDIT! */ | ||||
| #include "gxio/iorpc_mpipe.h" | ||||
| 
 | ||||
| struct alloc_buffer_stacks_param { | ||||
| 	unsigned int count; | ||||
| 	unsigned int first; | ||||
| 	unsigned int flags; | ||||
| }; | ||||
| 
 | ||||
| int gxio_mpipe_alloc_buffer_stacks(gxio_mpipe_context_t * context, | ||||
| 				   unsigned int count, unsigned int first, | ||||
| 				   unsigned int flags) | ||||
| { | ||||
| 	struct alloc_buffer_stacks_param temp; | ||||
| 	struct alloc_buffer_stacks_param *params = &temp; | ||||
| 
 | ||||
| 	params->count = count; | ||||
| 	params->first = first; | ||||
| 	params->flags = flags; | ||||
| 
 | ||||
| 	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||||
| 			     sizeof(*params), | ||||
| 			     GXIO_MPIPE_OP_ALLOC_BUFFER_STACKS); | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL(gxio_mpipe_alloc_buffer_stacks); | ||||
| 
 | ||||
| struct init_buffer_stack_aux_param { | ||||
| 	union iorpc_mem_buffer buffer; | ||||
| 	unsigned int stack; | ||||
| 	unsigned int buffer_size_enum; | ||||
| }; | ||||
| 
 | ||||
| int gxio_mpipe_init_buffer_stack_aux(gxio_mpipe_context_t * context, | ||||
| 				     void *mem_va, size_t mem_size, | ||||
| 				     unsigned int mem_flags, unsigned int stack, | ||||
| 				     unsigned int buffer_size_enum) | ||||
| { | ||||
| 	int __result; | ||||
| 	unsigned long long __cpa; | ||||
| 	pte_t __pte; | ||||
| 	struct init_buffer_stack_aux_param temp; | ||||
| 	struct init_buffer_stack_aux_param *params = &temp; | ||||
| 
 | ||||
| 	__result = va_to_cpa_and_pte(mem_va, &__cpa, &__pte); | ||||
| 	if (__result != 0) | ||||
| 		return __result; | ||||
| 	params->buffer.kernel.cpa = __cpa; | ||||
| 	params->buffer.kernel.size = mem_size; | ||||
| 	params->buffer.kernel.pte = __pte; | ||||
| 	params->buffer.kernel.flags = mem_flags; | ||||
| 	params->stack = stack; | ||||
| 	params->buffer_size_enum = buffer_size_enum; | ||||
| 
 | ||||
| 	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||||
| 			     sizeof(*params), | ||||
| 			     GXIO_MPIPE_OP_INIT_BUFFER_STACK_AUX); | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL(gxio_mpipe_init_buffer_stack_aux); | ||||
| 
 | ||||
| 
 | ||||
struct alloc_notif_rings_param {
	unsigned int count;
	unsigned int first;
	unsigned int flags;
};

/* Allocate @count NotifRings starting at hint @first. */
int gxio_mpipe_alloc_notif_rings(gxio_mpipe_context_t * context,
				 unsigned int count, unsigned int first,
				 unsigned int flags)
{
	struct alloc_notif_rings_param temp;
	struct alloc_notif_rings_param *params = &temp;

	params->count = count;
	params->first = first;
	params->flags = flags;

	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
			     sizeof(*params), GXIO_MPIPE_OP_ALLOC_NOTIF_RINGS);
}

EXPORT_SYMBOL(gxio_mpipe_alloc_notif_rings);

struct init_notif_ring_aux_param {
	union iorpc_mem_buffer buffer;
	unsigned int ring;
};

/*
 * Bind @mem_va/@mem_size as the descriptor memory for NotifRing @ring.
 * The VA is converted to client PA + PTE first; a failed translation
 * short-circuits without touching the hypervisor.
 */
int gxio_mpipe_init_notif_ring_aux(gxio_mpipe_context_t * context, void *mem_va,
				   size_t mem_size, unsigned int mem_flags,
				   unsigned int ring)
{
	int __result;
	unsigned long long __cpa;
	pte_t __pte;
	struct init_notif_ring_aux_param temp;
	struct init_notif_ring_aux_param *params = &temp;

	__result = va_to_cpa_and_pte(mem_va, &__cpa, &__pte);
	if (__result != 0)
		return __result;
	params->buffer.kernel.cpa = __cpa;
	params->buffer.kernel.size = mem_size;
	params->buffer.kernel.pte = __pte;
	params->buffer.kernel.flags = mem_flags;
	params->ring = ring;

	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
			     sizeof(*params),
			     GXIO_MPIPE_OP_INIT_NOTIF_RING_AUX);
}

EXPORT_SYMBOL(gxio_mpipe_init_notif_ring_aux);

struct request_notif_ring_interrupt_param {
	union iorpc_interrupt interrupt;
	unsigned int ring;
};

/*
 * Route NotifRing @ring's interrupt to tile (@inter_x, @inter_y),
 * IPI @inter_ipi, event @inter_event.
 */
int gxio_mpipe_request_notif_ring_interrupt(gxio_mpipe_context_t * context,
					    int inter_x, int inter_y,
					    int inter_ipi, int inter_event,
					    unsigned int ring)
{
	struct request_notif_ring_interrupt_param temp;
	struct request_notif_ring_interrupt_param *params = &temp;

	params->interrupt.kernel.x = inter_x;
	params->interrupt.kernel.y = inter_y;
	params->interrupt.kernel.ipi = inter_ipi;
	params->interrupt.kernel.event = inter_event;
	params->ring = ring;

	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
			     sizeof(*params),
			     GXIO_MPIPE_OP_REQUEST_NOTIF_RING_INTERRUPT);
}

EXPORT_SYMBOL(gxio_mpipe_request_notif_ring_interrupt);

struct enable_notif_ring_interrupt_param {
	unsigned int ring;
};

/* Re-enable the (previously routed) interrupt for NotifRing @ring. */
int gxio_mpipe_enable_notif_ring_interrupt(gxio_mpipe_context_t * context,
					   unsigned int ring)
{
	struct enable_notif_ring_interrupt_param temp;
	struct enable_notif_ring_interrupt_param *params = &temp;

	params->ring = ring;

	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
			     sizeof(*params),
			     GXIO_MPIPE_OP_ENABLE_NOTIF_RING_INTERRUPT);
}

EXPORT_SYMBOL(gxio_mpipe_enable_notif_ring_interrupt);

struct alloc_notif_groups_param {
	unsigned int count;
	unsigned int first;
	unsigned int flags;
};

/* Allocate @count NotifGroups starting at hint @first. */
int gxio_mpipe_alloc_notif_groups(gxio_mpipe_context_t * context,
				  unsigned int count, unsigned int first,
				  unsigned int flags)
{
	struct alloc_notif_groups_param temp;
	struct alloc_notif_groups_param *params = &temp;

	params->count = count;
	params->first = first;
	params->flags = flags;

	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
			     sizeof(*params), GXIO_MPIPE_OP_ALLOC_NOTIF_GROUPS);
}

EXPORT_SYMBOL(gxio_mpipe_alloc_notif_groups);
| 
 | ||||
struct init_notif_group_param {
	unsigned int group;
	gxio_mpipe_notif_group_bits_t bits;
};

/* Configure NotifGroup @group with ring-membership bitmask @bits. */
int gxio_mpipe_init_notif_group(gxio_mpipe_context_t * context,
				unsigned int group,
				gxio_mpipe_notif_group_bits_t bits)
{
	struct init_notif_group_param temp;
	struct init_notif_group_param *params = &temp;

	params->group = group;
	params->bits = bits;

	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
			     sizeof(*params), GXIO_MPIPE_OP_INIT_NOTIF_GROUP);
}

EXPORT_SYMBOL(gxio_mpipe_init_notif_group);

struct alloc_buckets_param {
	unsigned int count;
	unsigned int first;
	unsigned int flags;
};

/* Allocate @count load-balancer buckets starting at hint @first. */
int gxio_mpipe_alloc_buckets(gxio_mpipe_context_t * context, unsigned int count,
			     unsigned int first, unsigned int flags)
{
	struct alloc_buckets_param temp;
	struct alloc_buckets_param *params = &temp;

	params->count = count;
	params->first = first;
	params->flags = flags;

	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
			     sizeof(*params), GXIO_MPIPE_OP_ALLOC_BUCKETS);
}

EXPORT_SYMBOL(gxio_mpipe_alloc_buckets);

struct init_bucket_param {
	unsigned int bucket;
	MPIPE_LBL_INIT_DAT_BSTS_TBL_t bucket_info;
};

/* Initialize load-balancer bucket @bucket from @bucket_info. */
int gxio_mpipe_init_bucket(gxio_mpipe_context_t * context, unsigned int bucket,
			   MPIPE_LBL_INIT_DAT_BSTS_TBL_t bucket_info)
{
	struct init_bucket_param temp;
	struct init_bucket_param *params = &temp;

	params->bucket = bucket;
	params->bucket_info = bucket_info;

	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
			     sizeof(*params), GXIO_MPIPE_OP_INIT_BUCKET);
}

EXPORT_SYMBOL(gxio_mpipe_init_bucket);

struct alloc_edma_rings_param {
	unsigned int count;
	unsigned int first;
	unsigned int flags;
};

/* Allocate @count eDMA rings starting at hint @first. */
int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t * context,
				unsigned int count, unsigned int first,
				unsigned int flags)
{
	struct alloc_edma_rings_param temp;
	struct alloc_edma_rings_param *params = &temp;

	params->count = count;
	params->first = first;
	params->flags = flags;

	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
			     sizeof(*params), GXIO_MPIPE_OP_ALLOC_EDMA_RINGS);
}

EXPORT_SYMBOL(gxio_mpipe_alloc_edma_rings);

struct init_edma_ring_aux_param {
	union iorpc_mem_buffer buffer;
	unsigned int ring;
	unsigned int channel;
};

/*
 * Bind @mem_va/@mem_size as descriptor memory for eDMA ring @ring on
 * egress @channel.  VA is translated to client PA + PTE first; a failed
 * translation is returned without issuing the IORPC.
 */
int gxio_mpipe_init_edma_ring_aux(gxio_mpipe_context_t * context, void *mem_va,
				  size_t mem_size, unsigned int mem_flags,
				  unsigned int ring, unsigned int channel)
{
	int __result;
	unsigned long long __cpa;
	pte_t __pte;
	struct init_edma_ring_aux_param temp;
	struct init_edma_ring_aux_param *params = &temp;

	__result = va_to_cpa_and_pte(mem_va, &__cpa, &__pte);
	if (__result != 0)
		return __result;
	params->buffer.kernel.cpa = __cpa;
	params->buffer.kernel.size = mem_size;
	params->buffer.kernel.pte = __pte;
	params->buffer.kernel.flags = mem_flags;
	params->ring = ring;
	params->channel = channel;

	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
			     sizeof(*params), GXIO_MPIPE_OP_INIT_EDMA_RING_AUX);
}

EXPORT_SYMBOL(gxio_mpipe_init_edma_ring_aux);
| 
 | ||||
| 
 | ||||
/*
 * Push a pre-serialized classifier rules @blob of @blob_size bytes to
 * the mPIPE driver.  Unlike the other stubs, the caller's blob is sent
 * directly as the parameter buffer (no marshaling struct).
 */
int gxio_mpipe_commit_rules(gxio_mpipe_context_t * context, const void *blob,
			    size_t blob_size)
{
	const void *params = blob;

	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, blob_size,
			     GXIO_MPIPE_OP_COMMIT_RULES);
}

EXPORT_SYMBOL(gxio_mpipe_commit_rules);

struct register_client_memory_param {
	unsigned int iotlb;
	HV_PTE pte;
	unsigned int flags;
};

/* Register client memory described by @pte with IOTLB @iotlb. */
int gxio_mpipe_register_client_memory(gxio_mpipe_context_t * context,
				      unsigned int iotlb, HV_PTE pte,
				      unsigned int flags)
{
	struct register_client_memory_param temp;
	struct register_client_memory_param *params = &temp;

	params->iotlb = iotlb;
	params->pte = pte;
	params->flags = flags;

	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
			     sizeof(*params),
			     GXIO_MPIPE_OP_REGISTER_CLIENT_MEMORY);
}

EXPORT_SYMBOL(gxio_mpipe_register_client_memory);

struct link_open_aux_param {
	_gxio_mpipe_link_name_t name;
	unsigned int flags;
};

/* Open the mPIPE link named @name with @flags. */
int gxio_mpipe_link_open_aux(gxio_mpipe_context_t * context,
			     _gxio_mpipe_link_name_t name, unsigned int flags)
{
	struct link_open_aux_param temp;
	struct link_open_aux_param *params = &temp;

	params->name = name;
	params->flags = flags;

	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
			     sizeof(*params), GXIO_MPIPE_OP_LINK_OPEN_AUX);
}

EXPORT_SYMBOL(gxio_mpipe_link_open_aux);

struct link_close_aux_param {
	int mac;
};

/* Close the previously opened link on MAC @mac. */
int gxio_mpipe_link_close_aux(gxio_mpipe_context_t * context, int mac)
{
	struct link_close_aux_param temp;
	struct link_close_aux_param *params = &temp;

	params->mac = mac;

	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
			     sizeof(*params), GXIO_MPIPE_OP_LINK_CLOSE_AUX);
}

EXPORT_SYMBOL(gxio_mpipe_link_close_aux);
| 
 | ||||
| 
 | ||||
struct get_timestamp_aux_param {
	uint64_t sec;
	uint64_t nsec;
	uint64_t cycles;
};

/*
 * Read the mPIPE timestamp into *@sec/*@nsec/*@cycles.  The outputs
 * are written even when the pread fails; callers must check the
 * return value.
 */
int gxio_mpipe_get_timestamp_aux(gxio_mpipe_context_t * context, uint64_t * sec,
				 uint64_t * nsec, uint64_t * cycles)
{
	int __result;
	struct get_timestamp_aux_param temp;
	struct get_timestamp_aux_param *params = &temp;

	__result =
	    hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params),
			 GXIO_MPIPE_OP_GET_TIMESTAMP_AUX);
	*sec = params->sec;
	*nsec = params->nsec;
	*cycles = params->cycles;

	return __result;
}

EXPORT_SYMBOL(gxio_mpipe_get_timestamp_aux);

struct set_timestamp_aux_param {
	uint64_t sec;
	uint64_t nsec;
	uint64_t cycles;
};

/* Set the mPIPE timestamp to @sec/@nsec, anchored at cycle @cycles. */
int gxio_mpipe_set_timestamp_aux(gxio_mpipe_context_t * context, uint64_t sec,
				 uint64_t nsec, uint64_t cycles)
{
	struct set_timestamp_aux_param temp;
	struct set_timestamp_aux_param *params = &temp;

	params->sec = sec;
	params->nsec = nsec;
	params->cycles = cycles;

	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
			     sizeof(*params), GXIO_MPIPE_OP_SET_TIMESTAMP_AUX);
}

EXPORT_SYMBOL(gxio_mpipe_set_timestamp_aux);

struct adjust_timestamp_aux_param {
	int64_t nsec;
};

/* Adjust the mPIPE timestamp by a signed @nsec offset. */
int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t * context,
				    int64_t nsec)
{
	struct adjust_timestamp_aux_param temp;
	struct adjust_timestamp_aux_param *params = &temp;

	params->nsec = nsec;

	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
			     sizeof(*params),
			     GXIO_MPIPE_OP_ADJUST_TIMESTAMP_AUX);
}

EXPORT_SYMBOL(gxio_mpipe_adjust_timestamp_aux);
| 
 | ||||
| struct arm_pollfd_param { | ||||
| 	union iorpc_pollfd pollfd; | ||||
| }; | ||||
| 
 | ||||
| int gxio_mpipe_arm_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie) | ||||
| { | ||||
| 	struct arm_pollfd_param temp; | ||||
| 	struct arm_pollfd_param *params = &temp; | ||||
| 
 | ||||
| 	params->pollfd.kernel.cookie = pollfd_cookie; | ||||
| 
 | ||||
| 	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||||
| 			     sizeof(*params), GXIO_MPIPE_OP_ARM_POLLFD); | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL(gxio_mpipe_arm_pollfd); | ||||
| 
 | ||||
| struct close_pollfd_param { | ||||
| 	union iorpc_pollfd pollfd; | ||||
| }; | ||||
| 
 | ||||
| int gxio_mpipe_close_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie) | ||||
| { | ||||
| 	struct close_pollfd_param temp; | ||||
| 	struct close_pollfd_param *params = &temp; | ||||
| 
 | ||||
| 	params->pollfd.kernel.cookie = pollfd_cookie; | ||||
| 
 | ||||
| 	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||||
| 			     sizeof(*params), GXIO_MPIPE_OP_CLOSE_POLLFD); | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL(gxio_mpipe_close_pollfd); | ||||
| 
 | ||||
| struct get_mmio_base_param { | ||||
| 	HV_PTE base; | ||||
| }; | ||||
| 
 | ||||
| int gxio_mpipe_get_mmio_base(gxio_mpipe_context_t * context, HV_PTE *base) | ||||
| { | ||||
| 	int __result; | ||||
| 	struct get_mmio_base_param temp; | ||||
| 	struct get_mmio_base_param *params = &temp; | ||||
| 
 | ||||
| 	__result = | ||||
| 	    hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params), | ||||
| 			 GXIO_MPIPE_OP_GET_MMIO_BASE); | ||||
| 	*base = params->base; | ||||
| 
 | ||||
| 	return __result; | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL(gxio_mpipe_get_mmio_base); | ||||
| 
 | ||||
| struct check_mmio_offset_param { | ||||
| 	unsigned long offset; | ||||
| 	unsigned long size; | ||||
| }; | ||||
| 
 | ||||
| int gxio_mpipe_check_mmio_offset(gxio_mpipe_context_t * context, | ||||
| 				 unsigned long offset, unsigned long size) | ||||
| { | ||||
| 	struct check_mmio_offset_param temp; | ||||
| 	struct check_mmio_offset_param *params = &temp; | ||||
| 
 | ||||
| 	params->offset = offset; | ||||
| 	params->size = size; | ||||
| 
 | ||||
| 	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||||
| 			     sizeof(*params), GXIO_MPIPE_OP_CHECK_MMIO_OFFSET); | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL(gxio_mpipe_check_mmio_offset); | ||||
							
								
								
									
										85
									
								
								arch/tile/gxio/iorpc_mpipe_info.c
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										85
									
								
								arch/tile/gxio/iorpc_mpipe_info.c
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,85 @@ | ||||
| /*
 | ||||
|  * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||||
|  * | ||||
|  *   This program is free software; you can redistribute it and/or | ||||
|  *   modify it under the terms of the GNU General Public License | ||||
|  *   as published by the Free Software Foundation, version 2. | ||||
|  * | ||||
|  *   This program is distributed in the hope that it will be useful, but | ||||
|  *   WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||||
|  *   NON INFRINGEMENT.  See the GNU General Public License for | ||||
|  *   more details. | ||||
|  */ | ||||
| 
 | ||||
| /* This file is machine-generated; DO NOT EDIT! */ | ||||
| #include "gxio/iorpc_mpipe_info.h" | ||||
| 
 | ||||
| 
 | ||||
| struct enumerate_aux_param { | ||||
| 	_gxio_mpipe_link_name_t name; | ||||
| 	_gxio_mpipe_link_mac_t mac; | ||||
| }; | ||||
| 
 | ||||
| int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t * context, | ||||
| 				  unsigned int idx, | ||||
| 				  _gxio_mpipe_link_name_t * name, | ||||
| 				  _gxio_mpipe_link_mac_t * mac) | ||||
| { | ||||
| 	int __result; | ||||
| 	struct enumerate_aux_param temp; | ||||
| 	struct enumerate_aux_param *params = &temp; | ||||
| 
 | ||||
| 	__result = | ||||
| 	    hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params), | ||||
| 			 (((uint64_t) idx << 32) | | ||||
| 			  GXIO_MPIPE_INFO_OP_ENUMERATE_AUX)); | ||||
| 	*name = params->name; | ||||
| 	*mac = params->mac; | ||||
| 
 | ||||
| 	return __result; | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL(gxio_mpipe_info_enumerate_aux); | ||||
| 
 | ||||
| struct get_mmio_base_param { | ||||
| 	HV_PTE base; | ||||
| }; | ||||
| 
 | ||||
| int gxio_mpipe_info_get_mmio_base(gxio_mpipe_info_context_t * context, | ||||
| 				  HV_PTE *base) | ||||
| { | ||||
| 	int __result; | ||||
| 	struct get_mmio_base_param temp; | ||||
| 	struct get_mmio_base_param *params = &temp; | ||||
| 
 | ||||
| 	__result = | ||||
| 	    hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params), | ||||
| 			 GXIO_MPIPE_INFO_OP_GET_MMIO_BASE); | ||||
| 	*base = params->base; | ||||
| 
 | ||||
| 	return __result; | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL(gxio_mpipe_info_get_mmio_base); | ||||
| 
 | ||||
| struct check_mmio_offset_param { | ||||
| 	unsigned long offset; | ||||
| 	unsigned long size; | ||||
| }; | ||||
| 
 | ||||
| int gxio_mpipe_info_check_mmio_offset(gxio_mpipe_info_context_t * context, | ||||
| 				      unsigned long offset, unsigned long size) | ||||
| { | ||||
| 	struct check_mmio_offset_param temp; | ||||
| 	struct check_mmio_offset_param *params = &temp; | ||||
| 
 | ||||
| 	params->offset = offset; | ||||
| 	params->size = size; | ||||
| 
 | ||||
| 	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||||
| 			     sizeof(*params), | ||||
| 			     GXIO_MPIPE_INFO_OP_CHECK_MMIO_OFFSET); | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL(gxio_mpipe_info_check_mmio_offset); | ||||
							
								
								
									
										327
									
								
								arch/tile/gxio/iorpc_trio.c
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										327
									
								
								arch/tile/gxio/iorpc_trio.c
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,327 @@ | ||||
| /*
 | ||||
|  * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||||
|  * | ||||
|  *   This program is free software; you can redistribute it and/or | ||||
|  *   modify it under the terms of the GNU General Public License | ||||
|  *   as published by the Free Software Foundation, version 2. | ||||
|  * | ||||
|  *   This program is distributed in the hope that it will be useful, but | ||||
|  *   WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||||
|  *   NON INFRINGEMENT.  See the GNU General Public License for | ||||
|  *   more details. | ||||
|  */ | ||||
| 
 | ||||
| /* This file is machine-generated; DO NOT EDIT! */ | ||||
| #include "gxio/iorpc_trio.h" | ||||
| 
 | ||||
| struct alloc_asids_param { | ||||
| 	unsigned int count; | ||||
| 	unsigned int first; | ||||
| 	unsigned int flags; | ||||
| }; | ||||
| 
 | ||||
| int gxio_trio_alloc_asids(gxio_trio_context_t * context, unsigned int count, | ||||
| 			  unsigned int first, unsigned int flags) | ||||
| { | ||||
| 	struct alloc_asids_param temp; | ||||
| 	struct alloc_asids_param *params = &temp; | ||||
| 
 | ||||
| 	params->count = count; | ||||
| 	params->first = first; | ||||
| 	params->flags = flags; | ||||
| 
 | ||||
| 	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||||
| 			     sizeof(*params), GXIO_TRIO_OP_ALLOC_ASIDS); | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL(gxio_trio_alloc_asids); | ||||
| 
 | ||||
| 
 | ||||
| struct alloc_memory_maps_param { | ||||
| 	unsigned int count; | ||||
| 	unsigned int first; | ||||
| 	unsigned int flags; | ||||
| }; | ||||
| 
 | ||||
| int gxio_trio_alloc_memory_maps(gxio_trio_context_t * context, | ||||
| 				unsigned int count, unsigned int first, | ||||
| 				unsigned int flags) | ||||
| { | ||||
| 	struct alloc_memory_maps_param temp; | ||||
| 	struct alloc_memory_maps_param *params = &temp; | ||||
| 
 | ||||
| 	params->count = count; | ||||
| 	params->first = first; | ||||
| 	params->flags = flags; | ||||
| 
 | ||||
| 	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||||
| 			     sizeof(*params), GXIO_TRIO_OP_ALLOC_MEMORY_MAPS); | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL(gxio_trio_alloc_memory_maps); | ||||
| 
 | ||||
| 
 | ||||
| struct alloc_pio_regions_param { | ||||
| 	unsigned int count; | ||||
| 	unsigned int first; | ||||
| 	unsigned int flags; | ||||
| }; | ||||
| 
 | ||||
| int gxio_trio_alloc_pio_regions(gxio_trio_context_t * context, | ||||
| 				unsigned int count, unsigned int first, | ||||
| 				unsigned int flags) | ||||
| { | ||||
| 	struct alloc_pio_regions_param temp; | ||||
| 	struct alloc_pio_regions_param *params = &temp; | ||||
| 
 | ||||
| 	params->count = count; | ||||
| 	params->first = first; | ||||
| 	params->flags = flags; | ||||
| 
 | ||||
| 	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||||
| 			     sizeof(*params), GXIO_TRIO_OP_ALLOC_PIO_REGIONS); | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL(gxio_trio_alloc_pio_regions); | ||||
| 
 | ||||
| struct init_pio_region_aux_param { | ||||
| 	unsigned int pio_region; | ||||
| 	unsigned int mac; | ||||
| 	uint32_t bus_address_hi; | ||||
| 	unsigned int flags; | ||||
| }; | ||||
| 
 | ||||
| int gxio_trio_init_pio_region_aux(gxio_trio_context_t * context, | ||||
| 				  unsigned int pio_region, unsigned int mac, | ||||
| 				  uint32_t bus_address_hi, unsigned int flags) | ||||
| { | ||||
| 	struct init_pio_region_aux_param temp; | ||||
| 	struct init_pio_region_aux_param *params = &temp; | ||||
| 
 | ||||
| 	params->pio_region = pio_region; | ||||
| 	params->mac = mac; | ||||
| 	params->bus_address_hi = bus_address_hi; | ||||
| 	params->flags = flags; | ||||
| 
 | ||||
| 	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||||
| 			     sizeof(*params), GXIO_TRIO_OP_INIT_PIO_REGION_AUX); | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL(gxio_trio_init_pio_region_aux); | ||||
| 
 | ||||
| 
 | ||||
| struct init_memory_map_mmu_aux_param { | ||||
| 	unsigned int map; | ||||
| 	unsigned long va; | ||||
| 	uint64_t size; | ||||
| 	unsigned int asid; | ||||
| 	unsigned int mac; | ||||
| 	uint64_t bus_address; | ||||
| 	unsigned int node; | ||||
| 	unsigned int order_mode; | ||||
| }; | ||||
| 
 | ||||
| int gxio_trio_init_memory_map_mmu_aux(gxio_trio_context_t * context, | ||||
| 				      unsigned int map, unsigned long va, | ||||
| 				      uint64_t size, unsigned int asid, | ||||
| 				      unsigned int mac, uint64_t bus_address, | ||||
| 				      unsigned int node, | ||||
| 				      unsigned int order_mode) | ||||
| { | ||||
| 	struct init_memory_map_mmu_aux_param temp; | ||||
| 	struct init_memory_map_mmu_aux_param *params = &temp; | ||||
| 
 | ||||
| 	params->map = map; | ||||
| 	params->va = va; | ||||
| 	params->size = size; | ||||
| 	params->asid = asid; | ||||
| 	params->mac = mac; | ||||
| 	params->bus_address = bus_address; | ||||
| 	params->node = node; | ||||
| 	params->order_mode = order_mode; | ||||
| 
 | ||||
| 	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||||
| 			     sizeof(*params), | ||||
| 			     GXIO_TRIO_OP_INIT_MEMORY_MAP_MMU_AUX); | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL(gxio_trio_init_memory_map_mmu_aux); | ||||
| 
 | ||||
| struct get_port_property_param { | ||||
| 	struct pcie_trio_ports_property trio_ports; | ||||
| }; | ||||
| 
 | ||||
| int gxio_trio_get_port_property(gxio_trio_context_t * context, | ||||
| 				struct pcie_trio_ports_property *trio_ports) | ||||
| { | ||||
| 	int __result; | ||||
| 	struct get_port_property_param temp; | ||||
| 	struct get_port_property_param *params = &temp; | ||||
| 
 | ||||
| 	__result = | ||||
| 	    hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params), | ||||
| 			 GXIO_TRIO_OP_GET_PORT_PROPERTY); | ||||
| 	*trio_ports = params->trio_ports; | ||||
| 
 | ||||
| 	return __result; | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL(gxio_trio_get_port_property); | ||||
| 
 | ||||
| struct config_legacy_intr_param { | ||||
| 	union iorpc_interrupt interrupt; | ||||
| 	unsigned int mac; | ||||
| 	unsigned int intx; | ||||
| }; | ||||
| 
 | ||||
| int gxio_trio_config_legacy_intr(gxio_trio_context_t * context, int inter_x, | ||||
| 				 int inter_y, int inter_ipi, int inter_event, | ||||
| 				 unsigned int mac, unsigned int intx) | ||||
| { | ||||
| 	struct config_legacy_intr_param temp; | ||||
| 	struct config_legacy_intr_param *params = &temp; | ||||
| 
 | ||||
| 	params->interrupt.kernel.x = inter_x; | ||||
| 	params->interrupt.kernel.y = inter_y; | ||||
| 	params->interrupt.kernel.ipi = inter_ipi; | ||||
| 	params->interrupt.kernel.event = inter_event; | ||||
| 	params->mac = mac; | ||||
| 	params->intx = intx; | ||||
| 
 | ||||
| 	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||||
| 			     sizeof(*params), GXIO_TRIO_OP_CONFIG_LEGACY_INTR); | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL(gxio_trio_config_legacy_intr); | ||||
| 
 | ||||
| struct config_msi_intr_param { | ||||
| 	union iorpc_interrupt interrupt; | ||||
| 	unsigned int mac; | ||||
| 	unsigned int mem_map; | ||||
| 	uint64_t mem_map_base; | ||||
| 	uint64_t mem_map_limit; | ||||
| 	unsigned int asid; | ||||
| }; | ||||
| 
 | ||||
| int gxio_trio_config_msi_intr(gxio_trio_context_t * context, int inter_x, | ||||
| 			      int inter_y, int inter_ipi, int inter_event, | ||||
| 			      unsigned int mac, unsigned int mem_map, | ||||
| 			      uint64_t mem_map_base, uint64_t mem_map_limit, | ||||
| 			      unsigned int asid) | ||||
| { | ||||
| 	struct config_msi_intr_param temp; | ||||
| 	struct config_msi_intr_param *params = &temp; | ||||
| 
 | ||||
| 	params->interrupt.kernel.x = inter_x; | ||||
| 	params->interrupt.kernel.y = inter_y; | ||||
| 	params->interrupt.kernel.ipi = inter_ipi; | ||||
| 	params->interrupt.kernel.event = inter_event; | ||||
| 	params->mac = mac; | ||||
| 	params->mem_map = mem_map; | ||||
| 	params->mem_map_base = mem_map_base; | ||||
| 	params->mem_map_limit = mem_map_limit; | ||||
| 	params->asid = asid; | ||||
| 
 | ||||
| 	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||||
| 			     sizeof(*params), GXIO_TRIO_OP_CONFIG_MSI_INTR); | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL(gxio_trio_config_msi_intr); | ||||
| 
 | ||||
| 
 | ||||
| struct set_mps_mrs_param { | ||||
| 	uint16_t mps; | ||||
| 	uint16_t mrs; | ||||
| 	unsigned int mac; | ||||
| }; | ||||
| 
 | ||||
| int gxio_trio_set_mps_mrs(gxio_trio_context_t * context, uint16_t mps, | ||||
| 			  uint16_t mrs, unsigned int mac) | ||||
| { | ||||
| 	struct set_mps_mrs_param temp; | ||||
| 	struct set_mps_mrs_param *params = &temp; | ||||
| 
 | ||||
| 	params->mps = mps; | ||||
| 	params->mrs = mrs; | ||||
| 	params->mac = mac; | ||||
| 
 | ||||
| 	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||||
| 			     sizeof(*params), GXIO_TRIO_OP_SET_MPS_MRS); | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL(gxio_trio_set_mps_mrs); | ||||
| 
 | ||||
| struct force_rc_link_up_param { | ||||
| 	unsigned int mac; | ||||
| }; | ||||
| 
 | ||||
| int gxio_trio_force_rc_link_up(gxio_trio_context_t * context, unsigned int mac) | ||||
| { | ||||
| 	struct force_rc_link_up_param temp; | ||||
| 	struct force_rc_link_up_param *params = &temp; | ||||
| 
 | ||||
| 	params->mac = mac; | ||||
| 
 | ||||
| 	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||||
| 			     sizeof(*params), GXIO_TRIO_OP_FORCE_RC_LINK_UP); | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL(gxio_trio_force_rc_link_up); | ||||
| 
 | ||||
| struct force_ep_link_up_param { | ||||
| 	unsigned int mac; | ||||
| }; | ||||
| 
 | ||||
| int gxio_trio_force_ep_link_up(gxio_trio_context_t * context, unsigned int mac) | ||||
| { | ||||
| 	struct force_ep_link_up_param temp; | ||||
| 	struct force_ep_link_up_param *params = &temp; | ||||
| 
 | ||||
| 	params->mac = mac; | ||||
| 
 | ||||
| 	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||||
| 			     sizeof(*params), GXIO_TRIO_OP_FORCE_EP_LINK_UP); | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL(gxio_trio_force_ep_link_up); | ||||
| 
 | ||||
| struct get_mmio_base_param { | ||||
| 	HV_PTE base; | ||||
| }; | ||||
| 
 | ||||
| int gxio_trio_get_mmio_base(gxio_trio_context_t * context, HV_PTE *base) | ||||
| { | ||||
| 	int __result; | ||||
| 	struct get_mmio_base_param temp; | ||||
| 	struct get_mmio_base_param *params = &temp; | ||||
| 
 | ||||
| 	__result = | ||||
| 	    hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params), | ||||
| 			 GXIO_TRIO_OP_GET_MMIO_BASE); | ||||
| 	*base = params->base; | ||||
| 
 | ||||
| 	return __result; | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL(gxio_trio_get_mmio_base); | ||||
| 
 | ||||
| struct check_mmio_offset_param { | ||||
| 	unsigned long offset; | ||||
| 	unsigned long size; | ||||
| }; | ||||
| 
 | ||||
| int gxio_trio_check_mmio_offset(gxio_trio_context_t * context, | ||||
| 				unsigned long offset, unsigned long size) | ||||
| { | ||||
| 	struct check_mmio_offset_param temp; | ||||
| 	struct check_mmio_offset_param *params = &temp; | ||||
| 
 | ||||
| 	params->offset = offset; | ||||
| 	params->size = size; | ||||
| 
 | ||||
| 	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||||
| 			     sizeof(*params), GXIO_TRIO_OP_CHECK_MMIO_OFFSET); | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL(gxio_trio_check_mmio_offset); | ||||
							
								
								
									
										99
									
								
								arch/tile/gxio/iorpc_usb_host.c
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										99
									
								
								arch/tile/gxio/iorpc_usb_host.c
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,99 @@ | ||||
| /*
 | ||||
|  * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||||
|  * | ||||
|  *   This program is free software; you can redistribute it and/or | ||||
|  *   modify it under the terms of the GNU General Public License | ||||
|  *   as published by the Free Software Foundation, version 2. | ||||
|  * | ||||
|  *   This program is distributed in the hope that it will be useful, but | ||||
|  *   WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||||
|  *   NON INFRINGEMENT.  See the GNU General Public License for | ||||
|  *   more details. | ||||
|  */ | ||||
| 
 | ||||
| /* This file is machine-generated; DO NOT EDIT! */ | ||||
| #include "gxio/iorpc_usb_host.h" | ||||
| 
 | ||||
| struct cfg_interrupt_param { | ||||
| 	union iorpc_interrupt interrupt; | ||||
| }; | ||||
| 
 | ||||
| int gxio_usb_host_cfg_interrupt(gxio_usb_host_context_t * context, int inter_x, | ||||
| 				int inter_y, int inter_ipi, int inter_event) | ||||
| { | ||||
| 	struct cfg_interrupt_param temp; | ||||
| 	struct cfg_interrupt_param *params = &temp; | ||||
| 
 | ||||
| 	params->interrupt.kernel.x = inter_x; | ||||
| 	params->interrupt.kernel.y = inter_y; | ||||
| 	params->interrupt.kernel.ipi = inter_ipi; | ||||
| 	params->interrupt.kernel.event = inter_event; | ||||
| 
 | ||||
| 	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||||
| 			     sizeof(*params), GXIO_USB_HOST_OP_CFG_INTERRUPT); | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL(gxio_usb_host_cfg_interrupt); | ||||
| 
 | ||||
| struct register_client_memory_param { | ||||
| 	HV_PTE pte; | ||||
| 	unsigned int flags; | ||||
| }; | ||||
| 
 | ||||
| int gxio_usb_host_register_client_memory(gxio_usb_host_context_t * context, | ||||
| 					 HV_PTE pte, unsigned int flags) | ||||
| { | ||||
| 	struct register_client_memory_param temp; | ||||
| 	struct register_client_memory_param *params = &temp; | ||||
| 
 | ||||
| 	params->pte = pte; | ||||
| 	params->flags = flags; | ||||
| 
 | ||||
| 	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||||
| 			     sizeof(*params), | ||||
| 			     GXIO_USB_HOST_OP_REGISTER_CLIENT_MEMORY); | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL(gxio_usb_host_register_client_memory); | ||||
| 
 | ||||
| struct get_mmio_base_param { | ||||
| 	HV_PTE base; | ||||
| }; | ||||
| 
 | ||||
| int gxio_usb_host_get_mmio_base(gxio_usb_host_context_t * context, HV_PTE *base) | ||||
| { | ||||
| 	int __result; | ||||
| 	struct get_mmio_base_param temp; | ||||
| 	struct get_mmio_base_param *params = &temp; | ||||
| 
 | ||||
| 	__result = | ||||
| 	    hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params), | ||||
| 			 GXIO_USB_HOST_OP_GET_MMIO_BASE); | ||||
| 	*base = params->base; | ||||
| 
 | ||||
| 	return __result; | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL(gxio_usb_host_get_mmio_base); | ||||
| 
 | ||||
| struct check_mmio_offset_param { | ||||
| 	unsigned long offset; | ||||
| 	unsigned long size; | ||||
| }; | ||||
| 
 | ||||
| int gxio_usb_host_check_mmio_offset(gxio_usb_host_context_t * context, | ||||
| 				    unsigned long offset, unsigned long size) | ||||
| { | ||||
| 	struct check_mmio_offset_param temp; | ||||
| 	struct check_mmio_offset_param *params = &temp; | ||||
| 
 | ||||
| 	params->offset = offset; | ||||
| 	params->size = size; | ||||
| 
 | ||||
| 	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||||
| 			     sizeof(*params), | ||||
| 			     GXIO_USB_HOST_OP_CHECK_MMIO_OFFSET); | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL(gxio_usb_host_check_mmio_offset); | ||||
							
								
								
									
										61
									
								
								arch/tile/gxio/kiorpc.c
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										61
									
								
								arch/tile/gxio/kiorpc.c
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,61 @@ | ||||
| /*
 | ||||
|  * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||||
|  * | ||||
|  *   This program is free software; you can redistribute it and/or | ||||
|  *   modify it under the terms of the GNU General Public License | ||||
|  *   as published by the Free Software Foundation, version 2. | ||||
|  * | ||||
|  *   This program is distributed in the hope that it will be useful, but | ||||
|  *   WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||||
|  *   NON INFRINGEMENT.  See the GNU General Public License for | ||||
|  *   more details. | ||||
|  * | ||||
|  * TILE-Gx IORPC support for kernel I/O drivers. | ||||
|  */ | ||||
| 
 | ||||
| #include <linux/mmzone.h> | ||||
| #include <linux/module.h> | ||||
| #include <linux/io.h> | ||||
| #include <gxio/iorpc_globals.h> | ||||
| #include <gxio/kiorpc.h> | ||||
| 
 | ||||
| #ifdef DEBUG_IORPC | ||||
| #define TRACE(FMT, ...) pr_info(SIMPLE_MSG_LINE FMT, ## __VA_ARGS__) | ||||
| #else | ||||
| #define TRACE(...) | ||||
| #endif | ||||
| 
 | ||||
| /* Create kernel-VA-space MMIO mapping for an on-chip IO device. */ | ||||
| void __iomem *iorpc_ioremap(int hv_fd, resource_size_t offset, | ||||
| 			    unsigned long size) | ||||
| { | ||||
| 	pgprot_t mmio_base, prot = { 0 }; | ||||
| 	unsigned long pfn; | ||||
| 	int err; | ||||
| 
 | ||||
| 	/* Look up the shim's lotar and base PA. */ | ||||
| 	err = __iorpc_get_mmio_base(hv_fd, &mmio_base); | ||||
| 	if (err) { | ||||
| 		TRACE("get_mmio_base() failure: %d\n", err); | ||||
| 		return NULL; | ||||
| 	} | ||||
| 
 | ||||
| 	/* Make sure the HV driver approves of our offset and size. */ | ||||
| 	err = __iorpc_check_mmio_offset(hv_fd, offset, size); | ||||
| 	if (err) { | ||||
| 		TRACE("check_mmio_offset() failure: %d\n", err); | ||||
| 		return NULL; | ||||
| 	} | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * mmio_base contains a base pfn and homing coordinates.  Turn | ||||
| 	 * it into an MMIO pgprot and offset pfn. | ||||
| 	 */ | ||||
| 	prot = hv_pte_set_lotar(prot, hv_pte_get_lotar(mmio_base)); | ||||
| 	pfn = pte_pfn(mmio_base) + PFN_DOWN(offset); | ||||
| 
 | ||||
| 	return ioremap_prot(PFN_PHYS(pfn), size, prot); | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL(iorpc_ioremap); | ||||
							
								
								
									
										545
									
								
								arch/tile/gxio/mpipe.c
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										545
									
								
								arch/tile/gxio/mpipe.c
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,545 @@ | ||||
| /*
 | ||||
|  * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||||
|  * | ||||
|  *   This program is free software; you can redistribute it and/or | ||||
|  *   modify it under the terms of the GNU General Public License | ||||
|  *   as published by the Free Software Foundation, version 2. | ||||
|  * | ||||
|  *   This program is distributed in the hope that it will be useful, but | ||||
|  *   WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||||
|  *   NON INFRINGEMENT.  See the GNU General Public License for | ||||
|  *   more details. | ||||
|  */ | ||||
| 
 | ||||
| /*
 | ||||
|  * Implementation of mpipe gxio calls. | ||||
|  */ | ||||
| 
 | ||||
| #include <linux/errno.h> | ||||
| #include <linux/io.h> | ||||
| #include <linux/module.h> | ||||
| 
 | ||||
| #include <gxio/iorpc_globals.h> | ||||
| #include <gxio/iorpc_mpipe.h> | ||||
| #include <gxio/iorpc_mpipe_info.h> | ||||
| #include <gxio/kiorpc.h> | ||||
| #include <gxio/mpipe.h> | ||||
| 
 | ||||
| /* HACK: Avoid pointless "shadow" warnings. */ | ||||
| #define link link_shadow | ||||
| 
 | ||||
| int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index) | ||||
| { | ||||
| 	char file[32]; | ||||
| 
 | ||||
| 	int fd; | ||||
| 	int i; | ||||
| 
 | ||||
| 	snprintf(file, sizeof(file), "mpipe/%d/iorpc", mpipe_index); | ||||
| 	fd = hv_dev_open((HV_VirtAddr) file, 0); | ||||
| 	if (fd < 0) { | ||||
| 		if (fd >= GXIO_ERR_MIN && fd <= GXIO_ERR_MAX) | ||||
| 			return fd; | ||||
| 		else | ||||
| 			return -ENODEV; | ||||
| 	} | ||||
| 
 | ||||
| 	context->fd = fd; | ||||
| 
 | ||||
| 	/* Map in the MMIO space. */ | ||||
| 	context->mmio_cfg_base = (void __force *) | ||||
| 		iorpc_ioremap(fd, HV_MPIPE_CONFIG_MMIO_OFFSET, | ||||
| 			      HV_MPIPE_CONFIG_MMIO_SIZE); | ||||
| 	if (context->mmio_cfg_base == NULL) | ||||
| 		goto cfg_failed; | ||||
| 
 | ||||
| 	context->mmio_fast_base = (void __force *) | ||||
| 		iorpc_ioremap(fd, HV_MPIPE_FAST_MMIO_OFFSET, | ||||
| 			      HV_MPIPE_FAST_MMIO_SIZE); | ||||
| 	if (context->mmio_fast_base == NULL) | ||||
| 		goto fast_failed; | ||||
| 
 | ||||
| 	/* Initialize the stacks. */ | ||||
| 	for (i = 0; i < 8; i++) | ||||
| 		context->__stacks.stacks[i] = 255; | ||||
| 
 | ||||
| 	return 0; | ||||
| 
 | ||||
|       fast_failed: | ||||
| 	iounmap((void __force __iomem *)(context->mmio_cfg_base)); | ||||
|       cfg_failed: | ||||
| 	hv_dev_close(context->fd); | ||||
| 	return -ENODEV; | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL_GPL(gxio_mpipe_init); | ||||
| 
 | ||||
| int gxio_mpipe_destroy(gxio_mpipe_context_t *context) | ||||
| { | ||||
| 	iounmap((void __force __iomem *)(context->mmio_cfg_base)); | ||||
| 	iounmap((void __force __iomem *)(context->mmio_fast_base)); | ||||
| 	return hv_dev_close(context->fd); | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL_GPL(gxio_mpipe_destroy); | ||||
| 
 | ||||
| static int16_t gxio_mpipe_buffer_sizes[8] = | ||||
| 	{ 128, 256, 512, 1024, 1664, 4096, 10368, 16384 }; | ||||
| 
 | ||||
| gxio_mpipe_buffer_size_enum_t gxio_mpipe_buffer_size_to_buffer_size_enum(size_t | ||||
| 									 size) | ||||
| { | ||||
| 	int i; | ||||
| 	for (i = 0; i < 7; i++) | ||||
| 		if (size <= gxio_mpipe_buffer_sizes[i]) | ||||
| 			break; | ||||
| 	return i; | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL_GPL(gxio_mpipe_buffer_size_to_buffer_size_enum); | ||||
| 
 | ||||
| size_t gxio_mpipe_buffer_size_enum_to_buffer_size(gxio_mpipe_buffer_size_enum_t | ||||
| 						  buffer_size_enum) | ||||
| { | ||||
| 	if (buffer_size_enum > 7) | ||||
| 		buffer_size_enum = 7; | ||||
| 
 | ||||
| 	return gxio_mpipe_buffer_sizes[buffer_size_enum]; | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL_GPL(gxio_mpipe_buffer_size_enum_to_buffer_size); | ||||
| 
 | ||||
/*
 * Compute the bytes of stack memory needed to hold "buffers" buffer
 * handles: 12 handles fit per L2 cache line, rounded up to whole lines.
 */
size_t gxio_mpipe_calc_buffer_stack_bytes(unsigned long buffers)
{
	const int BUFFERS_PER_LINE = 12;
	unsigned long lines;

	/* Round up to a whole number of cache lines. */
	lines = (buffers + BUFFERS_PER_LINE - 1) / BUFFERS_PER_LINE;

	return lines * CHIP_L2_LINE_SIZE();
}

EXPORT_SYMBOL_GPL(gxio_mpipe_calc_buffer_stack_bytes);
| 
 | ||||
| int gxio_mpipe_init_buffer_stack(gxio_mpipe_context_t *context, | ||||
| 				 unsigned int stack, | ||||
| 				 gxio_mpipe_buffer_size_enum_t | ||||
| 				 buffer_size_enum, void *mem, size_t mem_size, | ||||
| 				 unsigned int mem_flags) | ||||
| { | ||||
| 	int result; | ||||
| 
 | ||||
| 	memset(mem, 0, mem_size); | ||||
| 
 | ||||
| 	result = gxio_mpipe_init_buffer_stack_aux(context, mem, mem_size, | ||||
| 						  mem_flags, stack, | ||||
| 						  buffer_size_enum); | ||||
| 	if (result < 0) | ||||
| 		return result; | ||||
| 
 | ||||
| 	/* Save the stack. */ | ||||
| 	context->__stacks.stacks[buffer_size_enum] = stack; | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL_GPL(gxio_mpipe_init_buffer_stack); | ||||
| 
 | ||||
/*
 * Initialize a NotifRing using the given memory; a thin wrapper around
 * the IORPC "aux" call, which also validates "mem_size".
 */
int gxio_mpipe_init_notif_ring(gxio_mpipe_context_t *context,
			       unsigned int ring,
			       void *mem, size_t mem_size,
			       unsigned int mem_flags)
{
	return gxio_mpipe_init_notif_ring_aux(context, mem, mem_size,
					      mem_flags, ring);
}

EXPORT_SYMBOL_GPL(gxio_mpipe_init_notif_ring);
| 
 | ||||
| int gxio_mpipe_init_notif_group_and_buckets(gxio_mpipe_context_t *context, | ||||
| 					    unsigned int group, | ||||
| 					    unsigned int ring, | ||||
| 					    unsigned int num_rings, | ||||
| 					    unsigned int bucket, | ||||
| 					    unsigned int num_buckets, | ||||
| 					    gxio_mpipe_bucket_mode_t mode) | ||||
| { | ||||
| 	int i; | ||||
| 	int result; | ||||
| 
 | ||||
| 	gxio_mpipe_bucket_info_t bucket_info = { { | ||||
| 						  .group = group, | ||||
| 						  .mode = mode, | ||||
| 						  } | ||||
| 	}; | ||||
| 
 | ||||
| 	gxio_mpipe_notif_group_bits_t bits = { {0} }; | ||||
| 
 | ||||
| 	for (i = 0; i < num_rings; i++) | ||||
| 		gxio_mpipe_notif_group_add_ring(&bits, ring + i); | ||||
| 
 | ||||
| 	result = gxio_mpipe_init_notif_group(context, group, bits); | ||||
| 	if (result != 0) | ||||
| 		return result; | ||||
| 
 | ||||
| 	for (i = 0; i < num_buckets; i++) { | ||||
| 		bucket_info.notifring = ring + (i % num_rings); | ||||
| 
 | ||||
| 		result = gxio_mpipe_init_bucket(context, bucket + i, | ||||
| 						bucket_info); | ||||
| 		if (result != 0) | ||||
| 			return result; | ||||
| 	} | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL_GPL(gxio_mpipe_init_notif_group_and_buckets); | ||||
| 
 | ||||
/*
 * Initialize an eDMA ring for "channel" using the given descriptor
 * memory; the IORPC "aux" call validates "mem_size".
 */
int gxio_mpipe_init_edma_ring(gxio_mpipe_context_t *context,
			      unsigned int ring, unsigned int channel,
			      void *mem, size_t mem_size,
			      unsigned int mem_flags)
{
	/* Start from zeroed descriptor memory. */
	memset(mem, 0, mem_size);

	return gxio_mpipe_init_edma_ring_aux(context, mem, mem_size, mem_flags,
					     ring, channel);
}

EXPORT_SYMBOL_GPL(gxio_mpipe_init_edma_ring);
| 
 | ||||
/* Reset a rules object to an empty rule list bound to "context". */
void gxio_mpipe_rules_init(gxio_mpipe_rules_t *rules,
			   gxio_mpipe_context_t *context)
{
	rules->context = context;
	memset(&rules->list, 0, sizeof(rules->list));
}

EXPORT_SYMBOL_GPL(gxio_mpipe_rules_init);
| 
 | ||||
/*
 * Start a new classifier rule in the rules list.
 *
 * "bucket"/"num_buckets" give the contiguous range of load balancer
 * buckets the rule targets; num_buckets must be a power of 2.
 * "stacks" optionally overrides the buffer stacks used; if NULL, the
 * stacks recorded by gxio_mpipe_init_buffer_stack() are used.
 *
 * Returns 0, GXIO_MPIPE_ERR_RULES_FULL if the list is out of room, or
 * GXIO_MPIPE_ERR_RULES_INVALID for bad arguments / no stacks.
 */
int gxio_mpipe_rules_begin(gxio_mpipe_rules_t *rules,
			   unsigned int bucket, unsigned int num_buckets,
			   gxio_mpipe_rules_stacks_t *stacks)
{
	int i;
	/* 255 is the "no stack" sentinel used throughout. */
	int stack = 255;

	gxio_mpipe_rules_list_t *list = &rules->list;

	/* Current rule (the previous one; padding is charged to it below). */
	gxio_mpipe_rules_rule_t *rule =
		(gxio_mpipe_rules_rule_t *) (list->rules + list->head);

	/* The new rule starts where the previous one ended. */
	unsigned int head = list->tail;

	/*
	 * Align next rule properly.
	 * Note that "dmacs_and_vlans" will also be aligned.
	 */
	unsigned int pad = 0;
	while (((head + pad) % __alignof__(gxio_mpipe_rules_rule_t)) != 0)
		pad++;

	/*
	 * Verify room.
	 * ISSUE: Mark rules as broken on error?
	 */
	if (head + pad + sizeof(*rule) >= sizeof(list->rules))
		return GXIO_MPIPE_ERR_RULES_FULL;

	/* Verify num_buckets is a power of 2. */
	if (__builtin_popcount(num_buckets) != 1)
		return GXIO_MPIPE_ERR_RULES_INVALID;

	/* Add padding to previous rule. */
	rule->size += pad;

	/* Start a new rule. */
	list->head = head + pad;

	rule = (gxio_mpipe_rules_rule_t *) (list->rules + list->head);

	/* Default some values. */
	rule->headroom = 2;
	rule->tailroom = 0;
	rule->capacity = 16384;

	/* Save the bucket info. */
	rule->bucket_mask = num_buckets - 1;
	rule->bucket_first = bucket;

	/*
	 * Walk the 8 buffer-size slots from largest index down; a slot
	 * whose entry is the 255 sentinel inherits the stack seen at a
	 * higher index.
	 */
	for (i = 8 - 1; i >= 0; i--) {
		int maybe =
			stacks ? stacks->stacks[i] : rules->context->__stacks.
			stacks[i];
		if (maybe != 255)
			stack = maybe;
		rule->stacks.stacks[i] = stack;
	}

	/* At least one real stack must have been supplied. */
	if (stack == 255)
		return GXIO_MPIPE_ERR_RULES_INVALID;

	/* NOTE: Only entries at the end of the array can be 255. */
	for (i = 8 - 1; i > 0; i--) {
		if (rule->stacks.stacks[i] == 255) {
			rule->stacks.stacks[i] = stack;
			rule->capacity =
				gxio_mpipe_buffer_size_enum_to_buffer_size(i -
									   1);
		}
	}

	rule->size = sizeof(*rule);
	list->tail = list->head + rule->size;

	return 0;
}

EXPORT_SYMBOL_GPL(gxio_mpipe_rules_begin);
| 
 | ||||
| int gxio_mpipe_rules_add_channel(gxio_mpipe_rules_t *rules, | ||||
| 				 unsigned int channel) | ||||
| { | ||||
| 	gxio_mpipe_rules_list_t *list = &rules->list; | ||||
| 
 | ||||
| 	gxio_mpipe_rules_rule_t *rule = | ||||
| 		(gxio_mpipe_rules_rule_t *) (list->rules + list->head); | ||||
| 
 | ||||
| 	/* Verify channel. */ | ||||
| 	if (channel >= 32) | ||||
| 		return GXIO_MPIPE_ERR_RULES_INVALID; | ||||
| 
 | ||||
| 	/* Verify begun. */ | ||||
| 	if (list->tail == 0) | ||||
| 		return GXIO_MPIPE_ERR_RULES_EMPTY; | ||||
| 
 | ||||
| 	rule->channel_bits |= (1UL << channel); | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL_GPL(gxio_mpipe_rules_add_channel); | ||||
| 
 | ||||
| int gxio_mpipe_rules_set_headroom(gxio_mpipe_rules_t *rules, uint8_t headroom) | ||||
| { | ||||
| 	gxio_mpipe_rules_list_t *list = &rules->list; | ||||
| 
 | ||||
| 	gxio_mpipe_rules_rule_t *rule = | ||||
| 		(gxio_mpipe_rules_rule_t *) (list->rules + list->head); | ||||
| 
 | ||||
| 	/* Verify begun. */ | ||||
| 	if (list->tail == 0) | ||||
| 		return GXIO_MPIPE_ERR_RULES_EMPTY; | ||||
| 
 | ||||
| 	rule->headroom = headroom; | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL_GPL(gxio_mpipe_rules_set_headroom); | ||||
| 
 | ||||
/*
 * Push the accumulated rule list to the hypervisor.  Only the bytes
 * actually used (list header plus list->tail rule bytes) are sent.
 */
int gxio_mpipe_rules_commit(gxio_mpipe_rules_t *rules)
{
	gxio_mpipe_rules_list_t *list = &rules->list;
	unsigned int size =
		offsetof(gxio_mpipe_rules_list_t, rules) + list->tail;
	return gxio_mpipe_commit_rules(rules->context, list, size);
}

EXPORT_SYMBOL_GPL(gxio_mpipe_rules_commit);
| 
 | ||||
/*
 * Initialize an iqueue wrapper around a NotifRing whose descriptor
 * storage is "mem".
 */
int gxio_mpipe_iqueue_init(gxio_mpipe_iqueue_t *iqueue,
			   gxio_mpipe_context_t *context,
			   unsigned int ring,
			   void *mem, size_t mem_size, unsigned int mem_flags)
{
	/* The init call below will verify that "mem_size" is legal. */
	unsigned int num_entries = mem_size / sizeof(gxio_mpipe_idesc_t);

	iqueue->context = context;
	iqueue->idescs = (gxio_mpipe_idesc_t *)mem;
	iqueue->ring = ring;
	iqueue->num_entries = num_entries;
	/*
	 * NOTE(review): the mask/ctz below assume num_entries is a power
	 * of two; presumably guaranteed by the mem_size check -- confirm.
	 */
	iqueue->mask_num_entries = num_entries - 1;
	iqueue->log2_num_entries = __builtin_ctz(num_entries);
	iqueue->head = 1;
#ifdef __BIG_ENDIAN__
	iqueue->swapped = 0;
#endif

	/* Initialize the "tail". */
	__gxio_mmio_write(mem, iqueue->head);

	return gxio_mpipe_init_notif_ring(context, ring, mem, mem_size,
					  mem_flags);
}

EXPORT_SYMBOL_GPL(gxio_mpipe_iqueue_init);
| 
 | ||||
/*
 * Initialize an equeue wrapper around an eDMA ring for "channel",
 * using "mem" as the ring's descriptor storage.
 */
int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue,
			   gxio_mpipe_context_t *context,
			   unsigned int edma_ring_id,
			   unsigned int channel,
			   void *mem, unsigned int mem_size,
			   unsigned int mem_flags)
{
	/* The init call below will verify that "mem_size" is legal. */
	unsigned int num_entries = mem_size / sizeof(gxio_mpipe_edesc_t);

	/* Offset used to read number of completed commands. */
	MPIPE_EDMA_POST_REGION_ADDR_t offset;

	int result = gxio_mpipe_init_edma_ring(context, edma_ring_id, channel,
					       mem, mem_size, mem_flags);
	if (result < 0)
		return result;

	memset(equeue, 0, sizeof(*equeue));

	/*
	 * Build this ring's eDMA post-region MMIO offset, relative to
	 * the iDMA-region-based "fast" MMIO base.
	 */
	offset.word = 0;
	offset.region =
		MPIPE_MMIO_ADDR__REGION_VAL_EDMA -
		MPIPE_MMIO_ADDR__REGION_VAL_IDMA;
	offset.ring = edma_ring_id;

	__gxio_dma_queue_init(&equeue->dma_queue,
			      context->mmio_fast_base + offset.word,
			      num_entries);
	equeue->edescs = mem;
	/* NOTE(review): assumes num_entries is a power of two -- confirm. */
	equeue->mask_num_entries = num_entries - 1;
	equeue->log2_num_entries = __builtin_ctz(num_entries);

	return 0;
}

EXPORT_SYMBOL_GPL(gxio_mpipe_equeue_init);
| 
 | ||||
/*
 * Set the mPIPE timestamp to "ts", paired with a snapshot of the
 * current cycle counter taken at the moment of the call.
 */
int gxio_mpipe_set_timestamp(gxio_mpipe_context_t *context,
			     const struct timespec *ts)
{
	cycles_t cycles = get_cycles();
	return gxio_mpipe_set_timestamp_aux(context, (uint64_t)ts->tv_sec,
					    (uint64_t)ts->tv_nsec,
					    (uint64_t)cycles);
}
| 
 | ||||
/*
 * Read the mPIPE timestamp into "ts", adjusting by the cycles elapsed
 * between our own get_cycles() snapshot and the cycle count the aux
 * call reports, converted to nanoseconds via the clock rate.
 */
int gxio_mpipe_get_timestamp(gxio_mpipe_context_t *context,
			     struct timespec *ts)
{
	int ret;
	cycles_t cycles_prev, cycles_now, clock_rate;
	cycles_prev = get_cycles();
	/*
	 * NOTE(review): the casts assume tv_sec/tv_nsec and cycles_t are
	 * 64-bit on this platform -- presumably true on tilegx; confirm.
	 */
	ret = gxio_mpipe_get_timestamp_aux(context, (uint64_t *)&ts->tv_sec,
					   (uint64_t *)&ts->tv_nsec,
					   (uint64_t *)&cycles_now);
	if (ret < 0) {
		return ret;
	}

	clock_rate = get_clock_rate();
	/* Subtract the elapsed cycles, expressed in nanoseconds. */
	ts->tv_nsec -= (cycles_now - cycles_prev) * 1000000000LL / clock_rate;
	if (ts->tv_nsec < 0) {
		/* Normalize after borrow. */
		ts->tv_nsec += 1000000000LL;
		ts->tv_sec -= 1;
	}
	return ret;
}
| 
 | ||||
/* Apply a signed adjustment "delta" to the mPIPE timestamp (thin
 * wrapper around the IORPC "aux" call, which defines delta's units).
 */
int gxio_mpipe_adjust_timestamp(gxio_mpipe_context_t *context, int64_t delta)
{
	return gxio_mpipe_adjust_timestamp_aux(context, delta);
}
| 
 | ||||
/* Get our internal context used for link name access.  This context is
 *  special in that it is not associated with an mPIPE service domain.
 *  Returns NULL if no mPIPE shim could be opened.
 */
static gxio_mpipe_context_t *_gxio_get_link_context(void)
{
	static gxio_mpipe_context_t context;
	static gxio_mpipe_context_t *contextp;
	/* Latch: the device probe below runs at most once. */
	static int tried_open = 0;
	static DEFINE_MUTEX(mutex);

	mutex_lock(&mutex);

	if (!tried_open) {
		int i = 0;
		tried_open = 1;

		/*
		 * "4" here is the maximum possible number of mPIPE shims; it's
		 * an exaggeration but we shouldn't ever go beyond 2 anyway.
		 */
		for (i = 0; i < 4; i++) {
			char file[80];

			snprintf(file, sizeof(file), "mpipe/%d/iorpc_info", i);
			context.fd = hv_dev_open((HV_VirtAddr) file, 0);
			if (context.fd < 0)
				continue;

			/* Found one; publish the context. */
			contextp = &context;
			break;
		}
	}

	mutex_unlock(&mutex);

	return contextp;
}
| 
 | ||||
/*
 * Look up the name and MAC address of the idx-th mPIPE link.
 *
 * NOTE(review): strncpy() will not NUL-terminate link_name when the
 * source name fills its buffer, and the caller's link_name buffer is
 * assumed to hold at least sizeof(name.name) bytes -- confirm callers.
 */
int gxio_mpipe_link_enumerate_mac(int idx, char *link_name, uint8_t *link_mac)
{
	int rv;
	_gxio_mpipe_link_name_t name;
	_gxio_mpipe_link_mac_t mac;

	gxio_mpipe_context_t *context = _gxio_get_link_context();
	if (!context)
		return GXIO_ERR_NO_DEVICE;

	rv = gxio_mpipe_info_enumerate_aux(context, idx, &name, &mac);
	if (rv >= 0) {
		strncpy(link_name, name.name, sizeof(name.name));
		memcpy(link_mac, mac.mac, sizeof(mac.mac));
	}

	return rv;
}

EXPORT_SYMBOL_GPL(gxio_mpipe_link_enumerate_mac);
| 
 | ||||
/*
 * Open the mPIPE link named "link_name" and fill in "link".
 *
 * On success, the aux call's non-negative return encodes the channel
 * in the bits above 8 and the MAC index in the low byte.
 */
int gxio_mpipe_link_open(gxio_mpipe_link_t *link,
			 gxio_mpipe_context_t *context, const char *link_name,
			 unsigned int flags)
{
	_gxio_mpipe_link_name_t name;
	int rv;

	/* Copy the name, forcing NUL termination. */
	strncpy(name.name, link_name, sizeof(name.name));
	name.name[GXIO_MPIPE_LINK_NAME_LEN - 1] = '\0';

	rv = gxio_mpipe_link_open_aux(context, name, flags);
	if (rv < 0)
		return rv;

	link->context = context;
	link->channel = rv >> 8;
	link->mac = rv & 0xFF;

	return 0;
}

EXPORT_SYMBOL_GPL(gxio_mpipe_link_open);
| 
 | ||||
/* Release a link previously opened with gxio_mpipe_link_open(). */
int gxio_mpipe_link_close(gxio_mpipe_link_t *link)
{
	return gxio_mpipe_link_close_aux(link->context, link->mac);
}

EXPORT_SYMBOL_GPL(gxio_mpipe_link_close);
							
								
								
									
										49
									
								
								arch/tile/gxio/trio.c
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										49
									
								
								arch/tile/gxio/trio.c
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,49 @@ | ||||
| /*
 | ||||
|  * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||||
|  * | ||||
|  *   This program is free software; you can redistribute it and/or | ||||
|  *   modify it under the terms of the GNU General Public License | ||||
|  *   as published by the Free Software Foundation, version 2. | ||||
|  * | ||||
|  *   This program is distributed in the hope that it will be useful, but | ||||
|  *   WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||||
|  *   NON INFRINGEMENT.  See the GNU General Public License for | ||||
|  *   more details. | ||||
|  */ | ||||
| 
 | ||||
| /*
 | ||||
|  * Implementation of trio gxio calls. | ||||
|  */ | ||||
| 
 | ||||
| #include <linux/errno.h> | ||||
| #include <linux/io.h> | ||||
| #include <linux/module.h> | ||||
| 
 | ||||
| #include <gxio/trio.h> | ||||
| #include <gxio/iorpc_globals.h> | ||||
| #include <gxio/iorpc_trio.h> | ||||
| #include <gxio/kiorpc.h> | ||||
| 
 | ||||
/*
 * Open the IORPC device for TRIO shim "trio_index" and record the fd
 * in the context.  Returns 0, a GXIO_ERR_* code passed through from
 * the hypervisor, or -ENODEV.  On failure context->fd is set to -1.
 */
int gxio_trio_init(gxio_trio_context_t *context, unsigned int trio_index)
{
	char file[32];
	int fd;

	snprintf(file, sizeof(file), "trio/%d/iorpc", trio_index);
	fd = hv_dev_open((HV_VirtAddr) file, 0);
	if (fd < 0) {
		/* Mark the context unusable. */
		context->fd = -1;

		/* Pass recognizable GXIO errors through; else -ENODEV. */
		if (fd >= GXIO_ERR_MIN && fd <= GXIO_ERR_MAX)
			return fd;
		else
			return -ENODEV;
	}

	context->fd = fd;

	return 0;
}

EXPORT_SYMBOL_GPL(gxio_trio_init);
							
								
								
									
										91
									
								
								arch/tile/gxio/usb_host.c
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										91
									
								
								arch/tile/gxio/usb_host.c
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,91 @@ | ||||
| /*
 | ||||
|  * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||||
|  * | ||||
|  *   This program is free software; you can redistribute it and/or | ||||
|  *   modify it under the terms of the GNU General Public License | ||||
|  *   as published by the Free Software Foundation, version 2. | ||||
|  * | ||||
|  *   This program is distributed in the hope that it will be useful, but | ||||
|  *   WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||||
|  *   NON INFRINGEMENT.  See the GNU General Public License for | ||||
|  *   more details. | ||||
|  */ | ||||
| 
 | ||||
| /*
 | ||||
|  * | ||||
|  * Implementation of USB gxio calls. | ||||
|  */ | ||||
| 
 | ||||
| #include <linux/io.h> | ||||
| #include <linux/errno.h> | ||||
| #include <linux/module.h> | ||||
| 
 | ||||
| #include <gxio/iorpc_globals.h> | ||||
| #include <gxio/iorpc_usb_host.h> | ||||
| #include <gxio/kiorpc.h> | ||||
| #include <gxio/usb_host.h> | ||||
| 
 | ||||
| int gxio_usb_host_init(gxio_usb_host_context_t * context, int usb_index, | ||||
| 		       int is_ehci) | ||||
| { | ||||
| 	char file[32]; | ||||
| 	int fd; | ||||
| 
 | ||||
| 	if (is_ehci) | ||||
| 		snprintf(file, sizeof(file), "usb_host/%d/iorpc/ehci", | ||||
| 			 usb_index); | ||||
| 	else | ||||
| 		snprintf(file, sizeof(file), "usb_host/%d/iorpc/ohci", | ||||
| 			 usb_index); | ||||
| 
 | ||||
| 	fd = hv_dev_open((HV_VirtAddr) file, 0); | ||||
| 	if (fd < 0) { | ||||
| 		if (fd >= GXIO_ERR_MIN && fd <= GXIO_ERR_MAX) | ||||
| 			return fd; | ||||
| 		else | ||||
| 			return -ENODEV; | ||||
| 	} | ||||
| 
 | ||||
| 	context->fd = fd; | ||||
| 
 | ||||
| 	// Map in the MMIO space.
 | ||||
| 	context->mmio_base = | ||||
| 		(void __force *)iorpc_ioremap(fd, 0, HV_USB_HOST_MMIO_SIZE); | ||||
| 
 | ||||
| 	if (context->mmio_base == NULL) { | ||||
| 		hv_dev_close(context->fd); | ||||
| 		return -ENODEV; | ||||
| 	} | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL_GPL(gxio_usb_host_init); | ||||
| 
 | ||||
/*
 * Unmap the MMIO register space, close the IORPC fd, and reset the
 * context fields.  Always returns 0.
 */
int gxio_usb_host_destroy(gxio_usb_host_context_t * context)
{
	iounmap((void __force __iomem *)(context->mmio_base));
	hv_dev_close(context->fd);

	context->mmio_base = NULL;
	context->fd = -1;

	return 0;
}

EXPORT_SYMBOL_GPL(gxio_usb_host_destroy);
| 
 | ||||
/* Return the kernel virtual address of the USB register space. */
void *gxio_usb_host_get_reg_start(gxio_usb_host_context_t * context)
{
	return context->mmio_base;
}

EXPORT_SYMBOL_GPL(gxio_usb_host_get_reg_start);
| 
 | ||||
/*
 * Return the size of the USB register space.  "context" is unused but
 * kept for API symmetry with gxio_usb_host_get_reg_start().
 */
size_t gxio_usb_host_get_reg_len(gxio_usb_host_context_t * context)
{
	return HV_USB_HOST_MMIO_SIZE;
}

EXPORT_SYMBOL_GPL(gxio_usb_host_get_reg_len);
							
								
								
									
										359
									
								
								arch/tile/include/arch/mpipe.h
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										359
									
								
								arch/tile/include/arch/mpipe.h
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,359 @@ | ||||
| /*
 | ||||
|  * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||||
|  * | ||||
|  *   This program is free software; you can redistribute it and/or | ||||
|  *   modify it under the terms of the GNU General Public License | ||||
|  *   as published by the Free Software Foundation, version 2. | ||||
|  * | ||||
|  *   This program is distributed in the hope that it will be useful, but | ||||
|  *   WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||||
|  *   NON INFRINGEMENT.  See the GNU General Public License for | ||||
|  *   more details. | ||||
|  */ | ||||
| 
 | ||||
| /* Machine-generated file; do not edit. */ | ||||
| 
 | ||||
| #ifndef __ARCH_MPIPE_H__ | ||||
| #define __ARCH_MPIPE_H__ | ||||
| 
 | ||||
| #include <arch/abi.h> | ||||
| #include <arch/mpipe_def.h> | ||||
| 
 | ||||
| #ifndef __ASSEMBLER__ | ||||
| 
 | ||||
/*
 * MMIO Ingress DMA Release Region Address.
 * This is a description of the physical addresses used to manipulate ingress
 * credit counters.  Accesses to this address space should use an address of
 * this form and a value like that specified in IDMA_RELEASE_REGION_VAL.
 * Fields are listed LSB-first for little-endian and reversed for
 * big-endian; the "word" member carries the raw value.
 */

__extension__
typedef union
{
  struct
  {
#ifndef __BIG_ENDIAN__
    /* Reserved. */
    uint_reg_t __reserved_0  : 3;
    /* NotifRing to be released */
    uint_reg_t ring          : 8;
    /* Bucket to be released */
    uint_reg_t bucket        : 13;
    /* Enable NotifRing release */
    uint_reg_t ring_enable   : 1;
    /* Enable Bucket release */
    uint_reg_t bucket_enable : 1;
    /*
     * This field of the address selects the region (address space) to be
     * accessed.  For the iDMA release region, this field must be 4.
     */
    uint_reg_t region        : 3;
    /* Reserved. */
    uint_reg_t __reserved_1  : 6;
    /* This field of the address indexes the 32 entry service domain table. */
    uint_reg_t svc_dom       : 5;
    /* Reserved. */
    uint_reg_t __reserved_2  : 24;
#else   /* __BIG_ENDIAN__ */
    uint_reg_t __reserved_2  : 24;
    uint_reg_t svc_dom       : 5;
    uint_reg_t __reserved_1  : 6;
    uint_reg_t region        : 3;
    uint_reg_t bucket_enable : 1;
    uint_reg_t ring_enable   : 1;
    uint_reg_t bucket        : 13;
    uint_reg_t ring          : 8;
    uint_reg_t __reserved_0  : 3;
#endif
  };

  uint_reg_t word;
} MPIPE_IDMA_RELEASE_REGION_ADDR_t;
| 
 | ||||
/*
 * MMIO Ingress DMA Release Region Value - Release NotifRing and/or Bucket.
 * Provides release of the associated NotifRing.  The address of the MMIO
 * operation is described in IDMA_RELEASE_REGION_ADDR.
 * The "word" member carries the raw register value.
 */

__extension__
typedef union
{
  struct
  {
#ifndef __BIG_ENDIAN__
    /*
     * Number of packets being released.  The load balancer's count of
     * inflight packets will be decremented by this amount for the associated
     * Bucket and/or NotifRing
     */
    uint_reg_t count      : 16;
    /* Reserved. */
    uint_reg_t __reserved : 48;
#else   /* __BIG_ENDIAN__ */
    uint_reg_t __reserved : 48;
    uint_reg_t count      : 16;
#endif
  };

  uint_reg_t word;
} MPIPE_IDMA_RELEASE_REGION_VAL_t;
| 
 | ||||
/*
 * MMIO Buffer Stack Manager Region Address.
 * This MMIO region is used for posting or fetching buffers to/from the
 * buffer stack manager.  On an MMIO load, this pops a buffer descriptor from
 * the top of stack if one is available.  On an MMIO store, this pushes a
 * buffer to the stack.  The value read or written is described in
 * BSM_REGION_VAL.
 * The "word" member carries the raw address value.
 */

__extension__
typedef union
{
  struct
  {
#ifndef __BIG_ENDIAN__
    /* Reserved. */
    uint_reg_t __reserved_0 : 3;
    /* BufferStack being accessed. */
    uint_reg_t stack        : 5;
    /* Reserved. */
    uint_reg_t __reserved_1 : 18;
    /*
     * This field of the address selects the region (address space) to be
     * accessed.  For the buffer stack manager region, this field must be 6.
     */
    uint_reg_t region       : 3;
    /* Reserved. */
    uint_reg_t __reserved_2 : 6;
    /* This field of the address indexes the 32 entry service domain table. */
    uint_reg_t svc_dom      : 5;
    /* Reserved. */
    uint_reg_t __reserved_3 : 24;
#else   /* __BIG_ENDIAN__ */
    uint_reg_t __reserved_3 : 24;
    uint_reg_t svc_dom      : 5;
    uint_reg_t __reserved_2 : 6;
    uint_reg_t region       : 3;
    uint_reg_t __reserved_1 : 18;
    uint_reg_t stack        : 5;
    uint_reg_t __reserved_0 : 3;
#endif
  };

  uint_reg_t word;
} MPIPE_BSM_REGION_ADDR_t;
| 
 | ||||
/*
 * MMIO Buffer Stack Manager Region Value.
 * This MMIO region is used for posting or fetching buffers to/from the
 * buffer stack manager.  On an MMIO load, this pops a buffer descriptor from
 * the top of stack if one is available. On an MMIO store, this pushes a
 * buffer to the stack.  The address of the MMIO operation is described in
 * BSM_REGION_ADDR.
 * The "word" member carries the raw register value.
 */

__extension__
typedef union
{
  struct
  {
#ifndef __BIG_ENDIAN__
    /* Reserved. */
    uint_reg_t __reserved_0 : 7;
    /*
     * Base virtual address of the buffer.  Must be sign extended by consumer.
     */
    int_reg_t va           : 35;
    /* Reserved. */
    uint_reg_t __reserved_1 : 6;
    /*
     * Index of the buffer stack to which this buffer belongs.  Ignored on
     * writes since the offset bits specify the stack being accessed.
     */
    uint_reg_t stack_idx    : 5;
    /* Reserved. */
    uint_reg_t __reserved_2 : 5;
    /*
     * Reads as one to indicate that this is a hardware managed buffer.
     * Ignored on writes since all buffers on a given stack are the same size.
     */
    uint_reg_t hwb          : 1;
    /*
     * Encoded size of buffer (ignored on writes):
     * 0 = 128 bytes
     * 1 = 256 bytes
     * 2 = 512 bytes
     * 3 = 1024 bytes
     * 4 = 1664 bytes
     * 5 = 4096 bytes
     * 6 = 10368 bytes
     * 7 = 16384 bytes
     */
    uint_reg_t size         : 3;
    /*
     * Valid indication for the buffer.  Ignored on writes.
     * 0 : Valid buffer descriptor popped from stack.
     * 3 : Could not pop a buffer from the stack.  Either the stack is empty,
     * or the hardware's prefetch buffer is empty for this stack.
     */
    uint_reg_t c            : 2;
#else   /* __BIG_ENDIAN__ */
    uint_reg_t c            : 2;
    uint_reg_t size         : 3;
    uint_reg_t hwb          : 1;
    uint_reg_t __reserved_2 : 5;
    uint_reg_t stack_idx    : 5;
    uint_reg_t __reserved_1 : 6;
    int_reg_t va           : 35;
    uint_reg_t __reserved_0 : 7;
#endif
  };

  uint_reg_t word;
} MPIPE_BSM_REGION_VAL_t;
| 
 | ||||
/*
 * MMIO Egress DMA Post Region Address.
 * Used to post descriptor locations to the eDMA descriptor engine.  The
 * value to be written is described in EDMA_POST_REGION_VAL
 * The "word" member carries the raw address value.
 */

__extension__
typedef union
{
  struct
  {
#ifndef __BIG_ENDIAN__
    /* Reserved. */
    uint_reg_t __reserved_0 : 3;
    /* eDMA ring being accessed */
    uint_reg_t ring         : 5;
    /* Reserved. */
    uint_reg_t __reserved_1 : 18;
    /*
     * This field of the address selects the region (address space) to be
     * accessed.  For the egress DMA post region, this field must be 5.
     */
    uint_reg_t region       : 3;
    /* Reserved. */
    uint_reg_t __reserved_2 : 6;
    /* This field of the address indexes the 32 entry service domain table. */
    uint_reg_t svc_dom      : 5;
    /* Reserved. */
    uint_reg_t __reserved_3 : 24;
#else   /* __BIG_ENDIAN__ */
    uint_reg_t __reserved_3 : 24;
    uint_reg_t svc_dom      : 5;
    uint_reg_t __reserved_2 : 6;
    uint_reg_t region       : 3;
    uint_reg_t __reserved_1 : 18;
    uint_reg_t ring         : 5;
    uint_reg_t __reserved_0 : 3;
#endif
  };

  uint_reg_t word;
} MPIPE_EDMA_POST_REGION_ADDR_t;
| 
 | ||||
/*
 * MMIO Egress DMA Post Region Value.
 * Used to post descriptor locations to the eDMA descriptor engine.  The
 * address is described in EDMA_POST_REGION_ADDR.
 * The "word" member carries the raw register value.
 */

__extension__
typedef union
{
  struct
  {
#ifndef __BIG_ENDIAN__
    /*
     * For writes, this specifies the current ring tail pointer prior to any
     * post.  For example, to post 1 or more descriptors starting at location
     * 23, this would contain 23 (not 24).  On writes, this index must be
     * masked based on the ring size.  The new tail pointer after this post
     * is COUNT+RING_IDX (masked by the ring size).
     *
     * For reads, this provides the hardware descriptor fetcher's head
     * pointer.  The descriptors prior to the head pointer, however, may not
     * yet have been processed so this indicator is only used to determine
     * how full the ring is and if software may post more descriptors.
     */
    uint_reg_t ring_idx   : 16;
    /*
     * For writes, this specifies number of contiguous descriptors that are
     * being posted.  Software may post up to RingSize descriptors with a
     * single MMIO store.  A zero in this field on a write will "wake up" an
     * eDMA ring and cause it fetch descriptors regardless of the hardware's
     * current view of the state of the tail pointer.
     *
     * For reads, this field provides a rolling count of the number of
     * descriptors that have been completely processed.  This may be used by
     * software to determine when buffers associated with a descriptor may be
     * returned or reused.  When the ring's flush bit is cleared by software
     * (after having been set by HW or SW), the COUNT will be cleared.
     */
    uint_reg_t count      : 16;
    /*
     * For writes, this specifies the generation number of the tail being
     * posted. Note that if tail+cnt wraps to the beginning of the ring, the
     * eDMA hardware assumes that the descriptors posted at the beginning of
     * the ring are also valid so it is okay to post around the wrap point.
     *
     * For reads, this is the current generation number.  Valid descriptors
     * will have the inverse of this generation number.
     */
    uint_reg_t gen        : 1;
    /* Reserved. */
    uint_reg_t __reserved : 31;
#else   /* __BIG_ENDIAN__ */
    uint_reg_t __reserved : 31;
    uint_reg_t gen        : 1;
    uint_reg_t count      : 16;
    uint_reg_t ring_idx   : 16;
#endif
  };

  uint_reg_t word;
} MPIPE_EDMA_POST_REGION_VAL_t;
| 
 | ||||
/*
 * Load Balancer Bucket Status Data.
 * Read/Write data for load balancer Bucket-Status Table. 4160 entries
 * indexed by LBL_INIT_CTL.IDX when LBL_INIT_CTL.STRUCT_SEL is BSTS_TBL
 * The "word" member carries the raw register value.
 */

__extension__
typedef union
{
  struct
  {
#ifndef __BIG_ENDIAN__
    /* NotifRing currently assigned to this bucket. */
    uint_reg_t notifring  : 8;
    /* Current reference count. */
    uint_reg_t count      : 16;
    /* Group associated with this bucket. */
    uint_reg_t group      : 5;
    /* Mode select for this bucket. */
    uint_reg_t mode       : 3;
    /* Reserved. */
    uint_reg_t __reserved : 32;
#else   /* __BIG_ENDIAN__ */
    uint_reg_t __reserved : 32;
    uint_reg_t mode       : 3;
    uint_reg_t group      : 5;
    uint_reg_t count      : 16;
    uint_reg_t notifring  : 8;
#endif
  };

  uint_reg_t word;
} MPIPE_LBL_INIT_DAT_BSTS_TBL_t;
| #endif /* !defined(__ASSEMBLER__) */ | ||||
| 
 | ||||
| #endif /* !defined(__ARCH_MPIPE_H__) */ | ||||
							
								
								
									
										42
									
								
								arch/tile/include/arch/mpipe_constants.h
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										42
									
								
								arch/tile/include/arch/mpipe_constants.h
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,42 @@ | ||||
| /*
 | ||||
|  * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||||
|  * | ||||
|  *   This program is free software; you can redistribute it and/or | ||||
|  *   modify it under the terms of the GNU General Public License | ||||
|  *   as published by the Free Software Foundation, version 2. | ||||
|  * | ||||
|  *   This program is distributed in the hope that it will be useful, but | ||||
|  *   WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||||
|  *   NON INFRINGEMENT.  See the GNU General Public License for | ||||
|  *   more details. | ||||
|  */ | ||||
| 
 | ||||
| 
 | ||||
| #ifndef __ARCH_MPIPE_CONSTANTS_H__ | ||||
| #define __ARCH_MPIPE_CONSTANTS_H__ | ||||
| 
 | ||||
| #define MPIPE_NUM_CLASSIFIERS 10 | ||||
| #define MPIPE_CLS_MHZ 1200 | ||||
| 
 | ||||
| #define MPIPE_NUM_EDMA_RINGS 32 | ||||
| 
 | ||||
| #define MPIPE_NUM_SGMII_MACS 16 | ||||
| #define MPIPE_NUM_XAUI_MACS 4 | ||||
| #define MPIPE_NUM_LOOPBACK_CHANNELS 4 | ||||
| #define MPIPE_NUM_NON_LB_CHANNELS 28 | ||||
| 
 | ||||
| #define MPIPE_NUM_IPKT_BLOCKS 1536 | ||||
| 
 | ||||
| #define MPIPE_NUM_BUCKETS 4160 | ||||
| 
 | ||||
| #define MPIPE_NUM_NOTIF_RINGS 256 | ||||
| 
 | ||||
| #define MPIPE_NUM_NOTIF_GROUPS 32 | ||||
| 
 | ||||
| #define MPIPE_NUM_TLBS_PER_ASID 16 | ||||
| #define MPIPE_TLB_IDX_WIDTH 4 | ||||
| 
 | ||||
| #define MPIPE_MMIO_NUM_SVC_DOM 32 | ||||
| 
 | ||||
| #endif /* __ARCH_MPIPE_CONSTANTS_H__ */ | ||||
							
								
								
									
										39
									
								
								arch/tile/include/arch/mpipe_def.h
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										39
									
								
								arch/tile/include/arch/mpipe_def.h
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,39 @@ | ||||
| /*
 | ||||
|  * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||||
|  * | ||||
|  *   This program is free software; you can redistribute it and/or | ||||
|  *   modify it under the terms of the GNU General Public License | ||||
|  *   as published by the Free Software Foundation, version 2. | ||||
|  * | ||||
|  *   This program is distributed in the hope that it will be useful, but | ||||
|  *   WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||||
|  *   NON INFRINGEMENT.  See the GNU General Public License for | ||||
|  *   more details. | ||||
|  */ | ||||
| 
 | ||||
| /* Machine-generated file; do not edit. */ | ||||
| 
 | ||||
| #ifndef __ARCH_MPIPE_DEF_H__ | ||||
| #define __ARCH_MPIPE_DEF_H__ | ||||
| #define MPIPE_MMIO_ADDR__REGION_SHIFT 26 | ||||
| #define MPIPE_MMIO_ADDR__REGION_VAL_CFG 0x0 | ||||
| #define MPIPE_MMIO_ADDR__REGION_VAL_IDMA 0x4 | ||||
| #define MPIPE_MMIO_ADDR__REGION_VAL_EDMA 0x5 | ||||
| #define MPIPE_MMIO_ADDR__REGION_VAL_BSM 0x6 | ||||
| #define MPIPE_BSM_REGION_VAL__VA_SHIFT 7 | ||||
| #define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_128 0x0 | ||||
| #define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_256 0x1 | ||||
| #define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_512 0x2 | ||||
| #define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_1024 0x3 | ||||
| #define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_1664 0x4 | ||||
| #define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_4096 0x5 | ||||
| #define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_10368 0x6 | ||||
| #define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_16384 0x7 | ||||
| #define MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_DFA 0x0 | ||||
| #define MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_FIXED 0x1 | ||||
| #define MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_ALWAYS_PICK 0x2 | ||||
| #define MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_STICKY 0x3 | ||||
| #define MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_STICKY_RAND 0x7 | ||||
| #define MPIPE_LBL_NR_STATE__FIRST_WORD 0x2138 | ||||
| #endif /* !defined(__ARCH_MPIPE_DEF_H__) */ | ||||
							
								
								
									
										509
									
								
								arch/tile/include/arch/mpipe_shm.h
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										509
									
								
								arch/tile/include/arch/mpipe_shm.h
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,509 @@ | ||||
| /*
 | ||||
|  * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||||
|  * | ||||
|  *   This program is free software; you can redistribute it and/or | ||||
|  *   modify it under the terms of the GNU General Public License | ||||
|  *   as published by the Free Software Foundation, version 2. | ||||
|  * | ||||
|  *   This program is distributed in the hope that it will be useful, but | ||||
|  *   WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||||
|  *   NON INFRINGEMENT.  See the GNU General Public License for | ||||
|  *   more details. | ||||
|  */ | ||||
| 
 | ||||
| /* Machine-generated file; do not edit. */ | ||||
| 
 | ||||
| 
 | ||||
| #ifndef __ARCH_MPIPE_SHM_H__ | ||||
| #define __ARCH_MPIPE_SHM_H__ | ||||
| 
 | ||||
| #include <arch/abi.h> | ||||
| #include <arch/mpipe_shm_def.h> | ||||
| 
 | ||||
| #ifndef __ASSEMBLER__ | ||||
| /**
 | ||||
|  * MPIPE eDMA Descriptor. | ||||
|  * The eDMA descriptor is written by software and consumed by hardware.  It | ||||
|  * is used to specify the location of egress packet data to be sent out of | ||||
|  * the chip via one of the packet interfaces. | ||||
|  */ | ||||
| 
 | ||||
| __extension__ | ||||
| typedef union | ||||
| { | ||||
|   struct | ||||
|   { | ||||
|     /* Word 0 */ | ||||
| 
 | ||||
| #ifndef __BIG_ENDIAN__ | ||||
|     /**
 | ||||
|      * Generation number.  Used to indicate a valid descriptor in ring.  When | ||||
|      * a new descriptor is written into the ring, software must toggle this | ||||
|      * bit.  The net effect is that the GEN bit being written into new | ||||
|      * descriptors toggles each time the ring tail pointer wraps. | ||||
|      */ | ||||
|     uint_reg_t gen        : 1; | ||||
|     /** Reserved.  Must be zero. */ | ||||
|     uint_reg_t r0         : 7; | ||||
|     /** Checksum generation enabled for this transfer. */ | ||||
|     uint_reg_t csum       : 1; | ||||
|     /**
 | ||||
|      * Nothing to be sent.  Used, for example, when software has dropped a | ||||
|      * packet but still wishes to return all of the associated buffers. | ||||
|      */ | ||||
|     uint_reg_t ns         : 1; | ||||
|     /**
 | ||||
|      * Notification interrupt will be delivered when packet has been egressed. | ||||
|      */ | ||||
|     uint_reg_t notif      : 1; | ||||
|     /**
 | ||||
|      * Boundary indicator.  When 1, this transfer includes the EOP for this | ||||
|      * command.  Must be clear on all but the last descriptor for an egress | ||||
|      * packet. | ||||
|      */ | ||||
|     uint_reg_t bound      : 1; | ||||
|     /** Reserved.  Must be zero. */ | ||||
|     uint_reg_t r1         : 4; | ||||
|     /**
 | ||||
|      * Number of bytes to be sent for this descriptor.  When zero, no data | ||||
|      * will be moved and the buffer descriptor will be ignored.  If the | ||||
|      * buffer descriptor indicates that it is chained, the low 7 bits of the | ||||
|      * VA indicate the offset within the first buffer (e.g. 127 bytes is the | ||||
|      * maximum offset into the first buffer).  If the size exceeds a single | ||||
|      * buffer, subsequent buffer descriptors will be fetched prior to | ||||
|      * processing the next eDMA descriptor in the ring. | ||||
|      */ | ||||
|     uint_reg_t xfer_size  : 14; | ||||
|     /** Reserved.  Must be zero. */ | ||||
|     uint_reg_t r2         : 2; | ||||
|     /**
 | ||||
|      * Destination of checksum relative to CSUM_START relative to the first | ||||
|      * byte moved by this descriptor.  Must be zero if CSUM=0 in this | ||||
|      * descriptor.  Must be less than XFER_SIZE (e.g. the first byte of the | ||||
|      * CSUM_DEST must be within the span of this descriptor). | ||||
|      */ | ||||
|     uint_reg_t csum_dest  : 8; | ||||
|     /**
 | ||||
|      * Start byte of checksum relative to the first byte moved by this | ||||
|      * descriptor.  If this is not the first descriptor for the egress | ||||
|      * packet, CSUM_START is still relative to the first byte in this | ||||
|      * descriptor.  Must be zero if CSUM=0 in this descriptor. | ||||
|      */ | ||||
|     uint_reg_t csum_start : 8; | ||||
|     /**
 | ||||
|      * Initial value for 16-bit 1's compliment checksum if enabled via CSUM. | ||||
|      * Specified in network order.  That is, bits[7:0] will be added to the | ||||
|      * byte pointed to by CSUM_START and bits[15:8] will be added to the byte | ||||
|      * pointed to by CSUM_START+1 (with appropriate 1's compliment carries). | ||||
|      * Must be zero if CSUM=0 in this descriptor. | ||||
|      */ | ||||
|     uint_reg_t csum_seed  : 16; | ||||
| #else   /* __BIG_ENDIAN__ */ | ||||
|     uint_reg_t csum_seed  : 16; | ||||
|     uint_reg_t csum_start : 8; | ||||
|     uint_reg_t csum_dest  : 8; | ||||
|     uint_reg_t r2         : 2; | ||||
|     uint_reg_t xfer_size  : 14; | ||||
|     uint_reg_t r1         : 4; | ||||
|     uint_reg_t bound      : 1; | ||||
|     uint_reg_t notif      : 1; | ||||
|     uint_reg_t ns         : 1; | ||||
|     uint_reg_t csum       : 1; | ||||
|     uint_reg_t r0         : 7; | ||||
|     uint_reg_t gen        : 1; | ||||
| #endif | ||||
| 
 | ||||
|     /* Word 1 */ | ||||
| 
 | ||||
| #ifndef __BIG_ENDIAN__ | ||||
|     /** Virtual address.  Must be sign extended by consumer. */ | ||||
|     int_reg_t va           : 42; | ||||
|     /** Reserved. */ | ||||
|     uint_reg_t __reserved_0 : 6; | ||||
|     /** Index of the buffer stack to which this buffer belongs. */ | ||||
|     uint_reg_t stack_idx    : 5; | ||||
|     /** Reserved. */ | ||||
|     uint_reg_t __reserved_1 : 3; | ||||
|     /**
 | ||||
|      * Instance ID.  For devices that support more than one mPIPE instance, | ||||
|      * this field indicates the buffer owner.  If the INST field does not | ||||
|      * match the mPIPE's instance number when a packet is egressed, buffers | ||||
|      * with HWB set will be returned to the other mPIPE instance. | ||||
|      */ | ||||
|     uint_reg_t inst         : 1; | ||||
|     /** Reserved. */ | ||||
|     uint_reg_t __reserved_2 : 1; | ||||
|     /**
 | ||||
|      * Always set to one by hardware in iDMA packet descriptors.  For eDMA, | ||||
|      * indicates whether the buffer will be released to the buffer stack | ||||
|      * manager.  When 0, software is responsible for releasing the buffer. | ||||
|      */ | ||||
|     uint_reg_t hwb          : 1; | ||||
|     /**
 | ||||
|      * Encoded size of buffer.  Set by the ingress hardware for iDMA packet | ||||
|      * descriptors.  For eDMA descriptors, indicates the buffer size if .c | ||||
|      * indicates a chained packet.  If an eDMA descriptor is not chained and | ||||
|      * the .hwb bit is not set, this field is ignored and the size is | ||||
|      * specified by the .xfer_size field. | ||||
|      * 0 = 128 bytes | ||||
|      * 1 = 256 bytes | ||||
|      * 2 = 512 bytes | ||||
|      * 3 = 1024 bytes | ||||
|      * 4 = 1664 bytes | ||||
|      * 5 = 4096 bytes | ||||
|      * 6 = 10368 bytes | ||||
|      * 7 = 16384 bytes | ||||
|      */ | ||||
|     uint_reg_t size         : 3; | ||||
|     /**
 | ||||
|      * Chaining configuration for the buffer.  Indicates that an ingress | ||||
|      * packet or egress command is chained across multiple buffers, with each | ||||
|      * buffer's size indicated by the .size field. | ||||
|      */ | ||||
|     uint_reg_t c            : 2; | ||||
| #else   /* __BIG_ENDIAN__ */ | ||||
|     uint_reg_t c            : 2; | ||||
|     uint_reg_t size         : 3; | ||||
|     uint_reg_t hwb          : 1; | ||||
|     uint_reg_t __reserved_2 : 1; | ||||
|     uint_reg_t inst         : 1; | ||||
|     uint_reg_t __reserved_1 : 3; | ||||
|     uint_reg_t stack_idx    : 5; | ||||
|     uint_reg_t __reserved_0 : 6; | ||||
|     int_reg_t va           : 42; | ||||
| #endif | ||||
| 
 | ||||
|   }; | ||||
| 
 | ||||
|   /** Word access */ | ||||
|   uint_reg_t words[2]; | ||||
| } MPIPE_EDMA_DESC_t; | ||||
| 
 | ||||
| /**
 | ||||
|  * MPIPE Packet Descriptor. | ||||
|  * The packet descriptor is filled by the mPIPE's classification, | ||||
|  * load-balancing, and buffer management services.  Some fields are consumed | ||||
|  * by mPIPE hardware, and others are consumed by Tile software. | ||||
|  */ | ||||
| 
 | ||||
| __extension__ | ||||
| typedef union | ||||
| { | ||||
|   struct | ||||
|   { | ||||
|     /* Word 0 */ | ||||
| 
 | ||||
| #ifndef __BIG_ENDIAN__ | ||||
|     /**
 | ||||
|      * Notification ring into which this packet descriptor is written. | ||||
|      * Typically written by load balancer, but can be overridden by | ||||
|      * classification program if NR is asserted. | ||||
|      */ | ||||
|     uint_reg_t notif_ring   : 8; | ||||
|     /** Source channel for this packet.  Written by mPIPE DMA hardware. */ | ||||
|     uint_reg_t channel      : 5; | ||||
|     /** Reserved. */ | ||||
|     uint_reg_t __reserved_0 : 1; | ||||
|     /**
 | ||||
|      * MAC Error. | ||||
|      * Generated by the MAC interface.  Asserted if there was an overrun of | ||||
|      * the MAC's receive FIFO.  This condition generally only occurs if the | ||||
|      * mPIPE clock is running too slowly. | ||||
|      */ | ||||
|     uint_reg_t me           : 1; | ||||
|     /**
 | ||||
|      * Truncation Error. | ||||
|      * Written by the iDMA hardware.  Asserted if packet was truncated due to | ||||
|      * insufficient space in iPkt buffer | ||||
|      */ | ||||
|     uint_reg_t tr           : 1; | ||||
|     /**
 | ||||
|      * Written by the iDMA hardware.  Indicates the number of bytes written | ||||
|      * to Tile memory.  In general, this is the actual size of the packet as | ||||
|      * received from the MAC.  But if the packet is truncated due to running | ||||
|      * out of buffers or due to the iPkt buffer filling up, then the L2_SIZE | ||||
|      * will be reduced to reflect the actual number of valid bytes written to | ||||
|      * Tile memory. | ||||
|      */ | ||||
|     uint_reg_t l2_size      : 14; | ||||
|     /**
 | ||||
|      * CRC Error. | ||||
|      * Generated by the MAC.  Asserted if MAC indicated an L2 CRC error or | ||||
|      * other L2 error (bad length etc.) on the packet. | ||||
|      */ | ||||
|     uint_reg_t ce           : 1; | ||||
|     /**
 | ||||
|      * Cut Through. | ||||
|      * Written by the iDMA hardware.  Asserted if packet was not completely | ||||
|      * received before being sent to classifier.  L2_Size will indicate | ||||
|      * number of bytes received so far. | ||||
|      */ | ||||
|     uint_reg_t ct           : 1; | ||||
|     /**
 | ||||
|      * Written by the classification program.  Used by the load balancer to | ||||
|      * select the ring into which this packet descriptor is written. | ||||
|      */ | ||||
|     uint_reg_t bucket_id    : 13; | ||||
|     /** Reserved. */ | ||||
|     uint_reg_t __reserved_1 : 3; | ||||
|     /**
 | ||||
|      * Checksum. | ||||
|      * Written by classification program.  When 1, the checksum engine will | ||||
|      * perform checksum based on the CSUM_SEED, CSUM_START, and CSUM_BYTES | ||||
|      * fields.  The result will be placed in CSUM_VAL. | ||||
|      */ | ||||
|     uint_reg_t cs           : 1; | ||||
|     /**
 | ||||
|      * Notification Ring Select. | ||||
|      * Written by the classification program.  When 1, the NotifRingIDX is | ||||
|      * set by classification program rather than being set by load balancer. | ||||
|      */ | ||||
|     uint_reg_t nr           : 1; | ||||
|     /**
 | ||||
|      * Written by classification program.  Indicates whether packet and | ||||
|      * descriptor should both be dropped, both be delivered, or only the | ||||
|      * descriptor should be delivered. | ||||
|      */ | ||||
|     uint_reg_t dest         : 2; | ||||
|     /**
 | ||||
|      * General Purpose Sequence Number Enable. | ||||
|      * Written by the classification program.  When 1, the GP_SQN_SEL field | ||||
|      * contains the sequence number selector and the GP_SQN field will be | ||||
|      * replaced with the associated sequence number.  When clear, the GP_SQN | ||||
|      * field is left intact and be used as "Custom" bytes. | ||||
|      */ | ||||
|     uint_reg_t sq           : 1; | ||||
|     /**
 | ||||
|      * TimeStamp Enable. | ||||
|      * Enable TimeStamp insertion.  When clear, timestamp field may be filled | ||||
|      * with custom data by classifier.  When set, hardware inserts the | ||||
|      * timestamp when the start of packet is received from the MAC. | ||||
|      */ | ||||
|     uint_reg_t ts           : 1; | ||||
|     /**
 | ||||
|      * Packet Sequence Number Enable. | ||||
|      * Enable PacketSQN insertion.  When clear, PacketSQN field may be filled | ||||
|      * with custom data by classifier.  When set, hardware inserts the packet | ||||
|      * sequence number when the packet descriptor is written to a | ||||
|      * notification ring. | ||||
|      */ | ||||
|     uint_reg_t ps           : 1; | ||||
|     /**
 | ||||
|      * Buffer Error. | ||||
|      * Written by the iDMA hardware.  Asserted if iDMA ran out of buffers | ||||
|      * while writing the packet. Software must still return any buffer | ||||
|      * descriptors whose C field indicates a valid descriptor was consumed. | ||||
|      */ | ||||
|     uint_reg_t be           : 1; | ||||
|     /**
 | ||||
|      * Written by  the classification program.  The associated counter is | ||||
|      * incremented when the packet is sent. | ||||
|      */ | ||||
|     uint_reg_t ctr0         : 5; | ||||
|     /** Reserved. */ | ||||
|     uint_reg_t __reserved_2 : 3; | ||||
| #else   /* __BIG_ENDIAN__ */ | ||||
|     uint_reg_t __reserved_2 : 3; | ||||
|     uint_reg_t ctr0         : 5; | ||||
|     uint_reg_t be           : 1; | ||||
|     uint_reg_t ps           : 1; | ||||
|     uint_reg_t ts           : 1; | ||||
|     uint_reg_t sq           : 1; | ||||
|     uint_reg_t dest         : 2; | ||||
|     uint_reg_t nr           : 1; | ||||
|     uint_reg_t cs           : 1; | ||||
|     uint_reg_t __reserved_1 : 3; | ||||
|     uint_reg_t bucket_id    : 13; | ||||
|     uint_reg_t ct           : 1; | ||||
|     uint_reg_t ce           : 1; | ||||
|     uint_reg_t l2_size      : 14; | ||||
|     uint_reg_t tr           : 1; | ||||
|     uint_reg_t me           : 1; | ||||
|     uint_reg_t __reserved_0 : 1; | ||||
|     uint_reg_t channel      : 5; | ||||
|     uint_reg_t notif_ring   : 8; | ||||
| #endif | ||||
| 
 | ||||
|     /* Word 1 */ | ||||
| 
 | ||||
| #ifndef __BIG_ENDIAN__ | ||||
|     /**
 | ||||
|      * Written by  the classification program.  The associated counter is | ||||
|      * incremented when the packet is sent. | ||||
|      */ | ||||
|     uint_reg_t ctr1          : 5; | ||||
|     /** Reserved. */ | ||||
|     uint_reg_t __reserved_3  : 3; | ||||
|     /**
 | ||||
|      * Written by classification program.  Indicates the start byte for | ||||
|      * checksum.  Relative to 1st byte received from MAC. | ||||
|      */ | ||||
|     uint_reg_t csum_start    : 8; | ||||
|     /**
 | ||||
|      * Checksum seed written by classification program.  Overwritten with | ||||
|      * resultant checksum if CS bit is asserted.  The endianness of the CSUM | ||||
|      * value bits when viewed by Tile software match the packet byte order. | ||||
|      * That is, bits[7:0] of the resulting checksum value correspond to | ||||
|      * earlier (more significant) bytes in the packet.  To avoid classifier | ||||
|      * software from having to byte swap the CSUM_SEED, the iDMA checksum | ||||
|      * engine byte swaps the classifier's result before seeding the checksum | ||||
|      * calculation.  Thus, the CSUM_START byte of packet data is added to | ||||
|      * bits[15:8] of the CSUM_SEED field generated by the classifier.  This | ||||
|      * byte swap will be visible to Tile software if the CS bit is clear. | ||||
|      */ | ||||
|     uint_reg_t csum_seed_val : 16; | ||||
|     /**
 | ||||
|      * Written by  the classification program.  Not interpreted by mPIPE | ||||
|      * hardware. | ||||
|      */ | ||||
|     uint_reg_t custom0       : 32; | ||||
| #else   /* __BIG_ENDIAN__ */ | ||||
|     uint_reg_t custom0       : 32; | ||||
|     uint_reg_t csum_seed_val : 16; | ||||
|     uint_reg_t csum_start    : 8; | ||||
|     uint_reg_t __reserved_3  : 3; | ||||
|     uint_reg_t ctr1          : 5; | ||||
| #endif | ||||
| 
 | ||||
|     /* Word 2 */ | ||||
| 
 | ||||
| #ifndef __BIG_ENDIAN__ | ||||
|     /**
 | ||||
|      * Written by  the classification program.  Not interpreted by mPIPE | ||||
|      * hardware. | ||||
|      */ | ||||
|     uint_reg_t custom1 : 64; | ||||
| #else   /* __BIG_ENDIAN__ */ | ||||
|     uint_reg_t custom1 : 64; | ||||
| #endif | ||||
| 
 | ||||
|     /* Word 3 */ | ||||
| 
 | ||||
| #ifndef __BIG_ENDIAN__ | ||||
|     /**
 | ||||
|      * Written by  the classification program.  Not interpreted by mPIPE | ||||
|      * hardware. | ||||
|      */ | ||||
|     uint_reg_t custom2 : 64; | ||||
| #else   /* __BIG_ENDIAN__ */ | ||||
|     uint_reg_t custom2 : 64; | ||||
| #endif | ||||
| 
 | ||||
|     /* Word 4 */ | ||||
| 
 | ||||
| #ifndef __BIG_ENDIAN__ | ||||
|     /**
 | ||||
|      * Written by  the classification program.  Not interpreted by mPIPE | ||||
|      * hardware. | ||||
|      */ | ||||
|     uint_reg_t custom3 : 64; | ||||
| #else   /* __BIG_ENDIAN__ */ | ||||
|     uint_reg_t custom3 : 64; | ||||
| #endif | ||||
| 
 | ||||
|     /* Word 5 */ | ||||
| 
 | ||||
| #ifndef __BIG_ENDIAN__ | ||||
|     /**
 | ||||
|      * Sequence number applied when packet is distributed.   Classifier | ||||
|      * selects which sequence number is to be applied by writing the 13-bit | ||||
|      * SQN-selector into this field. | ||||
|      */ | ||||
|     uint_reg_t gp_sqn     : 16; | ||||
|     /**
 | ||||
|      * Written by notification hardware.  The packet sequence number is | ||||
|      * incremented for each packet that wasn't dropped. | ||||
|      */ | ||||
|     uint_reg_t packet_sqn : 48; | ||||
| #else   /* __BIG_ENDIAN__ */ | ||||
|     uint_reg_t packet_sqn : 48; | ||||
|     uint_reg_t gp_sqn     : 16; | ||||
| #endif | ||||
| 
 | ||||
|     /* Word 6 */ | ||||
| 
 | ||||
| #ifndef __BIG_ENDIAN__ | ||||
|     /**
 | ||||
|      * Written by hardware when the start-of-packet is received by the mPIPE | ||||
|      * from the MAC.  This is the nanoseconds part of the packet timestamp. | ||||
|      */ | ||||
|     uint_reg_t time_stamp_ns  : 32; | ||||
|     /**
 | ||||
|      * Written by hardware when the start-of-packet is received by the mPIPE | ||||
|      * from the MAC.  This is the seconds part of the packet timestamp. | ||||
|      */ | ||||
|     uint_reg_t time_stamp_sec : 32; | ||||
| #else   /* __BIG_ENDIAN__ */ | ||||
|     uint_reg_t time_stamp_sec : 32; | ||||
|     uint_reg_t time_stamp_ns  : 32; | ||||
| #endif | ||||
| 
 | ||||
|     /* Word 7 */ | ||||
| 
 | ||||
| #ifndef __BIG_ENDIAN__ | ||||
|     /** Virtual address.  Must be sign extended by consumer. */ | ||||
|     int_reg_t va           : 42; | ||||
|     /** Reserved. */ | ||||
|     uint_reg_t __reserved_4 : 6; | ||||
|     /** Index of the buffer stack to which this buffer belongs. */ | ||||
|     uint_reg_t stack_idx    : 5; | ||||
|     /** Reserved. */ | ||||
|     uint_reg_t __reserved_5 : 3; | ||||
|     /**
 | ||||
|      * Instance ID.  For devices that support more than one mPIPE instance, | ||||
|      * this field indicates the buffer owner.  If the INST field does not | ||||
|      * match the mPIPE's instance number when a packet is egressed, buffers | ||||
|      * with HWB set will be returned to the other mPIPE instance. | ||||
|      */ | ||||
|     uint_reg_t inst         : 1; | ||||
|     /** Reserved. */ | ||||
|     uint_reg_t __reserved_6 : 1; | ||||
|     /**
 | ||||
|      * Always set to one by hardware in iDMA packet descriptors.  For eDMA, | ||||
|      * indicates whether the buffer will be released to the buffer stack | ||||
|      * manager.  When 0, software is responsible for releasing the buffer. | ||||
|      */ | ||||
|     uint_reg_t hwb          : 1; | ||||
|     /**
 | ||||
|      * Encoded size of buffer.  Set by the ingress hardware for iDMA packet | ||||
|      * descriptors.  For eDMA descriptors, indicates the buffer size if .c | ||||
|      * indicates a chained packet.  If an eDMA descriptor is not chained and | ||||
|      * the .hwb bit is not set, this field is ignored and the size is | ||||
|      * specified by the .xfer_size field. | ||||
|      * 0 = 128 bytes | ||||
|      * 1 = 256 bytes | ||||
|      * 2 = 512 bytes | ||||
|      * 3 = 1024 bytes | ||||
|      * 4 = 1664 bytes | ||||
|      * 5 = 4096 bytes | ||||
|      * 6 = 10368 bytes | ||||
|      * 7 = 16384 bytes | ||||
|      */ | ||||
|     uint_reg_t size         : 3; | ||||
|     /**
 | ||||
|      * Chaining configuration for the buffer.  Indicates that an ingress | ||||
|      * packet or egress command is chained across multiple buffers, with each | ||||
|      * buffer's size indicated by the .size field. | ||||
|      */ | ||||
|     uint_reg_t c            : 2; | ||||
| #else   /* __BIG_ENDIAN__ */ | ||||
|     uint_reg_t c            : 2; | ||||
|     uint_reg_t size         : 3; | ||||
|     uint_reg_t hwb          : 1; | ||||
|     uint_reg_t __reserved_6 : 1; | ||||
|     uint_reg_t inst         : 1; | ||||
|     uint_reg_t __reserved_5 : 3; | ||||
|     uint_reg_t stack_idx    : 5; | ||||
|     uint_reg_t __reserved_4 : 6; | ||||
|     int_reg_t va           : 42; | ||||
| #endif | ||||
| 
 | ||||
|   }; | ||||
| 
 | ||||
|   /** Word access */ | ||||
|   uint_reg_t words[8]; | ||||
| } MPIPE_PDESC_t; | ||||
| #endif /* !defined(__ASSEMBLER__) */ | ||||
| 
 | ||||
| #endif /* !defined(__ARCH_MPIPE_SHM_H__) */ | ||||
							
								
								
									
										23
									
								
								arch/tile/include/arch/mpipe_shm_def.h
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										23
									
								
								arch/tile/include/arch/mpipe_shm_def.h
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,23 @@ | ||||
| /*
 | ||||
|  * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||||
|  * | ||||
|  *   This program is free software; you can redistribute it and/or | ||||
|  *   modify it under the terms of the GNU General Public License | ||||
|  *   as published by the Free Software Foundation, version 2. | ||||
|  * | ||||
|  *   This program is distributed in the hope that it will be useful, but | ||||
|  *   WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||||
|  *   NON INFRINGEMENT.  See the GNU General Public License for | ||||
|  *   more details. | ||||
|  */ | ||||
| 
 | ||||
| /* Machine-generated file; do not edit. */ | ||||
| 
 | ||||
| #ifndef __ARCH_MPIPE_SHM_DEF_H__ | ||||
| #define __ARCH_MPIPE_SHM_DEF_H__ | ||||
| #define MPIPE_EDMA_DESC_WORD1__C_VAL_UNCHAINED 0x0 | ||||
| #define MPIPE_EDMA_DESC_WORD1__C_VAL_CHAINED 0x1 | ||||
| #define MPIPE_EDMA_DESC_WORD1__C_VAL_NOT_RDY 0x2 | ||||
| #define MPIPE_EDMA_DESC_WORD1__C_VAL_INVALID 0x3 | ||||
| #endif /* !defined(__ARCH_MPIPE_SHM_DEF_H__) */ | ||||
							
								
								
									
										72
									
								
								arch/tile/include/arch/trio.h
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										72
									
								
								arch/tile/include/arch/trio.h
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,72 @@ | ||||
| /*
 | ||||
|  * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||||
|  * | ||||
|  *   This program is free software; you can redistribute it and/or | ||||
|  *   modify it under the terms of the GNU General Public License | ||||
|  *   as published by the Free Software Foundation, version 2. | ||||
|  * | ||||
|  *   This program is distributed in the hope that it will be useful, but | ||||
|  *   WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||||
|  *   NON INFRINGEMENT.  See the GNU General Public License for | ||||
|  *   more details. | ||||
|  */ | ||||
| 
 | ||||
| /* Machine-generated file; do not edit. */ | ||||
| 
 | ||||
| #ifndef __ARCH_TRIO_H__ | ||||
| #define __ARCH_TRIO_H__ | ||||
| 
 | ||||
| #include <arch/abi.h> | ||||
| #include <arch/trio_def.h> | ||||
| 
 | ||||
| #ifndef __ASSEMBLER__ | ||||
| 
 | ||||
| /*
 | ||||
|  * Tile PIO Region Configuration - CFG Address Format. | ||||
|  * This register describes the address format for PIO accesses when the | ||||
|  * associated region is setup with TYPE=CFG. | ||||
|  */ | ||||
| 
 | ||||
| __extension__ | ||||
| typedef union | ||||
| { | ||||
|   struct | ||||
|   { | ||||
| #ifndef __BIG_ENDIAN__ | ||||
|     /* Register Address (full byte address). */ | ||||
|     uint_reg_t reg_addr     : 12; | ||||
|     /* Function Number */ | ||||
|     uint_reg_t fn           : 3; | ||||
|     /* Device Number */ | ||||
|     uint_reg_t dev          : 5; | ||||
|     /* BUS Number */ | ||||
|     uint_reg_t bus          : 8; | ||||
|     /* Config Type: 0 for access to directly-attached device.  1 otherwise. */ | ||||
|     uint_reg_t type         : 1; | ||||
|     /* Reserved. */ | ||||
|     uint_reg_t __reserved_0 : 1; | ||||
|     /*
 | ||||
|      * MAC select.  This must match the configuration in | ||||
|      * TILE_PIO_REGION_SETUP.MAC. | ||||
|      */ | ||||
|     uint_reg_t mac          : 2; | ||||
|     /* Reserved. */ | ||||
|     uint_reg_t __reserved_1 : 32; | ||||
| #else   /* __BIG_ENDIAN__ */ | ||||
|     uint_reg_t __reserved_1 : 32; | ||||
|     uint_reg_t mac          : 2; | ||||
|     uint_reg_t __reserved_0 : 1; | ||||
|     uint_reg_t type         : 1; | ||||
|     uint_reg_t bus          : 8; | ||||
|     uint_reg_t dev          : 5; | ||||
|     uint_reg_t fn           : 3; | ||||
|     uint_reg_t reg_addr     : 12; | ||||
| #endif | ||||
|   }; | ||||
| 
 | ||||
|   uint_reg_t word; | ||||
| } TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR_t; | ||||
| #endif /* !defined(__ASSEMBLER__) */ | ||||
| 
 | ||||
| #endif /* !defined(__ARCH_TRIO_H__) */ | ||||
							
								
								
									
										36
									
								
								arch/tile/include/arch/trio_constants.h
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										36
									
								
								arch/tile/include/arch/trio_constants.h
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,36 @@ | ||||
| /*
 | ||||
|  * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||||
|  * | ||||
|  *   This program is free software; you can redistribute it and/or | ||||
|  *   modify it under the terms of the GNU General Public License | ||||
|  *   as published by the Free Software Foundation, version 2. | ||||
|  * | ||||
|  *   This program is distributed in the hope that it will be useful, but | ||||
|  *   WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||||
|  *   NON INFRINGEMENT.  See the GNU General Public License for | ||||
|  *   more details. | ||||
|  */ | ||||
| 
 | ||||
| 
 | ||||
| #ifndef __ARCH_TRIO_CONSTANTS_H__ | ||||
| #define __ARCH_TRIO_CONSTANTS_H__ | ||||
| 
 | ||||
| #define TRIO_NUM_ASIDS 16 | ||||
| #define TRIO_NUM_TLBS_PER_ASID 16 | ||||
| 
 | ||||
| #define TRIO_NUM_TPIO_REGIONS 8 | ||||
| #define TRIO_LOG2_NUM_TPIO_REGIONS 3 | ||||
| 
 | ||||
| #define TRIO_NUM_MAP_MEM_REGIONS 16 | ||||
| #define TRIO_LOG2_NUM_MAP_MEM_REGIONS 4 | ||||
| #define TRIO_NUM_MAP_SQ_REGIONS 8 | ||||
| #define TRIO_LOG2_NUM_MAP_SQ_REGIONS 3 | ||||
| 
 | ||||
| #define TRIO_LOG2_NUM_SQ_FIFO_ENTRIES 6 | ||||
| 
 | ||||
| #define TRIO_NUM_PUSH_DMA_RINGS 32 | ||||
| 
 | ||||
| #define TRIO_NUM_PULL_DMA_RINGS 32 | ||||
| 
 | ||||
| #endif /* __ARCH_TRIO_CONSTANTS_H__ */ | ||||
							
								
								
									
										41
									
								
								arch/tile/include/arch/trio_def.h
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										41
									
								
								arch/tile/include/arch/trio_def.h
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,41 @@ | ||||
| /*
 | ||||
|  * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||||
|  * | ||||
|  *   This program is free software; you can redistribute it and/or | ||||
|  *   modify it under the terms of the GNU General Public License | ||||
|  *   as published by the Free Software Foundation, version 2. | ||||
|  * | ||||
|  *   This program is distributed in the hope that it will be useful, but | ||||
|  *   WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||||
|  *   NON INFRINGEMENT.  See the GNU General Public License for | ||||
|  *   more details. | ||||
|  */ | ||||
| 
 | ||||
| /* Machine-generated file; do not edit. */ | ||||
| 
 | ||||
| #ifndef __ARCH_TRIO_DEF_H__ | ||||
| #define __ARCH_TRIO_DEF_H__ | ||||
| #define TRIO_CFG_REGION_ADDR__REG_SHIFT 0 | ||||
| #define TRIO_CFG_REGION_ADDR__INTFC_SHIFT 16 | ||||
| #define TRIO_CFG_REGION_ADDR__INTFC_VAL_TRIO 0x0 | ||||
| #define TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE 0x1 | ||||
| #define TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD 0x2 | ||||
| #define TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_PROTECTED 0x3 | ||||
| #define TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT 18 | ||||
| #define TRIO_CFG_REGION_ADDR__PROT_SHIFT 20 | ||||
| #define TRIO_PIO_REGIONS_ADDR__REGION_SHIFT 32 | ||||
| #define TRIO_MAP_MEM_REG_INT0 0x1000000000 | ||||
| #define TRIO_MAP_MEM_REG_INT1 0x1000000008 | ||||
| #define TRIO_MAP_MEM_REG_INT2 0x1000000010 | ||||
| #define TRIO_MAP_MEM_REG_INT3 0x1000000018 | ||||
| #define TRIO_MAP_MEM_REG_INT4 0x1000000020 | ||||
| #define TRIO_MAP_MEM_REG_INT5 0x1000000028 | ||||
| #define TRIO_MAP_MEM_REG_INT6 0x1000000030 | ||||
| #define TRIO_MAP_MEM_REG_INT7 0x1000000038 | ||||
| #define TRIO_MAP_MEM_LIM__ADDR_SHIFT 12 | ||||
| #define TRIO_MAP_MEM_SETUP__ORDER_MODE_VAL_UNORDERED 0x0 | ||||
| #define TRIO_MAP_MEM_SETUP__ORDER_MODE_VAL_STRICT 0x1 | ||||
| #define TRIO_MAP_MEM_SETUP__ORDER_MODE_VAL_REL_ORD 0x2 | ||||
| #define TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT 30 | ||||
| #endif /* !defined(__ARCH_TRIO_DEF_H__) */ | ||||
							
								
								
									
										229
									
								
								arch/tile/include/arch/trio_pcie_intfc.h
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										229
									
								
								arch/tile/include/arch/trio_pcie_intfc.h
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,229 @@ | ||||
| /*
 | ||||
|  * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||||
|  * | ||||
|  *   This program is free software; you can redistribute it and/or | ||||
|  *   modify it under the terms of the GNU General Public License | ||||
|  *   as published by the Free Software Foundation, version 2. | ||||
|  * | ||||
|  *   This program is distributed in the hope that it will be useful, but | ||||
|  *   WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||||
|  *   NON INFRINGEMENT.  See the GNU General Public License for | ||||
|  *   more details. | ||||
|  */ | ||||
| 
 | ||||
| /* Machine-generated file; do not edit. */ | ||||
| 
 | ||||
| #ifndef __ARCH_TRIO_PCIE_INTFC_H__ | ||||
| #define __ARCH_TRIO_PCIE_INTFC_H__ | ||||
| 
 | ||||
| #include <arch/abi.h> | ||||
| #include <arch/trio_pcie_intfc_def.h> | ||||
| 
 | ||||
| #ifndef __ASSEMBLER__ | ||||
| 
 | ||||
| /*
 | ||||
|  * Port Configuration. | ||||
|  * Configuration of the PCIe Port | ||||
|  */ | ||||
| 
 | ||||
| __extension__ | ||||
| typedef union | ||||
| { | ||||
|   struct | ||||
|   { | ||||
| #ifndef __BIG_ENDIAN__ | ||||
|     /* Provides the state of the strapping pins for this port. */ | ||||
|     uint_reg_t strap_state      : 3; | ||||
|     /* Reserved. */ | ||||
|     uint_reg_t __reserved_0     : 1; | ||||
|     /*
 | ||||
|      * When 1, the device type will be overridden using OVD_DEV_TYPE_VAL. | ||||
|      * When 0, the device type is determined based on the STRAP_STATE. | ||||
|      */ | ||||
|     uint_reg_t ovd_dev_type     : 1; | ||||
|     /* Provides the device type when OVD_DEV_TYPE is 1. */ | ||||
|     uint_reg_t ovd_dev_type_val : 4; | ||||
|     /* Determines how link is trained. */ | ||||
|     uint_reg_t train_mode       : 2; | ||||
|     /* Reserved. */ | ||||
|     uint_reg_t __reserved_1     : 1; | ||||
|     /*
 | ||||
|      * For PCIe, used to flip physical RX lanes that were not properly wired. | ||||
|      *  This is not the same as lane reversal which is handled automatically | ||||
|      * during link training.  When 0, RX Lane0 must be wired to the link | ||||
|      * partner (either to its Lane0 or it's LaneN).  When RX_LANE_FLIP is 1, | ||||
|      * the highest numbered lane for this port becomes Lane0 and Lane0 does | ||||
|      * NOT have to be wired to the link partner. | ||||
|      */ | ||||
|     uint_reg_t rx_lane_flip     : 1; | ||||
|     /*
 | ||||
|      * For PCIe, used to flip physical TX lanes that were not properly wired. | ||||
|      *  This is not the same as lane reversal which is handled automatically | ||||
|      * during link training.  When 0, TX Lane0 must be wired to the link | ||||
|      * partner (either to its Lane0 or it's LaneN).  When TX_LANE_FLIP is 1, | ||||
|      * the highest numbered lane for this port becomes Lane0 and Lane0 does | ||||
|      * NOT have to be wired to the link partner. | ||||
|      */ | ||||
|     uint_reg_t tx_lane_flip     : 1; | ||||
|     /*
 | ||||
|      * For StreamIO port, configures the width of the port when TRAIN_MODE is | ||||
|      * not STRAP. | ||||
|      */ | ||||
|     uint_reg_t stream_width     : 2; | ||||
|     /*
 | ||||
|      * For StreamIO port, configures the rate of the port when TRAIN_MODE is | ||||
|      * not STRAP. | ||||
|      */ | ||||
|     uint_reg_t stream_rate      : 2; | ||||
|     /* Reserved. */ | ||||
|     uint_reg_t __reserved_2     : 46; | ||||
| #else   /* __BIG_ENDIAN__ */ | ||||
|     uint_reg_t __reserved_2     : 46; | ||||
|     uint_reg_t stream_rate      : 2; | ||||
|     uint_reg_t stream_width     : 2; | ||||
|     uint_reg_t tx_lane_flip     : 1; | ||||
|     uint_reg_t rx_lane_flip     : 1; | ||||
|     uint_reg_t __reserved_1     : 1; | ||||
|     uint_reg_t train_mode       : 2; | ||||
|     uint_reg_t ovd_dev_type_val : 4; | ||||
|     uint_reg_t ovd_dev_type     : 1; | ||||
|     uint_reg_t __reserved_0     : 1; | ||||
|     uint_reg_t strap_state      : 3; | ||||
| #endif | ||||
|   }; | ||||
| 
 | ||||
|   uint_reg_t word; | ||||
| } TRIO_PCIE_INTFC_PORT_CONFIG_t; | ||||
| 
 | ||||
| /*
 | ||||
|  * Port Status. | ||||
|  * Status of the PCIe Port.  This register applies to the StreamIO port when | ||||
|  * StreamIO is enabled. | ||||
|  */ | ||||
| 
 | ||||
| __extension__ | ||||
| typedef union | ||||
| { | ||||
|   struct | ||||
|   { | ||||
| #ifndef __BIG_ENDIAN__ | ||||
|     /*
 | ||||
|      * Indicates the DL state of the port.  When 1, the port is up and ready | ||||
|      * to receive traffic. | ||||
|      */ | ||||
|     uint_reg_t dl_up        : 1; | ||||
|     /*
 | ||||
|      * Indicates the number of times the link has gone down.  Clears on read. | ||||
|      */ | ||||
|     uint_reg_t dl_down_cnt  : 7; | ||||
|     /* Indicates the SERDES PLL has spun up and is providing a valid clock. */ | ||||
|     uint_reg_t clock_ready  : 1; | ||||
|     /* Reserved. */ | ||||
|     uint_reg_t __reserved_0 : 7; | ||||
|     /* Device revision ID. */ | ||||
|     uint_reg_t device_rev   : 8; | ||||
|     /* Link state (PCIe). */ | ||||
|     uint_reg_t ltssm_state  : 6; | ||||
|     /* Link power management state (PCIe). */ | ||||
|     uint_reg_t pm_state     : 3; | ||||
|     /* Reserved. */ | ||||
|     uint_reg_t __reserved_1 : 31; | ||||
| #else   /* __BIG_ENDIAN__ */ | ||||
|     uint_reg_t __reserved_1 : 31; | ||||
|     uint_reg_t pm_state     : 3; | ||||
|     uint_reg_t ltssm_state  : 6; | ||||
|     uint_reg_t device_rev   : 8; | ||||
|     uint_reg_t __reserved_0 : 7; | ||||
|     uint_reg_t clock_ready  : 1; | ||||
|     uint_reg_t dl_down_cnt  : 7; | ||||
|     uint_reg_t dl_up        : 1; | ||||
| #endif | ||||
|   }; | ||||
| 
 | ||||
|   uint_reg_t word; | ||||
| } TRIO_PCIE_INTFC_PORT_STATUS_t; | ||||
| 
 | ||||
| /*
 | ||||
|  * Transmit FIFO Control. | ||||
|  * Contains TX FIFO thresholds.  These registers are for diagnostics purposes | ||||
|  * only.  Changing these values causes undefined behavior. | ||||
|  */ | ||||
| 
 | ||||
| __extension__ | ||||
| typedef union | ||||
| { | ||||
|   struct | ||||
|   { | ||||
| #ifndef __BIG_ENDIAN__ | ||||
|     /*
 | ||||
|      * Almost-Empty level for TX0 data.  Typically set to at least | ||||
|      * roundup(38.0*M/N) where N=tclk frequency and M=MAC symbol rate in MHz | ||||
|      * for a x4 port (250MHz). | ||||
|      */ | ||||
|     uint_reg_t tx0_data_ae_lvl : 7; | ||||
|     /* Reserved. */ | ||||
|     uint_reg_t __reserved_0    : 1; | ||||
|     /* Almost-Empty level for TX1 data. */ | ||||
|     uint_reg_t tx1_data_ae_lvl : 7; | ||||
|     /* Reserved. */ | ||||
|     uint_reg_t __reserved_1    : 1; | ||||
|     /* Almost-Full level for TX0 data. */ | ||||
|     uint_reg_t tx0_data_af_lvl : 7; | ||||
|     /* Reserved. */ | ||||
|     uint_reg_t __reserved_2    : 1; | ||||
|     /* Almost-Full level for TX1 data. */ | ||||
|     uint_reg_t tx1_data_af_lvl : 7; | ||||
|     /* Reserved. */ | ||||
|     uint_reg_t __reserved_3    : 1; | ||||
|     /* Almost-Full level for TX0 info. */ | ||||
|     uint_reg_t tx0_info_af_lvl : 5; | ||||
|     /* Reserved. */ | ||||
|     uint_reg_t __reserved_4    : 3; | ||||
|     /* Almost-Full level for TX1 info. */ | ||||
|     uint_reg_t tx1_info_af_lvl : 5; | ||||
|     /* Reserved. */ | ||||
|     uint_reg_t __reserved_5    : 3; | ||||
|     /*
 | ||||
|      * This register provides performance adjustment for high bandwidth | ||||
|      * flows.  The MAC will assert almost-full to TRIO if non-posted credits | ||||
|      * fall below this level.  Note that setting this larger than the initial | ||||
|      * PORT_CREDIT.NPH value will cause READS to never be sent.  If the | ||||
|      * initial credit value from the link partner is smaller than this value | ||||
|      * when the link comes up, the value will be reset to the initial credit | ||||
|      * value to prevent lockup. | ||||
|      */ | ||||
|     uint_reg_t min_np_credits  : 8; | ||||
|     /*
 | ||||
|      * This register provides performance adjustment for high bandwidth | ||||
|      * flows.  The MAC will assert almost-full to TRIO if posted credits fall | ||||
|      * below this level.  Note that setting this larger than the initial | ||||
|      * PORT_CREDIT.PH value will cause WRITES to never be sent.  If the | ||||
|      * initial credit value from the link partner is smaller than this value | ||||
|      * when the link comes up, the value will be reset to the initial credit | ||||
|      * value to prevent lockup. | ||||
|      */ | ||||
|     uint_reg_t min_p_credits   : 8; | ||||
| #else   /* __BIG_ENDIAN__ */ | ||||
|     uint_reg_t min_p_credits   : 8; | ||||
|     uint_reg_t min_np_credits  : 8; | ||||
|     uint_reg_t __reserved_5    : 3; | ||||
|     uint_reg_t tx1_info_af_lvl : 5; | ||||
|     uint_reg_t __reserved_4    : 3; | ||||
|     uint_reg_t tx0_info_af_lvl : 5; | ||||
|     uint_reg_t __reserved_3    : 1; | ||||
|     uint_reg_t tx1_data_af_lvl : 7; | ||||
|     uint_reg_t __reserved_2    : 1; | ||||
|     uint_reg_t tx0_data_af_lvl : 7; | ||||
|     uint_reg_t __reserved_1    : 1; | ||||
|     uint_reg_t tx1_data_ae_lvl : 7; | ||||
|     uint_reg_t __reserved_0    : 1; | ||||
|     uint_reg_t tx0_data_ae_lvl : 7; | ||||
| #endif | ||||
|   }; | ||||
| 
 | ||||
|   uint_reg_t word; | ||||
| } TRIO_PCIE_INTFC_TX_FIFO_CTL_t; | ||||
| #endif /* !defined(__ASSEMBLER__) */ | ||||
| 
 | ||||
| #endif /* !defined(__ARCH_TRIO_PCIE_INTFC_H__) */ | ||||
							
								
								
									
										32
									
								
								arch/tile/include/arch/trio_pcie_intfc_def.h
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										32
									
								
								arch/tile/include/arch/trio_pcie_intfc_def.h
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,32 @@ | ||||
| /*
 | ||||
|  * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||||
|  * | ||||
|  *   This program is free software; you can redistribute it and/or | ||||
|  *   modify it under the terms of the GNU General Public License | ||||
|  *   as published by the Free Software Foundation, version 2. | ||||
|  * | ||||
|  *   This program is distributed in the hope that it will be useful, but | ||||
|  *   WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||||
|  *   NON INFRINGEMENT.  See the GNU General Public License for | ||||
|  *   more details. | ||||
|  */ | ||||
| 
 | ||||
| /* Machine-generated file; do not edit. */ | ||||
| 
 | ||||
| #ifndef __ARCH_TRIO_PCIE_INTFC_DEF_H__ | ||||
| #define __ARCH_TRIO_PCIE_INTFC_DEF_H__ | ||||
| #define TRIO_PCIE_INTFC_MAC_INT_STS 0x0000 | ||||
| #define TRIO_PCIE_INTFC_MAC_INT_STS__INT_LEVEL_MASK  0xf000 | ||||
| #define TRIO_PCIE_INTFC_PORT_CONFIG 0x0018 | ||||
| #define TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_DISABLED 0x0 | ||||
| #define TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT 0x1 | ||||
| #define TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_RC 0x2 | ||||
| #define TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT_G1 0x3 | ||||
| #define TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_RC_G1 0x4 | ||||
| #define TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_XLINK 0x5 | ||||
| #define TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_STREAM_X1 0x6 | ||||
| #define TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_STREAM_X4 0x7 | ||||
| #define TRIO_PCIE_INTFC_PORT_STATUS 0x0020 | ||||
| #define TRIO_PCIE_INTFC_TX_FIFO_CTL 0x0050 | ||||
| #endif /* !defined(__ARCH_TRIO_PCIE_INTFC_DEF_H__) */ | ||||
							
								
								
									
										156
									
								
								arch/tile/include/arch/trio_pcie_rc.h
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										156
									
								
								arch/tile/include/arch/trio_pcie_rc.h
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,156 @@ | ||||
| /*
 | ||||
|  * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||||
|  * | ||||
|  *   This program is free software; you can redistribute it and/or | ||||
|  *   modify it under the terms of the GNU General Public License | ||||
|  *   as published by the Free Software Foundation, version 2. | ||||
|  * | ||||
|  *   This program is distributed in the hope that it will be useful, but | ||||
|  *   WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||||
|  *   NON INFRINGEMENT.  See the GNU General Public License for | ||||
|  *   more details. | ||||
|  */ | ||||
| 
 | ||||
| /* Machine-generated file; do not edit. */ | ||||
| 
 | ||||
| #ifndef __ARCH_TRIO_PCIE_RC_H__ | ||||
| #define __ARCH_TRIO_PCIE_RC_H__ | ||||
| 
 | ||||
| #include <arch/abi.h> | ||||
| #include <arch/trio_pcie_rc_def.h> | ||||
| 
 | ||||
| #ifndef __ASSEMBLER__ | ||||
| 
 | ||||
| /* Device Capabilities Register. */ | ||||
| 
 | ||||
| __extension__ | ||||
| typedef union | ||||
| { | ||||
|   struct | ||||
|   { | ||||
| #ifndef __BIG_ENDIAN__ | ||||
|     /*
 | ||||
|      * Max_Payload_Size Supported, writablethrough the MAC_STANDARD interface | ||||
|      */ | ||||
|     uint_reg_t mps_sup                    : 3; | ||||
|     /*
 | ||||
|      * This field is writable through the MAC_STANDARD interface.  However, | ||||
|      * Phantom Function is not  supported. Therefore, the application must | ||||
|      * not write any value other than 0x0 to this  field. | ||||
|      */ | ||||
|     uint_reg_t phantom_function_supported : 2; | ||||
|     /* This bit is writable through the MAC_STANDARD interface. */ | ||||
|     uint_reg_t ext_tag_field_supported    : 1; | ||||
|     /* Reserved. */ | ||||
|     uint_reg_t __reserved_0               : 3; | ||||
|     /* Endpoint L1 Acceptable Latency Must be 0x0 for non-Endpoint devices. */ | ||||
|     uint_reg_t l1_lat                     : 3; | ||||
|     /*
 | ||||
|      * Undefined since PCI Express 1.1 (Was Attention Button Present for PCI | ||||
|      * Express 1.0a) | ||||
|      */ | ||||
|     uint_reg_t r1                         : 1; | ||||
|     /*
 | ||||
|      * Undefined since PCI Express 1.1 (Was Attention Indicator Present for | ||||
|      * PCI  Express 1.0a) | ||||
|      */ | ||||
|     uint_reg_t r2                         : 1; | ||||
|     /*
 | ||||
|      * Undefined since PCI Express 1.1 (Was Power Indicator Present for PCI | ||||
|      * Express 1.0a) | ||||
|      */ | ||||
|     uint_reg_t r3                         : 1; | ||||
|     /*
 | ||||
|      * Role-Based Error Reporting, writable through the MAC_STANDARD | ||||
|      * interface.  Required to be set for device compliant to 1.1  spec and | ||||
|      * later. | ||||
|      */ | ||||
|     uint_reg_t rer                        : 1; | ||||
|     /* Reserved. */ | ||||
|     uint_reg_t __reserved_1               : 2; | ||||
|     /* Captured Slot Power Limit Value Upstream port only. */ | ||||
|     uint_reg_t slot_pwr_lim               : 8; | ||||
|     /* Captured Slot Power Limit Scale Upstream port only. */ | ||||
|     uint_reg_t slot_pwr_scale             : 2; | ||||
|     /* Reserved. */ | ||||
|     uint_reg_t __reserved_2               : 4; | ||||
|     /* Endpoint L0s Acceptable LatencyMust be 0x0 for non-Endpoint devices. */ | ||||
|     uint_reg_t l0s_lat                    : 1; | ||||
|     /* Reserved. */ | ||||
|     uint_reg_t __reserved_3               : 31; | ||||
| #else   /* __BIG_ENDIAN__ */ | ||||
|     uint_reg_t __reserved_3               : 31; | ||||
|     uint_reg_t l0s_lat                    : 1; | ||||
|     uint_reg_t __reserved_2               : 4; | ||||
|     uint_reg_t slot_pwr_scale             : 2; | ||||
|     uint_reg_t slot_pwr_lim               : 8; | ||||
|     uint_reg_t __reserved_1               : 2; | ||||
|     uint_reg_t rer                        : 1; | ||||
|     uint_reg_t r3                         : 1; | ||||
|     uint_reg_t r2                         : 1; | ||||
|     uint_reg_t r1                         : 1; | ||||
|     uint_reg_t l1_lat                     : 3; | ||||
|     uint_reg_t __reserved_0               : 3; | ||||
|     uint_reg_t ext_tag_field_supported    : 1; | ||||
|     uint_reg_t phantom_function_supported : 2; | ||||
|     uint_reg_t mps_sup                    : 3; | ||||
| #endif | ||||
|   }; | ||||
| 
 | ||||
|   uint_reg_t word; | ||||
| } TRIO_PCIE_RC_DEVICE_CAP_t; | ||||
| 
 | ||||
| /* Device Control Register. */ | ||||
| 
 | ||||
| __extension__ | ||||
| typedef union | ||||
| { | ||||
|   struct | ||||
|   { | ||||
| #ifndef __BIG_ENDIAN__ | ||||
|     /* Correctable Error Reporting Enable */ | ||||
|     uint_reg_t cor_err_ena      : 1; | ||||
|     /* Non-Fatal Error Reporting Enable */ | ||||
|     uint_reg_t nf_err_ena       : 1; | ||||
|     /* Fatal Error Reporting Enable */ | ||||
|     uint_reg_t fatal_err_ena    : 1; | ||||
|     /* Unsupported Request Reporting Enable */ | ||||
|     uint_reg_t ur_ena           : 1; | ||||
|     /* Relaxed orderring enable */ | ||||
|     uint_reg_t ro_ena           : 1; | ||||
|     /* Max Payload Size */ | ||||
|     uint_reg_t max_payload_size : 3; | ||||
|     /* Extended Tag Field Enable */ | ||||
|     uint_reg_t ext_tag          : 1; | ||||
|     /* Phantom Function Enable */ | ||||
|     uint_reg_t ph_fn_ena        : 1; | ||||
|     /* AUX Power PM Enable */ | ||||
|     uint_reg_t aux_pm_ena       : 1; | ||||
|     /* Enable NoSnoop */ | ||||
|     uint_reg_t no_snoop         : 1; | ||||
|     /* Max read request size */ | ||||
|     uint_reg_t max_read_req_sz  : 3; | ||||
|     /* Reserved. */ | ||||
|     uint_reg_t __reserved       : 49; | ||||
| #else   /* __BIG_ENDIAN__ */ | ||||
|     uint_reg_t __reserved       : 49; | ||||
|     uint_reg_t max_read_req_sz  : 3; | ||||
|     uint_reg_t no_snoop         : 1; | ||||
|     uint_reg_t aux_pm_ena       : 1; | ||||
|     uint_reg_t ph_fn_ena        : 1; | ||||
|     uint_reg_t ext_tag          : 1; | ||||
|     uint_reg_t max_payload_size : 3; | ||||
|     uint_reg_t ro_ena           : 1; | ||||
|     uint_reg_t ur_ena           : 1; | ||||
|     uint_reg_t fatal_err_ena    : 1; | ||||
|     uint_reg_t nf_err_ena       : 1; | ||||
|     uint_reg_t cor_err_ena      : 1; | ||||
| #endif | ||||
|   }; | ||||
| 
 | ||||
|   uint_reg_t word; | ||||
| } TRIO_PCIE_RC_DEVICE_CONTROL_t; | ||||
| #endif /* !defined(__ASSEMBLER__) */ | ||||
| 
 | ||||
| #endif /* !defined(__ARCH_TRIO_PCIE_RC_H__) */ | ||||
							
								
								
									
										24
									
								
								arch/tile/include/arch/trio_pcie_rc_def.h
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										24
									
								
								arch/tile/include/arch/trio_pcie_rc_def.h
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,24 @@ | ||||
| /*
 | ||||
|  * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||||
|  * | ||||
|  *   This program is free software; you can redistribute it and/or | ||||
|  *   modify it under the terms of the GNU General Public License | ||||
|  *   as published by the Free Software Foundation, version 2. | ||||
|  * | ||||
|  *   This program is distributed in the hope that it will be useful, but | ||||
|  *   WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||||
|  *   NON INFRINGEMENT.  See the GNU General Public License for | ||||
|  *   more details. | ||||
|  */ | ||||
| 
 | ||||
| /* Machine-generated file; do not edit. */ | ||||
| 
 | ||||
| #ifndef __ARCH_TRIO_PCIE_RC_DEF_H__ | ||||
| #define __ARCH_TRIO_PCIE_RC_DEF_H__ | ||||
| #define TRIO_PCIE_RC_DEVICE_CAP 0x0074 | ||||
| #define TRIO_PCIE_RC_DEVICE_CONTROL 0x0078 | ||||
| #define TRIO_PCIE_RC_DEVICE_ID_VEN_ID 0x0000 | ||||
| #define TRIO_PCIE_RC_DEVICE_ID_VEN_ID__DEV_ID_SHIFT 16 | ||||
| #define TRIO_PCIE_RC_REVISION_ID 0x0008 | ||||
| #endif /* !defined(__ARCH_TRIO_PCIE_RC_DEF_H__) */ | ||||
							
								
								
									
										125
									
								
								arch/tile/include/arch/trio_shm.h
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										125
									
								
								arch/tile/include/arch/trio_shm.h
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,125 @@ | ||||
| /*
 | ||||
|  * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||||
|  * | ||||
|  *   This program is free software; you can redistribute it and/or | ||||
|  *   modify it under the terms of the GNU General Public License | ||||
|  *   as published by the Free Software Foundation, version 2. | ||||
|  * | ||||
|  *   This program is distributed in the hope that it will be useful, but | ||||
|  *   WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||||
|  *   NON INFRINGEMENT.  See the GNU General Public License for | ||||
|  *   more details. | ||||
|  */ | ||||
| 
 | ||||
| /* Machine-generated file; do not edit. */ | ||||
| 
 | ||||
| 
 | ||||
| #ifndef __ARCH_TRIO_SHM_H__ | ||||
| #define __ARCH_TRIO_SHM_H__ | ||||
| 
 | ||||
| #include <arch/abi.h> | ||||
| #include <arch/trio_shm_def.h> | ||||
| 
 | ||||
| #ifndef __ASSEMBLER__ | ||||
| /**
 | ||||
|  * TRIO DMA Descriptor. | ||||
|  * The TRIO DMA descriptor is written by software and consumed by hardware. | ||||
|  * It is used to specify the location of transaction data in the IO and Tile | ||||
|  * domains. | ||||
|  */ | ||||
| 
 | ||||
| __extension__ | ||||
| typedef union | ||||
| { | ||||
|   struct | ||||
|   { | ||||
|     /* Word 0 */ | ||||
| 
 | ||||
| #ifndef __BIG_ENDIAN__ | ||||
|     /** Tile side virtual address. */ | ||||
|     int_reg_t va           : 42; | ||||
|     /**
 | ||||
|      * Encoded size of buffer used on push DMA when C=1: | ||||
|      * 0 = 128 bytes | ||||
|      * 1 = 256 bytes | ||||
|      * 2 = 512 bytes | ||||
|      * 3 = 1024 bytes | ||||
|      * 4 = 1664 bytes | ||||
|      * 5 = 4096 bytes | ||||
|      * 6 = 10368 bytes | ||||
|      * 7 = 16384 bytes | ||||
|      */ | ||||
|     uint_reg_t bsz          : 3; | ||||
|     /**
 | ||||
|      * Chaining designation.  Always zero for pull DMA | ||||
|      * 0 : Unchained buffer pointer | ||||
|      * 1 : Chained buffer pointer.  Next buffer descriptor (e.g. VA) stored | ||||
|      * in 1st 8-bytes in buffer.  For chained buffers, first 8-bytes of each | ||||
|      * buffer contain the next buffer descriptor formatted exactly like a PDE | ||||
|      * buffer descriptor.  This allows a chained PDE buffer to be sent using | ||||
|      * push DMA. | ||||
|      */ | ||||
|     uint_reg_t c            : 1; | ||||
|     /**
 | ||||
|      * Notification interrupt will be delivered when the transaction has | ||||
|      * completed (all data has been read from or written to the Tile-side | ||||
|      * buffer). | ||||
|      */ | ||||
|     uint_reg_t notif        : 1; | ||||
|     /**
 | ||||
|      * When 0, the XSIZE field specifies the total byte count for the | ||||
|      * transaction.  When 1, the XSIZE field is encoded as 2^(N+14) for N in | ||||
|      * {0..6}: | ||||
|      * 0 = 16KB | ||||
|      * 1 = 32KB | ||||
|      * 2 = 64KB | ||||
|      * 3 = 128KB | ||||
|      * 4 = 256KB | ||||
|      * 5 = 512KB | ||||
|      * 6 = 1MB | ||||
|      * All other encodings of the XSIZE field are reserved when SMOD=1 | ||||
|      */ | ||||
|     uint_reg_t smod         : 1; | ||||
|     /**
 | ||||
|      * Total number of bytes to move for this transaction.   When SMOD=1, | ||||
|      * this field is encoded - see SMOD description. | ||||
|      */ | ||||
|     uint_reg_t xsize        : 14; | ||||
|     /** Reserved. */ | ||||
|     uint_reg_t __reserved_0 : 1; | ||||
|     /**
 | ||||
|      * Generation number.  Used to indicate a valid descriptor in ring.  When | ||||
|      * a new descriptor is written into the ring, software must toggle this | ||||
|      * bit.  The net effect is that the GEN bit being written into new | ||||
|      * descriptors toggles each time the ring tail pointer wraps. | ||||
|      */ | ||||
|     uint_reg_t gen          : 1; | ||||
| #else   /* __BIG_ENDIAN__ */ | ||||
|     uint_reg_t gen          : 1; | ||||
|     uint_reg_t __reserved_0 : 1; | ||||
|     uint_reg_t xsize        : 14; | ||||
|     uint_reg_t smod         : 1; | ||||
|     uint_reg_t notif        : 1; | ||||
|     uint_reg_t c            : 1; | ||||
|     uint_reg_t bsz          : 3; | ||||
|     int_reg_t va           : 42; | ||||
| #endif | ||||
| 
 | ||||
|     /* Word 1 */ | ||||
| 
 | ||||
| #ifndef __BIG_ENDIAN__ | ||||
|     /** IO-side address */ | ||||
|     uint_reg_t io_address : 64; | ||||
| #else   /* __BIG_ENDIAN__ */ | ||||
|     uint_reg_t io_address : 64; | ||||
| #endif | ||||
| 
 | ||||
|   }; | ||||
| 
 | ||||
|   /** Word access */ | ||||
|   uint_reg_t words[2]; | ||||
| } TRIO_DMA_DESC_t; | ||||
| #endif /* !defined(__ASSEMBLER__) */ | ||||
| 
 | ||||
| #endif /* !defined(__ARCH_TRIO_SHM_H__) */ | ||||
							
								
								
									
										19
									
								
								arch/tile/include/arch/trio_shm_def.h
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										19
									
								
								arch/tile/include/arch/trio_shm_def.h
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,19 @@ | ||||
| /*
 | ||||
|  * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||||
|  * | ||||
|  *   This program is free software; you can redistribute it and/or | ||||
|  *   modify it under the terms of the GNU General Public License | ||||
|  *   as published by the Free Software Foundation, version 2. | ||||
|  * | ||||
|  *   This program is distributed in the hope that it will be useful, but | ||||
|  *   WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||||
|  *   NON INFRINGEMENT.  See the GNU General Public License for | ||||
|  *   more details. | ||||
|  */ | ||||
| 
 | ||||
| /* Machine-generated file; do not edit. */ | ||||
| 
 | ||||
| #ifndef __ARCH_TRIO_SHM_DEF_H__ | ||||
| #define __ARCH_TRIO_SHM_DEF_H__ | ||||
| #endif /* !defined(__ARCH_TRIO_SHM_DEF_H__) */ | ||||
							
								
								
									
										26
									
								
								arch/tile/include/arch/usb_host.h
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										26
									
								
								arch/tile/include/arch/usb_host.h
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,26 @@ | ||||
| /*
 | ||||
|  * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||||
|  * | ||||
|  *   This program is free software; you can redistribute it and/or | ||||
|  *   modify it under the terms of the GNU General Public License | ||||
|  *   as published by the Free Software Foundation, version 2. | ||||
|  * | ||||
|  *   This program is distributed in the hope that it will be useful, but | ||||
|  *   WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||||
|  *   NON INFRINGEMENT.  See the GNU General Public License for | ||||
|  *   more details. | ||||
|  */ | ||||
| 
 | ||||
| /* Machine-generated file; do not edit. */ | ||||
| 
 | ||||
| #ifndef __ARCH_USB_HOST_H__ | ||||
| #define __ARCH_USB_HOST_H__ | ||||
| 
 | ||||
| #include <arch/abi.h> | ||||
| #include <arch/usb_host_def.h> | ||||
| 
 | ||||
| #ifndef __ASSEMBLER__ | ||||
| #endif /* !defined(__ASSEMBLER__) */ | ||||
| 
 | ||||
| #endif /* !defined(__ARCH_USB_HOST_H__) */ | ||||
							
								
								
									
										19
									
								
								arch/tile/include/arch/usb_host_def.h
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										19
									
								
								arch/tile/include/arch/usb_host_def.h
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,19 @@ | ||||
| /*
 | ||||
|  * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||||
|  * | ||||
|  *   This program is free software; you can redistribute it and/or | ||||
|  *   modify it under the terms of the GNU General Public License | ||||
|  *   as published by the Free Software Foundation, version 2. | ||||
|  * | ||||
|  *   This program is distributed in the hope that it will be useful, but | ||||
|  *   WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||||
|  *   NON INFRINGEMENT.  See the GNU General Public License for | ||||
|  *   more details. | ||||
|  */ | ||||
| 
 | ||||
| /* Machine-generated file; do not edit. */ | ||||
| 
 | ||||
| #ifndef __ARCH_USB_HOST_DEF_H__ | ||||
| #define __ARCH_USB_HOST_DEF_H__ | ||||
| #endif /* !defined(__ARCH_USB_HOST_DEF_H__) */ | ||||
| @ -9,7 +9,6 @@ header-y += hardwall.h | ||||
| generic-y += bug.h | ||||
| generic-y += bugs.h | ||||
| generic-y += cputime.h | ||||
| generic-y += device.h | ||||
| generic-y += div64.h | ||||
| generic-y += emergency-restart.h | ||||
| generic-y += errno.h | ||||
|  | ||||
| @ -27,11 +27,17 @@ | ||||
| #define L2_CACHE_ALIGN(x)	(((x)+(L2_CACHE_BYTES-1)) & -L2_CACHE_BYTES) | ||||
| 
 | ||||
| /*
 | ||||
|  * TILE-Gx is fully coherent so we don't need to define ARCH_DMA_MINALIGN. | ||||
|  * TILEPro I/O is not always coherent (networking typically uses coherent | ||||
|  * I/O, but PCI traffic does not) and setting ARCH_DMA_MINALIGN to the | ||||
|  * L2 cacheline size helps ensure that kernel heap allocations are aligned. | ||||
|  * TILE-Gx I/O is always coherent when used on hash-for-home pages. | ||||
|  * | ||||
|  * However, it's possible at runtime to request not to use hash-for-home | ||||
|  * for the kernel heap, in which case the kernel will use flush-and-inval | ||||
|  * to manage coherence.  As a result, we use L2_CACHE_BYTES for the | ||||
|  * DMA minimum alignment to avoid false sharing in the kernel heap. | ||||
|  */ | ||||
| #ifndef __tilegx__ | ||||
| #define ARCH_DMA_MINALIGN	L2_CACHE_BYTES | ||||
| #endif | ||||
| 
 | ||||
| /* use the cache line size for the L2, which is where it counts */ | ||||
| #define SMP_CACHE_BYTES_SHIFT	L2_CACHE_SHIFT | ||||
|  | ||||
| @ -21,4 +21,22 @@ | ||||
| __wsum do_csum(const unsigned char *buff, int len); | ||||
| #define do_csum do_csum | ||||
| 
 | ||||
| /*
 | ||||
|  * Return the sum of all the 16-bit subwords in a long. | ||||
|  * This sums two subwords on a 32-bit machine, and four on 64 bits. | ||||
|  * The implementation does two vector adds to capture any overflow. | ||||
|  */ | ||||
| static inline unsigned int csum_long(unsigned long x) | ||||
| { | ||||
| 	unsigned long ret; | ||||
| #ifdef __tilegx__ | ||||
| 	ret = __insn_v2sadu(x, 0); | ||||
| 	ret = __insn_v2sadu(ret, 0); | ||||
| #else | ||||
| 	ret = __insn_sadh_u(x, 0); | ||||
| 	ret = __insn_sadh_u(ret, 0); | ||||
| #endif | ||||
| 	return ret; | ||||
| } | ||||
| 
 | ||||
| #endif /* _ASM_TILE_CHECKSUM_H */ | ||||
|  | ||||
							
								
								
									
										33
									
								
								arch/tile/include/asm/device.h
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										33
									
								
								arch/tile/include/asm/device.h
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,33 @@ | ||||
| /*
 | ||||
|  * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||||
|  * | ||||
|  *   This program is free software; you can redistribute it and/or | ||||
|  *   modify it under the terms of the GNU General Public License | ||||
|  *   as published by the Free Software Foundation, version 2. | ||||
|  * | ||||
|  *   This program is distributed in the hope that it will be useful, but | ||||
|  *   WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||||
|  *   NON INFRINGEMENT.  See the GNU General Public License for | ||||
|  *   more details. | ||||
|  * Arch specific extensions to struct device | ||||
|  */ | ||||
| 
 | ||||
| #ifndef _ASM_TILE_DEVICE_H | ||||
| #define _ASM_TILE_DEVICE_H | ||||
| 
 | ||||
| struct dev_archdata { | ||||
| 	/* DMA operations on that device */ | ||||
|         struct dma_map_ops	*dma_ops; | ||||
| 
 | ||||
| 	/* Offset of the DMA address from the PA. */ | ||||
| 	dma_addr_t		dma_offset; | ||||
| 
 | ||||
| 	/* Highest DMA address that can be generated by this device. */ | ||||
| 	dma_addr_t		max_direct_dma_addr; | ||||
| }; | ||||
| 
 | ||||
| struct pdev_archdata { | ||||
| }; | ||||
| 
 | ||||
| #endif /* _ASM_TILE_DEVICE_H */ | ||||
| @ -20,69 +20,80 @@ | ||||
| #include <linux/cache.h> | ||||
| #include <linux/io.h> | ||||
| 
 | ||||
| /*
 | ||||
|  * Note that on x86 and powerpc, there is a "struct dma_mapping_ops" | ||||
|  * that is used for all the DMA operations.  For now, we don't have an | ||||
|  * equivalent on tile, because we only have a single way of doing DMA. | ||||
|  * (Tilera bug 7994 to use dma_mapping_ops.) | ||||
|  */ | ||||
| extern struct dma_map_ops *tile_dma_map_ops; | ||||
| extern struct dma_map_ops *gx_pci_dma_map_ops; | ||||
| extern struct dma_map_ops *gx_legacy_pci_dma_map_ops; | ||||
| 
 | ||||
| #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) | ||||
| #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) | ||||
| static inline struct dma_map_ops *get_dma_ops(struct device *dev) | ||||
| { | ||||
| 	if (dev && dev->archdata.dma_ops) | ||||
| 		return dev->archdata.dma_ops; | ||||
| 	else | ||||
| 		return tile_dma_map_ops; | ||||
| } | ||||
| 
 | ||||
| extern dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size, | ||||
| 			  enum dma_data_direction); | ||||
| extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, | ||||
| 			     size_t size, enum dma_data_direction); | ||||
| extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | ||||
| 	       enum dma_data_direction); | ||||
| extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg, | ||||
| 			 int nhwentries, enum dma_data_direction); | ||||
| extern dma_addr_t dma_map_page(struct device *dev, struct page *page, | ||||
| 			       unsigned long offset, size_t size, | ||||
| 			       enum dma_data_direction); | ||||
| extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address, | ||||
| 			   size_t size, enum dma_data_direction); | ||||
| extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, | ||||
| 				int nelems, enum dma_data_direction); | ||||
| extern void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | ||||
| 				   int nelems, enum dma_data_direction); | ||||
| static inline dma_addr_t get_dma_offset(struct device *dev) | ||||
| { | ||||
| 	return dev->archdata.dma_offset; | ||||
| } | ||||
| 
 | ||||
| static inline void set_dma_offset(struct device *dev, dma_addr_t off) | ||||
| { | ||||
| 	dev->archdata.dma_offset = off; | ||||
| } | ||||
| 
 | ||||
| void *dma_alloc_coherent(struct device *dev, size_t size, | ||||
| 			   dma_addr_t *dma_handle, gfp_t flag); | ||||
| static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) | ||||
| { | ||||
| 	return paddr + get_dma_offset(dev); | ||||
| } | ||||
| 
 | ||||
| void dma_free_coherent(struct device *dev, size_t size, | ||||
| 			 void *vaddr, dma_addr_t dma_handle); | ||||
| static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) | ||||
| { | ||||
| 	return daddr - get_dma_offset(dev); | ||||
| } | ||||
| 
 | ||||
| extern void dma_sync_single_for_cpu(struct device *, dma_addr_t, size_t, | ||||
| 				    enum dma_data_direction); | ||||
| extern void dma_sync_single_for_device(struct device *, dma_addr_t, | ||||
| 				       size_t, enum dma_data_direction); | ||||
| extern void dma_sync_single_range_for_cpu(struct device *, dma_addr_t, | ||||
| 					  unsigned long offset, size_t, | ||||
| 					  enum dma_data_direction); | ||||
| extern void dma_sync_single_range_for_device(struct device *, dma_addr_t, | ||||
| 					     unsigned long offset, size_t, | ||||
| 					     enum dma_data_direction); | ||||
| extern void dma_cache_sync(struct device *dev, void *vaddr, size_t, | ||||
| 			   enum dma_data_direction); | ||||
| static inline void dma_mark_clean(void *addr, size_t size) {} | ||||
| 
 | ||||
| #include <asm-generic/dma-mapping-common.h> | ||||
| 
 | ||||
| static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops) | ||||
| { | ||||
| 	dev->archdata.dma_ops = ops; | ||||
| } | ||||
| 
 | ||||
| static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) | ||||
| { | ||||
| 	if (!dev->dma_mask) | ||||
| 		return 0; | ||||
| 
 | ||||
| 	return addr + size - 1 <= *dev->dma_mask; | ||||
| } | ||||
| 
 | ||||
| static inline int | ||||
| dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | ||||
| { | ||||
| 	return 0; | ||||
| 	return get_dma_ops(dev)->mapping_error(dev, dma_addr); | ||||
| } | ||||
| 
 | ||||
| static inline int | ||||
| dma_supported(struct device *dev, u64 mask) | ||||
| { | ||||
| 	return 1; | ||||
| 	return get_dma_ops(dev)->dma_supported(dev, mask); | ||||
| } | ||||
| 
 | ||||
| static inline int | ||||
| dma_set_mask(struct device *dev, u64 mask) | ||||
| { | ||||
| 	struct dma_map_ops *dma_ops = get_dma_ops(dev); | ||||
| 
 | ||||
| 	/* Handle legacy PCI devices with limited memory addressability. */ | ||||
| 	if ((dma_ops == gx_pci_dma_map_ops) && (mask <= DMA_BIT_MASK(32))) { | ||||
| 		set_dma_ops(dev, gx_legacy_pci_dma_map_ops); | ||||
| 		set_dma_offset(dev, 0); | ||||
| 		if (mask > dev->archdata.max_direct_dma_addr) | ||||
| 			mask = dev->archdata.max_direct_dma_addr; | ||||
| 	} | ||||
| 
 | ||||
| 	if (!dev->dma_mask || !dma_supported(dev, mask)) | ||||
| 		return -EIO; | ||||
| 
 | ||||
| @ -91,4 +102,43 @@ dma_set_mask(struct device *dev, u64 mask) | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| static inline void *dma_alloc_attrs(struct device *dev, size_t size, | ||||
| 				    dma_addr_t *dma_handle, gfp_t flag, | ||||
| 				    struct dma_attrs *attrs) | ||||
| { | ||||
| 	struct dma_map_ops *dma_ops = get_dma_ops(dev); | ||||
| 	void *cpu_addr; | ||||
| 
 | ||||
| 	cpu_addr = dma_ops->alloc(dev, size, dma_handle, flag, attrs); | ||||
| 
 | ||||
| 	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr); | ||||
| 
 | ||||
| 	return cpu_addr; | ||||
| } | ||||
| 
 | ||||
| static inline void dma_free_attrs(struct device *dev, size_t size, | ||||
| 				  void *cpu_addr, dma_addr_t dma_handle, | ||||
| 				  struct dma_attrs *attrs) | ||||
| { | ||||
| 	struct dma_map_ops *dma_ops = get_dma_ops(dev); | ||||
| 
 | ||||
| 	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); | ||||
| 
 | ||||
| 	dma_ops->free(dev, size, cpu_addr, dma_handle, attrs); | ||||
| } | ||||
| 
 | ||||
| #define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL) | ||||
| #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL) | ||||
| #define dma_free_coherent(d, s, v, h) dma_free_attrs(d, s, v, h, NULL) | ||||
| #define dma_free_noncoherent(d, s, v, h) dma_free_attrs(d, s, v, h, NULL) | ||||
| 
 | ||||
| /*
 | ||||
|  * dma_alloc_noncoherent() is #defined to return coherent memory, | ||||
|  * so there's no need to do any flushing here. | ||||
|  */ | ||||
| static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, | ||||
| 				  enum dma_data_direction direction) | ||||
| { | ||||
| } | ||||
| 
 | ||||
| #endif /* _ASM_TILE_DMA_MAPPING_H */ | ||||
|  | ||||
| @ -45,14 +45,22 @@ | ||||
|  * | ||||
|  * TLB entries of such buffers will not be flushed across | ||||
|  * task switches. | ||||
|  * | ||||
|  * We don't bother with a FIX_HOLE since above the fixmaps | ||||
|  * is unmapped memory in any case. | ||||
|  */ | ||||
| enum fixed_addresses { | ||||
| #ifdef __tilegx__ | ||||
| 	/*
 | ||||
| 	 * TILEPro has unmapped memory above so the hole isn't needed, | ||||
| 	 * and in any case the hole pushes us over a single 16MB pmd. | ||||
| 	 */ | ||||
| 	FIX_HOLE, | ||||
| #endif | ||||
| #ifdef CONFIG_HIGHMEM | ||||
| 	FIX_KMAP_BEGIN,	/* reserved pte's for temporary kernel mappings */ | ||||
| 	FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, | ||||
| #endif | ||||
| #ifdef __tilegx__  /* see homecache.c */ | ||||
| 	FIX_HOMECACHE_BEGIN, | ||||
| 	FIX_HOMECACHE_END = FIX_HOMECACHE_BEGIN+(NR_CPUS)-1, | ||||
| #endif | ||||
| 	__end_of_permanent_fixed_addresses, | ||||
| 
 | ||||
|  | ||||
| @ -79,10 +79,17 @@ extern void homecache_change_page_home(struct page *, int order, int home); | ||||
| /*
 | ||||
|  * Flush a page out of whatever cache(s) it is in. | ||||
|  * This is more than just finv, since it properly handles waiting | ||||
|  * for the data to reach memory on tilepro, but it can be quite | ||||
|  * heavyweight, particularly on hash-for-home memory. | ||||
|  * for the data to reach memory, but it can be quite | ||||
|  * heavyweight, particularly on incoherent or immutable memory. | ||||
|  */ | ||||
| extern void homecache_flush_cache(struct page *, int order); | ||||
| extern void homecache_finv_page(struct page *); | ||||
| 
 | ||||
| /*
 | ||||
|  * Flush a page out of the specified home cache. | ||||
|  * Note that the specified home need not be the actual home of the page, | ||||
|  * as for example might be the case when coordinating with I/O devices. | ||||
|  */ | ||||
| extern void homecache_finv_map_page(struct page *, int home); | ||||
| 
 | ||||
| /*
 | ||||
|  * Allocate a page with the given GFP flags, home, and optionally | ||||
| @ -104,10 +111,10 @@ extern struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask, | ||||
|  * routines use homecache_change_page_home() to reset the home | ||||
|  * back to the default before returning the page to the allocator. | ||||
|  */ | ||||
| void __homecache_free_pages(struct page *, unsigned int order); | ||||
| void homecache_free_pages(unsigned long addr, unsigned int order); | ||||
| #define homecache_free_page(page) \ | ||||
|   homecache_free_pages((page), 0) | ||||
| 
 | ||||
| #define __homecache_free_page(page) __homecache_free_pages((page), 0) | ||||
| #define homecache_free_page(page) homecache_free_pages((page), 0) | ||||
| 
 | ||||
| 
 | ||||
| /*
 | ||||
|  | ||||
| @ -62,6 +62,92 @@ extern void iounmap(volatile void __iomem *addr); | ||||
| #define mm_ptov(addr)		((void *)phys_to_virt(addr)) | ||||
| #define mm_vtop(addr)		((unsigned long)virt_to_phys(addr)) | ||||
| 
 | ||||
| #if CHIP_HAS_MMIO() | ||||
| 
 | ||||
| /*
 | ||||
|  * We use inline assembly to guarantee that the compiler does not | ||||
|  * split an access into multiple byte-sized accesses as it might | ||||
|  * sometimes do if a register data structure is marked "packed". | ||||
|  * Obviously on tile we can't tolerate such an access being | ||||
|  * actually unaligned, but we want to avoid the case where the | ||||
|  * compiler conservatively would generate multiple accesses even | ||||
|  * for an aligned read or write. | ||||
|  */ | ||||
| 
 | ||||
| static inline u8 __raw_readb(const volatile void __iomem *addr) | ||||
| { | ||||
| 	return *(const volatile u8 __force *)addr; | ||||
| } | ||||
| 
 | ||||
| static inline u16 __raw_readw(const volatile void __iomem *addr) | ||||
| { | ||||
| 	u16 ret; | ||||
| 	asm volatile("ld2u %0, %1" : "=r" (ret) : "r" (addr)); | ||||
| 	barrier(); | ||||
| 	return le16_to_cpu(ret); | ||||
| } | ||||
| 
 | ||||
| static inline u32 __raw_readl(const volatile void __iomem *addr) | ||||
| { | ||||
| 	u32 ret; | ||||
| 	/* Sign-extend to conform to u32 ABI sign-extension convention. */ | ||||
| 	asm volatile("ld4s %0, %1" : "=r" (ret) : "r" (addr)); | ||||
| 	barrier(); | ||||
| 	return le32_to_cpu(ret); | ||||
| } | ||||
| 
 | ||||
| static inline u64 __raw_readq(const volatile void __iomem *addr) | ||||
| { | ||||
| 	u64 ret; | ||||
| 	asm volatile("ld %0, %1" : "=r" (ret) : "r" (addr)); | ||||
| 	barrier(); | ||||
| 	return le64_to_cpu(ret); | ||||
| } | ||||
| 
 | ||||
| static inline void __raw_writeb(u8 val, volatile void __iomem *addr) | ||||
| { | ||||
| 	*(volatile u8 __force *)addr = val; | ||||
| } | ||||
| 
 | ||||
| static inline void __raw_writew(u16 val, volatile void __iomem *addr) | ||||
| { | ||||
| 	asm volatile("st2 %0, %1" :: "r" (addr), "r" (cpu_to_le16(val))); | ||||
| } | ||||
| 
 | ||||
| static inline void __raw_writel(u32 val, volatile void __iomem *addr) | ||||
| { | ||||
| 	asm volatile("st4 %0, %1" :: "r" (addr), "r" (cpu_to_le32(val))); | ||||
| } | ||||
| 
 | ||||
| static inline void __raw_writeq(u64 val, volatile void __iomem *addr) | ||||
| { | ||||
| 	asm volatile("st %0, %1" :: "r" (addr), "r" (cpu_to_le64(val))); | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  * The on-chip I/O hardware on tilegx is configured with VA=PA for the | ||||
|  * kernel's PA range.  The low-level APIs and field names use "va" and | ||||
|  * "void *" nomenclature, to be consistent with the general notion | ||||
|  * that the addresses in question are virtualizable, but in the kernel | ||||
|  * context we are actually manipulating PA values.  (In other contexts, | ||||
|  * e.g. access from user space, we do in fact use real virtual addresses | ||||
|  * in the va fields.)  To allow readers of the code to understand what's | ||||
|  * happening, we direct their attention to this comment by using the | ||||
|  * following two functions that just duplicate __va() and __pa(). | ||||
|  */ | ||||
| typedef unsigned long tile_io_addr_t; | ||||
| static inline tile_io_addr_t va_to_tile_io_addr(void *va) | ||||
| { | ||||
| 	BUILD_BUG_ON(sizeof(phys_addr_t) != sizeof(tile_io_addr_t)); | ||||
| 	return __pa(va); | ||||
| } | ||||
| static inline void *tile_io_addr_to_va(tile_io_addr_t tile_io_addr) | ||||
| { | ||||
| 	return __va(tile_io_addr); | ||||
| } | ||||
| 
 | ||||
| #else /* CHIP_HAS_MMIO() */ | ||||
| 
 | ||||
| #ifdef CONFIG_PCI | ||||
| 
 | ||||
| extern u8 _tile_readb(unsigned long addr); | ||||
| @ -73,10 +159,19 @@ extern void _tile_writew(u16 val, unsigned long addr); | ||||
| extern void _tile_writel(u32 val, unsigned long addr); | ||||
| extern void _tile_writeq(u64 val, unsigned long addr); | ||||
| 
 | ||||
| #else | ||||
| #define __raw_readb(addr) _tile_readb((unsigned long)addr) | ||||
| #define __raw_readw(addr) _tile_readw((unsigned long)addr) | ||||
| #define __raw_readl(addr) _tile_readl((unsigned long)addr) | ||||
| #define __raw_readq(addr) _tile_readq((unsigned long)addr) | ||||
| #define __raw_writeb(val, addr) _tile_writeb(val, (unsigned long)addr) | ||||
| #define __raw_writew(val, addr) _tile_writew(val, (unsigned long)addr) | ||||
| #define __raw_writel(val, addr) _tile_writel(val, (unsigned long)addr) | ||||
| #define __raw_writeq(val, addr) _tile_writeq(val, (unsigned long)addr) | ||||
| 
 | ||||
| #else /* CONFIG_PCI */ | ||||
| 
 | ||||
| /*
 | ||||
|  * The Tile architecture does not support IOMEM unless PCI is enabled. | ||||
|  * The tilepro architecture does not support IOMEM unless PCI is enabled. | ||||
|  * Unfortunately we can't yet simply not declare these methods, | ||||
|  * since some generic code that compiles into the kernel, but | ||||
|  * we never run, uses them unconditionally. | ||||
| @ -88,65 +183,58 @@ static inline int iomem_panic(void) | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| static inline u8 _tile_readb(unsigned long addr) | ||||
| static inline u8 readb(unsigned long addr) | ||||
| { | ||||
| 	return iomem_panic(); | ||||
| } | ||||
| 
 | ||||
| static inline u16 _tile_readw(unsigned long addr) | ||||
| static inline u16 _readw(unsigned long addr) | ||||
| { | ||||
| 	return iomem_panic(); | ||||
| } | ||||
| 
 | ||||
| static inline u32 _tile_readl(unsigned long addr) | ||||
| static inline u32 readl(unsigned long addr) | ||||
| { | ||||
| 	return iomem_panic(); | ||||
| } | ||||
| 
 | ||||
| static inline u64 _tile_readq(unsigned long addr) | ||||
| static inline u64 readq(unsigned long addr) | ||||
| { | ||||
| 	return iomem_panic(); | ||||
| } | ||||
| 
 | ||||
| static inline void _tile_writeb(u8  val, unsigned long addr) | ||||
| static inline void writeb(u8  val, unsigned long addr) | ||||
| { | ||||
| 	iomem_panic(); | ||||
| } | ||||
| 
 | ||||
| static inline void _tile_writew(u16 val, unsigned long addr) | ||||
| static inline void writew(u16 val, unsigned long addr) | ||||
| { | ||||
| 	iomem_panic(); | ||||
| } | ||||
| 
 | ||||
| static inline void _tile_writel(u32 val, unsigned long addr) | ||||
| static inline void writel(u32 val, unsigned long addr) | ||||
| { | ||||
| 	iomem_panic(); | ||||
| } | ||||
| 
 | ||||
| static inline void _tile_writeq(u64 val, unsigned long addr) | ||||
| static inline void writeq(u64 val, unsigned long addr) | ||||
| { | ||||
| 	iomem_panic(); | ||||
| } | ||||
| 
 | ||||
| #endif | ||||
| #endif /* CONFIG_PCI */ | ||||
| 
 | ||||
| #define readb(addr) _tile_readb((unsigned long)addr) | ||||
| #define readw(addr) _tile_readw((unsigned long)addr) | ||||
| #define readl(addr) _tile_readl((unsigned long)addr) | ||||
| #define readq(addr) _tile_readq((unsigned long)addr) | ||||
| #define writeb(val, addr) _tile_writeb(val, (unsigned long)addr) | ||||
| #define writew(val, addr) _tile_writew(val, (unsigned long)addr) | ||||
| #define writel(val, addr) _tile_writel(val, (unsigned long)addr) | ||||
| #define writeq(val, addr) _tile_writeq(val, (unsigned long)addr) | ||||
| #endif /* CHIP_HAS_MMIO() */ | ||||
| 
 | ||||
| #define __raw_readb readb | ||||
| #define __raw_readw readw | ||||
| #define __raw_readl readl | ||||
| #define __raw_readq readq | ||||
| #define __raw_writeb writeb | ||||
| #define __raw_writew writew | ||||
| #define __raw_writel writel | ||||
| #define __raw_writeq writeq | ||||
| #define readb __raw_readb | ||||
| #define readw __raw_readw | ||||
| #define readl __raw_readl | ||||
| #define readq __raw_readq | ||||
| #define writeb __raw_writeb | ||||
| #define writew __raw_writew | ||||
| #define writel __raw_writel | ||||
| #define writeq __raw_writeq | ||||
| 
 | ||||
| #define readb_relaxed readb | ||||
| #define readw_relaxed readw | ||||
|  | ||||
| @ -1,33 +0,0 @@ | ||||
| /*
 | ||||
|  * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||||
|  * | ||||
|  *   This program is free software; you can redistribute it and/or | ||||
|  *   modify it under the terms of the GNU General Public License | ||||
|  *   as published by the Free Software Foundation, version 2. | ||||
|  * | ||||
|  *   This program is distributed in the hope that it will be useful, but | ||||
|  *   WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||||
|  *   NON INFRINGEMENT.  See the GNU General Public License for | ||||
|  *   more details. | ||||
|  * | ||||
|  * The hypervisor's memory controller profiling infrastructure allows | ||||
|  * the programmer to find out what fraction of the available memory | ||||
|  * bandwidth is being consumed at each memory controller.  The | ||||
|  * profiler provides start, stop, and clear operations to allows | ||||
|  * profiling over a specific time window, as well as an interface for | ||||
|  * reading the most recent profile values. | ||||
|  * | ||||
|  * This header declares IOCTL codes necessary to control memprof. | ||||
|  */ | ||||
| #ifndef _ASM_TILE_MEMPROF_H | ||||
| #define _ASM_TILE_MEMPROF_H | ||||
| 
 | ||||
| #include <linux/ioctl.h> | ||||
| 
 | ||||
| #define MEMPROF_IOCTL_TYPE 0xB4 | ||||
| #define MEMPROF_IOCTL_START _IO(MEMPROF_IOCTL_TYPE, 0) | ||||
| #define MEMPROF_IOCTL_STOP _IO(MEMPROF_IOCTL_TYPE, 1) | ||||
| #define MEMPROF_IOCTL_CLEAR _IO(MEMPROF_IOCTL_TYPE, 2) | ||||
| 
 | ||||
| #endif /* _ASM_TILE_MEMPROF_H */ | ||||
| @ -174,7 +174,9 @@ static inline __attribute_const__ int get_order(unsigned long size) | ||||
| #define MEM_LOW_END		(HALF_VA_SPACE - 1)         /* low half */ | ||||
| #define MEM_HIGH_START		(-HALF_VA_SPACE)            /* high half */ | ||||
| #define PAGE_OFFSET		MEM_HIGH_START | ||||
| #define _VMALLOC_START		_AC(0xfffffff500000000, UL) /* 4 GB */ | ||||
| #define FIXADDR_BASE		_AC(0xfffffff400000000, UL) /* 4 GB */ | ||||
| #define FIXADDR_TOP		_AC(0xfffffff500000000, UL) /* 4 GB */ | ||||
| #define _VMALLOC_START		FIXADDR_TOP | ||||
| #define HUGE_VMAP_BASE		_AC(0xfffffff600000000, UL) /* 4 GB */ | ||||
| #define MEM_SV_START		_AC(0xfffffff700000000, UL) /* 256 MB */ | ||||
| #define MEM_SV_INTRPT		MEM_SV_START | ||||
| @ -185,9 +187,6 @@ static inline __attribute_const__ int get_order(unsigned long size) | ||||
| /* Highest DTLB address we will use */ | ||||
| #define KERNEL_HIGH_VADDR	MEM_SV_START | ||||
| 
 | ||||
| /* Since we don't currently provide any fixmaps, we use an impossible VA. */ | ||||
| #define FIXADDR_TOP             MEM_HV_START | ||||
| 
 | ||||
| #else /* !__tilegx__ */ | ||||
| 
 | ||||
| /*
 | ||||
|  | ||||
| @ -15,9 +15,13 @@ | ||||
| #ifndef _ASM_TILE_PCI_H | ||||
| #define _ASM_TILE_PCI_H | ||||
| 
 | ||||
| #include <linux/dma-mapping.h> | ||||
| #include <linux/pci.h> | ||||
| #include <linux/numa.h> | ||||
| #include <asm-generic/pci_iomap.h> | ||||
| 
 | ||||
| #ifndef __tilegx__ | ||||
| 
 | ||||
| /*
 | ||||
|  * Structure of a PCI controller (host bridge) | ||||
|  */ | ||||
| @ -40,6 +44,16 @@ struct pci_controller { | ||||
| 	struct resource mem_resources[3]; | ||||
| }; | ||||
| 
 | ||||
| /*
 | ||||
|  * This flag tells if the platform is TILEmpower that needs | ||||
|  * special configuration for the PLX switch chip. | ||||
|  */ | ||||
| extern int tile_plx_gen1; | ||||
| 
 | ||||
| static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {} | ||||
| 
 | ||||
| #define	TILE_NUM_PCIE	2 | ||||
| 
 | ||||
| /*
 | ||||
|  * The hypervisor maps the entirety of CPA-space as bus addresses, so | ||||
|  * bus addresses are physical addresses.  The networking and block | ||||
| @ -47,15 +61,135 @@ struct pci_controller { | ||||
|  */ | ||||
| #define PCI_DMA_BUS_IS_PHYS     1 | ||||
| 
 | ||||
| /* generic pci stuff */ | ||||
| #include <asm-generic/pci.h> | ||||
| 
 | ||||
| #else | ||||
| 
 | ||||
| #include <asm/page.h> | ||||
| #include <gxio/trio.h> | ||||
| 
 | ||||
| /**
 | ||||
|  * We reserve the hugepage-size address range at the top of the 64-bit address | ||||
|  * space to serve as the PCI window, emulating the BAR0 space of an endpoint | ||||
|  * device. This window is used by the chip-to-chip applications running on | ||||
|  * the RC node. The reason for carving out this window is that Mem-Maps that | ||||
|  * back up this window will not overlap with those that map the real physical | ||||
|  * memory. | ||||
|  */ | ||||
| #define PCIE_HOST_BAR0_SIZE		HPAGE_SIZE | ||||
| #define PCIE_HOST_BAR0_START		HPAGE_MASK | ||||
| 
 | ||||
| /**
 | ||||
|  * The first PAGE_SIZE of the above "BAR" window is mapped to the | ||||
|  * gxpci_host_regs structure. | ||||
|  */ | ||||
| #define PCIE_HOST_REGS_SIZE		PAGE_SIZE | ||||
| 
 | ||||
| /*
 | ||||
|  * This is the PCI address where the Mem-Map interrupt regions start. | ||||
|  * We use the 2nd to the last huge page of the 64-bit address space. | ||||
|  * The last huge page is used for the rootcomplex "bar", for C2C purpose. | ||||
|  */ | ||||
| #define	MEM_MAP_INTR_REGIONS_BASE	(HPAGE_MASK - HPAGE_SIZE) | ||||
| 
 | ||||
| /*
 | ||||
|  * Each Mem-Map interrupt region occupies 4KB. | ||||
|  */ | ||||
| #define	MEM_MAP_INTR_REGION_SIZE	(1 << TRIO_MAP_MEM_LIM__ADDR_SHIFT) | ||||
| 
 | ||||
| /*
 | ||||
|  * Allocate the PCI BAR window right below 4GB. | ||||
|  */ | ||||
| #define	TILE_PCI_BAR_WINDOW_TOP		(1ULL << 32) | ||||
| 
 | ||||
| /*
 | ||||
|  * Allocate 1GB for the PCI BAR window. | ||||
|  */ | ||||
| #define	TILE_PCI_BAR_WINDOW_SIZE	(1 << 30) | ||||
| 
 | ||||
| /*
 | ||||
|  * This is the highest bus address targeting the host memory that | ||||
|  * can be generated by legacy PCI devices with 32-bit or less | ||||
|  * DMA capability, dictated by the BAR window size and location. | ||||
|  */ | ||||
| #define	TILE_PCI_MAX_DIRECT_DMA_ADDRESS \ | ||||
| 	(TILE_PCI_BAR_WINDOW_TOP - TILE_PCI_BAR_WINDOW_SIZE - 1) | ||||
| 
 | ||||
| /*
 | ||||
|  * We shift the PCI bus range for all the physical memory up by the whole PA | ||||
|  * range. The corresponding CPA of an incoming PCI request will be the PCI | ||||
|  * address minus TILE_PCI_MEM_MAP_BASE_OFFSET. This also implies | ||||
|  * that the 64-bit capable devices will be given DMA addresses as | ||||
|  * the CPA plus TILE_PCI_MEM_MAP_BASE_OFFSET. To support 32-bit | ||||
|  * devices, we create a separate map region that handles the low | ||||
|  * 4GB. | ||||
|  */ | ||||
| #define	TILE_PCI_MEM_MAP_BASE_OFFSET	(1ULL << CHIP_PA_WIDTH()) | ||||
| 
 | ||||
| /*
 | ||||
|  * Start of the PCI memory resource, which starts at the end of the | ||||
|  * maximum system physical RAM address. | ||||
|  */ | ||||
| #define	TILE_PCI_MEM_START	(1ULL << CHIP_PA_WIDTH()) | ||||
| 
 | ||||
| /*
 | ||||
|  * Structure of a PCI controller (host bridge) on Gx. | ||||
|  */ | ||||
| struct pci_controller { | ||||
| 
 | ||||
| 	/* Pointer back to the TRIO that this PCIe port is connected to. */ | ||||
| 	gxio_trio_context_t *trio; | ||||
| 	int mac;		/* PCIe mac index on the TRIO shim */ | ||||
| 	int trio_index;		/* Index of TRIO shim that contains the MAC. */ | ||||
| 
 | ||||
| 	int pio_mem_index;	/* PIO region index for memory access */ | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Mem-Map regions for all the memory controllers so that Linux can | ||||
| 	 * map all of its physical memory space to the PCI bus. | ||||
| 	 */ | ||||
| 	int mem_maps[MAX_NUMNODES]; | ||||
| 
 | ||||
| 	int index;		/* PCI domain number */ | ||||
| 	struct pci_bus *root_bus; | ||||
| 
 | ||||
| 	/* PCI memory space resource for this controller. */ | ||||
| 	struct resource mem_space; | ||||
| 	char mem_space_name[32]; | ||||
| 
 | ||||
| 	uint64_t mem_offset;	/* cpu->bus memory mapping offset. */ | ||||
| 
 | ||||
| 	int first_busno; | ||||
| 
 | ||||
| 	struct pci_ops *ops; | ||||
| 
 | ||||
| 	/* Table that maps the INTx numbers to Linux irq numbers. */ | ||||
| 	int irq_intx_table[4]; | ||||
| 
 | ||||
| 	/* Address ranges that are routed to this controller/bridge. */ | ||||
| 	struct resource mem_resources[3]; | ||||
| }; | ||||
| 
 | ||||
| extern struct pci_controller pci_controllers[TILEGX_NUM_TRIO * TILEGX_TRIO_PCIES]; | ||||
| extern gxio_trio_context_t trio_contexts[TILEGX_NUM_TRIO]; | ||||
| 
 | ||||
| extern void pci_iounmap(struct pci_dev *dev, void __iomem *); | ||||
| 
 | ||||
| /*
 | ||||
|  * The PCI address space does not equal the physical memory address | ||||
|  * space (we have an IOMMU). The IDE and SCSI device layers use this | ||||
|  * boolean for bounce buffer decisions. | ||||
|  */ | ||||
| #define PCI_DMA_BUS_IS_PHYS     0 | ||||
| 
 | ||||
| #endif /* __tilegx__ */ | ||||
| 
 | ||||
| int __init tile_pci_init(void); | ||||
| int __init pcibios_init(void); | ||||
| 
 | ||||
| static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {} | ||||
| 
 | ||||
| void __devinit pcibios_fixup_bus(struct pci_bus *bus); | ||||
| 
 | ||||
| #define	TILE_NUM_PCIE	2 | ||||
| 
 | ||||
| #define pci_domain_nr(bus) (((struct pci_controller *)(bus)->sysdata)->index) | ||||
| 
 | ||||
| /*
 | ||||
| @ -79,19 +213,10 @@ static inline int pcibios_assign_all_busses(void) | ||||
| #define PCIBIOS_MIN_MEM		0 | ||||
| #define PCIBIOS_MIN_IO		0 | ||||
| 
 | ||||
| /*
 | ||||
|  * This flag tells if the platform is TILEmpower that needs | ||||
|  * special configuration for the PLX switch chip. | ||||
|  */ | ||||
| extern int tile_plx_gen1; | ||||
| 
 | ||||
| /* Use any cpu for PCI. */ | ||||
| #define cpumask_of_pcibus(bus) cpu_online_mask | ||||
| 
 | ||||
| /* implement the pci_ DMA API in terms of the generic device dma_ one */ | ||||
| #include <asm-generic/pci-dma-compat.h> | ||||
| 
 | ||||
| /* generic pci stuff */ | ||||
| #include <asm-generic/pci.h> | ||||
| 
 | ||||
| #endif /* _ASM_TILE_PCI_H */ | ||||
|  | ||||
							
								
								
									
										40
									
								
								arch/tile/include/gxio/common.h
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										40
									
								
								arch/tile/include/gxio/common.h
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,40 @@ | ||||
| /*
 | ||||
|  * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||||
|  * | ||||
|  *   This program is free software; you can redistribute it and/or | ||||
|  *   modify it under the terms of the GNU General Public License | ||||
|  *   as published by the Free Software Foundation, version 2. | ||||
|  * | ||||
|  *   This program is distributed in the hope that it will be useful, but | ||||
|  *   WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||||
|  *   NON INFRINGEMENT.  See the GNU General Public License for | ||||
|  *   more details. | ||||
|  */ | ||||
| 
 | ||||
| #ifndef _GXIO_COMMON_H_ | ||||
| #define _GXIO_COMMON_H_ | ||||
| 
 | ||||
| /*
 | ||||
|  * Routines shared between the various GXIO device components. | ||||
|  */ | ||||
| 
 | ||||
| #include <hv/iorpc.h> | ||||
| 
 | ||||
| #include <linux/types.h> | ||||
| #include <linux/compiler.h> | ||||
| #include <linux/io.h> | ||||
| 
 | ||||
| /* Define the standard gxio MMIO functions using kernel functions. */ | ||||
| #define __gxio_mmio_read8(addr)		readb(addr) | ||||
| #define __gxio_mmio_read16(addr)	readw(addr) | ||||
| #define __gxio_mmio_read32(addr)	readl(addr) | ||||
| #define __gxio_mmio_read64(addr)	readq(addr) | ||||
| #define __gxio_mmio_write8(addr, val)	writeb((val), (addr)) | ||||
| #define __gxio_mmio_write16(addr, val)	writew((val), (addr)) | ||||
| #define __gxio_mmio_write32(addr, val)	writel((val), (addr)) | ||||
| #define __gxio_mmio_write64(addr, val)	writeq((val), (addr)) | ||||
| #define __gxio_mmio_read(addr)		__gxio_mmio_read64(addr) | ||||
| #define __gxio_mmio_write(addr, val)	__gxio_mmio_write64((addr), (val)) | ||||
| 
 | ||||
| #endif /* !_GXIO_COMMON_H_ */ | ||||
							
								
								
									
										161
									
								
								arch/tile/include/gxio/dma_queue.h
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										161
									
								
								arch/tile/include/gxio/dma_queue.h
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,161 @@ | ||||
| /*
 | ||||
|  * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||||
|  * | ||||
|  *   This program is free software; you can redistribute it and/or | ||||
|  *   modify it under the terms of the GNU General Public License | ||||
|  *   as published by the Free Software Foundation, version 2. | ||||
|  * | ||||
|  *   This program is distributed in the hope that it will be useful, but | ||||
|  *   WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||||
|  *   NON INFRINGEMENT.  See the GNU General Public License for | ||||
|  *   more details. | ||||
|  */ | ||||
| 
 | ||||
| #ifndef _GXIO_DMA_QUEUE_H_ | ||||
| #define _GXIO_DMA_QUEUE_H_ | ||||
| 
 | ||||
| /*
 | ||||
|  * DMA queue management APIs shared between TRIO and mPIPE. | ||||
|  */ | ||||
| 
 | ||||
| #include "common.h" | ||||
| 
 | ||||
| /* The credit counter lives in the high 32 bits. */ | ||||
| #define DMA_QUEUE_CREDIT_SHIFT 32 | ||||
| 
 | ||||
| /*
 | ||||
|  * State object that tracks a DMA queue's head and tail indices, as | ||||
|  * well as the number of commands posted and completed.  The | ||||
|  * structure is accessed via a thread-safe, lock-free algorithm. | ||||
|  */ | ||||
| typedef struct { | ||||
| 	/*
 | ||||
| 	 * Address of a MPIPE_EDMA_POST_REGION_VAL_t, | ||||
| 	 * TRIO_PUSH_DMA_REGION_VAL_t, or TRIO_PULL_DMA_REGION_VAL_t | ||||
| 	 * register.  These register have identical encodings and provide | ||||
| 	 * information about how many commands have been processed. | ||||
| 	 */ | ||||
| 	void *post_region_addr; | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * A lazily-updated count of how many edescs the hardware has | ||||
| 	 * completed. | ||||
| 	 */ | ||||
| 	uint64_t hw_complete_count __attribute__ ((aligned(64))); | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * High 32 bits are a count of available egress command credits, | ||||
| 	 * low 24 bits are the next egress "slot". | ||||
| 	 */ | ||||
| 	int64_t credits_and_next_index; | ||||
| 
 | ||||
| } __gxio_dma_queue_t; | ||||
| 
 | ||||
| /* Initialize a dma queue. */ | ||||
| extern void __gxio_dma_queue_init(__gxio_dma_queue_t *dma_queue, | ||||
| 				  void *post_region_addr, | ||||
| 				  unsigned int num_entries); | ||||
| 
 | ||||
| /*
 | ||||
|  * Update the "credits_and_next_index" and "hw_complete_count" fields | ||||
|  * based on pending hardware completions.  Note that some other thread | ||||
|  * may have already done this and, importantly, may still be in the | ||||
|  * process of updating "credits_and_next_index". | ||||
|  */ | ||||
| extern void __gxio_dma_queue_update_credits(__gxio_dma_queue_t *dma_queue); | ||||
| 
 | ||||
| /* Wait for credits to become available. */ | ||||
| extern int64_t __gxio_dma_queue_wait_for_credits(__gxio_dma_queue_t *dma_queue, | ||||
| 						 int64_t modifier); | ||||
| 
 | ||||
| /* Reserve slots in the queue, optionally waiting for slots to become
 | ||||
|  * available, and optionally returning a "completion_slot" suitable for | ||||
|  * direct comparison to "hw_complete_count". | ||||
|  */ | ||||
| static inline int64_t __gxio_dma_queue_reserve(__gxio_dma_queue_t *dma_queue, | ||||
| 					       unsigned int num, bool wait, | ||||
| 					       bool completion) | ||||
| { | ||||
| 	uint64_t slot; | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Try to reserve 'num' egress command slots.  We do this by | ||||
| 	 * constructing a constant that subtracts N credits and adds N to | ||||
| 	 * the index, and using fetchaddgez to only apply it if the credits | ||||
| 	 * count doesn't go negative. | ||||
| 	 */ | ||||
| 	int64_t modifier = (((int64_t)(-num)) << DMA_QUEUE_CREDIT_SHIFT) | num; | ||||
| 	int64_t old = | ||||
| 		__insn_fetchaddgez(&dma_queue->credits_and_next_index, | ||||
| 				   modifier); | ||||
| 
 | ||||
| 	if (unlikely(old + modifier < 0)) { | ||||
| 		/*
 | ||||
| 		 * We're out of credits.  Try once to get more by checking for | ||||
| 		 * completed egress commands.  If that fails, wait or fail. | ||||
| 		 */ | ||||
| 		__gxio_dma_queue_update_credits(dma_queue); | ||||
| 		old = __insn_fetchaddgez(&dma_queue->credits_and_next_index, | ||||
| 					 modifier); | ||||
| 		if (old + modifier < 0) { | ||||
| 			if (wait) | ||||
| 				old = __gxio_dma_queue_wait_for_credits | ||||
| 					(dma_queue, modifier); | ||||
| 			else | ||||
| 				return GXIO_ERR_DMA_CREDITS; | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	/* The bottom 24 bits of old encode the "slot". */ | ||||
| 	slot = (old & 0xffffff); | ||||
| 
 | ||||
| 	if (completion) { | ||||
| 		/*
 | ||||
| 		 * A "completion_slot" is a "slot" which can be compared to | ||||
| 		 * "hw_complete_count" at any time in the future.  To convert | ||||
| 		 * "slot" into a "completion_slot", we access "hw_complete_count" | ||||
| 		 * once (knowing that we have reserved a slot, and thus, it will | ||||
| 		 * be "basically" accurate), and combine its high 40 bits with | ||||
| 		 * the 24 bit "slot", and handle "wrapping" by adding "1 << 24" | ||||
| 		 * if the result is LESS than "hw_complete_count". | ||||
| 		 */ | ||||
| 		uint64_t complete; | ||||
| 		complete = ACCESS_ONCE(dma_queue->hw_complete_count); | ||||
| 		slot |= (complete & 0xffffffffff000000); | ||||
| 		if (slot < complete) | ||||
| 			slot += 0x1000000; | ||||
| 	} | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * If any of our slots mod 256 were equivalent to 0, go ahead and | ||||
| 	 * collect some egress credits, and update "hw_complete_count", and | ||||
| 	 * make sure the index doesn't overflow into the credits. | ||||
| 	 */ | ||||
| 	if (unlikely(((old + num) & 0xff) < num)) { | ||||
| 		__gxio_dma_queue_update_credits(dma_queue); | ||||
| 
 | ||||
| 		/* Make sure the index doesn't overflow into the credits. */ | ||||
| #ifdef __BIG_ENDIAN__ | ||||
| 		*(((uint8_t *)&dma_queue->credits_and_next_index) + 4) = 0; | ||||
| #else | ||||
| 		*(((uint8_t *)&dma_queue->credits_and_next_index) + 3) = 0; | ||||
| #endif | ||||
| 	} | ||||
| 
 | ||||
| 	return slot; | ||||
| } | ||||
| 
 | ||||
| /* Non-inlinable "__gxio_dma_queue_reserve(..., true)". */ | ||||
| extern int64_t __gxio_dma_queue_reserve_aux(__gxio_dma_queue_t *dma_queue, | ||||
| 					    unsigned int num, int wait); | ||||
| 
 | ||||
| /* Check whether a particular "completion slot" has completed.
 | ||||
|  * | ||||
|  * Note that this function requires a "completion slot", and thus | ||||
|  * cannot be used with the result of any "reserve_fast" function. | ||||
|  */ | ||||
| extern int __gxio_dma_queue_is_complete(__gxio_dma_queue_t *dma_queue, | ||||
| 					int64_t completion_slot, int update); | ||||
| 
 | ||||
| #endif /* !_GXIO_DMA_QUEUE_H_ */ | ||||
							
								
								
									
										38
									
								
								arch/tile/include/gxio/iorpc_globals.h
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										38
									
								
								arch/tile/include/gxio/iorpc_globals.h
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,38 @@ | ||||
| /*
 | ||||
|  * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||||
|  * | ||||
|  *   This program is free software; you can redistribute it and/or | ||||
|  *   modify it under the terms of the GNU General Public License | ||||
|  *   as published by the Free Software Foundation, version 2. | ||||
|  * | ||||
|  *   This program is distributed in the hope that it will be useful, but | ||||
|  *   WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||||
|  *   NON INFRINGEMENT.  See the GNU General Public License for | ||||
|  *   more details. | ||||
|  */ | ||||
| 
 | ||||
| /* This file is machine-generated; DO NOT EDIT! */ | ||||
| #ifndef __IORPC_LINUX_RPC_H__ | ||||
| #define __IORPC_LINUX_RPC_H__ | ||||
| 
 | ||||
| #include <hv/iorpc.h> | ||||
| 
 | ||||
| #include <linux/string.h> | ||||
| #include <linux/module.h> | ||||
| #include <asm/pgtable.h> | ||||
| 
 | ||||
| #define IORPC_OP_ARM_POLLFD            IORPC_OPCODE(IORPC_FORMAT_KERNEL_POLLFD, 0x9000) | ||||
| #define IORPC_OP_CLOSE_POLLFD          IORPC_OPCODE(IORPC_FORMAT_KERNEL_POLLFD, 0x9001) | ||||
| #define IORPC_OP_GET_MMIO_BASE         IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000) | ||||
| #define IORPC_OP_CHECK_MMIO_OFFSET     IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001) | ||||
| 
 | ||||
| int __iorpc_arm_pollfd(int fd, int pollfd_cookie); | ||||
| 
 | ||||
| int __iorpc_close_pollfd(int fd, int pollfd_cookie); | ||||
| 
 | ||||
| int __iorpc_get_mmio_base(int fd, HV_PTE *base); | ||||
| 
 | ||||
| int __iorpc_check_mmio_offset(int fd, unsigned long offset, unsigned long size); | ||||
| 
 | ||||
| #endif /* !__IORPC_LINUX_RPC_H__ */ | ||||
							
								
								
									
										136
									
								
								arch/tile/include/gxio/iorpc_mpipe.h
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										136
									
								
								arch/tile/include/gxio/iorpc_mpipe.h
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,136 @@ | ||||
| /*
 | ||||
|  * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||||
|  * | ||||
|  *   This program is free software; you can redistribute it and/or | ||||
|  *   modify it under the terms of the GNU General Public License | ||||
|  *   as published by the Free Software Foundation, version 2. | ||||
|  * | ||||
|  *   This program is distributed in the hope that it will be useful, but | ||||
|  *   WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||||
|  *   NON INFRINGEMENT.  See the GNU General Public License for | ||||
|  *   more details. | ||||
|  */ | ||||
| 
 | ||||
| /* This file is machine-generated; DO NOT EDIT! */ | ||||
| #ifndef __GXIO_MPIPE_LINUX_RPC_H__ | ||||
| #define __GXIO_MPIPE_LINUX_RPC_H__ | ||||
| 
 | ||||
| #include <hv/iorpc.h> | ||||
| 
 | ||||
| #include <hv/drv_mpipe_intf.h> | ||||
| #include <asm/page.h> | ||||
| #include <gxio/kiorpc.h> | ||||
| #include <gxio/mpipe.h> | ||||
| #include <linux/string.h> | ||||
| #include <linux/module.h> | ||||
| #include <asm/pgtable.h> | ||||
| 
 | ||||
| #define GXIO_MPIPE_OP_ALLOC_BUFFER_STACKS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1200) | ||||
| #define GXIO_MPIPE_OP_INIT_BUFFER_STACK_AUX IORPC_OPCODE(IORPC_FORMAT_KERNEL_MEM, 0x1201) | ||||
| 
 | ||||
| #define GXIO_MPIPE_OP_ALLOC_NOTIF_RINGS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1203) | ||||
| #define GXIO_MPIPE_OP_INIT_NOTIF_RING_AUX IORPC_OPCODE(IORPC_FORMAT_KERNEL_MEM, 0x1204) | ||||
| #define GXIO_MPIPE_OP_REQUEST_NOTIF_RING_INTERRUPT IORPC_OPCODE(IORPC_FORMAT_KERNEL_INTERRUPT, 0x1205) | ||||
| #define GXIO_MPIPE_OP_ENABLE_NOTIF_RING_INTERRUPT IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1206) | ||||
| #define GXIO_MPIPE_OP_ALLOC_NOTIF_GROUPS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1207) | ||||
| #define GXIO_MPIPE_OP_INIT_NOTIF_GROUP IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1208) | ||||
| #define GXIO_MPIPE_OP_ALLOC_BUCKETS    IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1209) | ||||
| #define GXIO_MPIPE_OP_INIT_BUCKET      IORPC_OPCODE(IORPC_FORMAT_NONE, 0x120a) | ||||
| #define GXIO_MPIPE_OP_ALLOC_EDMA_RINGS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x120b) | ||||
| #define GXIO_MPIPE_OP_INIT_EDMA_RING_AUX IORPC_OPCODE(IORPC_FORMAT_KERNEL_MEM, 0x120c) | ||||
| 
 | ||||
| #define GXIO_MPIPE_OP_COMMIT_RULES     IORPC_OPCODE(IORPC_FORMAT_NONE, 0x120f) | ||||
| #define GXIO_MPIPE_OP_REGISTER_CLIENT_MEMORY IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1210) | ||||
| #define GXIO_MPIPE_OP_LINK_OPEN_AUX    IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1211) | ||||
| #define GXIO_MPIPE_OP_LINK_CLOSE_AUX   IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1212) | ||||
| 
 | ||||
| #define GXIO_MPIPE_OP_GET_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x121e) | ||||
| #define GXIO_MPIPE_OP_SET_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x121f) | ||||
| #define GXIO_MPIPE_OP_ADJUST_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1220) | ||||
| #define GXIO_MPIPE_OP_ARM_POLLFD       IORPC_OPCODE(IORPC_FORMAT_KERNEL_POLLFD, 0x9000) | ||||
| #define GXIO_MPIPE_OP_CLOSE_POLLFD     IORPC_OPCODE(IORPC_FORMAT_KERNEL_POLLFD, 0x9001) | ||||
| #define GXIO_MPIPE_OP_GET_MMIO_BASE    IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000) | ||||
| #define GXIO_MPIPE_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001) | ||||
| 
 | ||||
| int gxio_mpipe_alloc_buffer_stacks(gxio_mpipe_context_t * context, | ||||
| 				   unsigned int count, unsigned int first, | ||||
| 				   unsigned int flags); | ||||
| 
 | ||||
| int gxio_mpipe_init_buffer_stack_aux(gxio_mpipe_context_t * context, | ||||
| 				     void *mem_va, size_t mem_size, | ||||
| 				     unsigned int mem_flags, unsigned int stack, | ||||
| 				     unsigned int buffer_size_enum); | ||||
| 
 | ||||
| 
 | ||||
| int gxio_mpipe_alloc_notif_rings(gxio_mpipe_context_t * context, | ||||
| 				 unsigned int count, unsigned int first, | ||||
| 				 unsigned int flags); | ||||
| 
 | ||||
| int gxio_mpipe_init_notif_ring_aux(gxio_mpipe_context_t * context, void *mem_va, | ||||
| 				   size_t mem_size, unsigned int mem_flags, | ||||
| 				   unsigned int ring); | ||||
| 
 | ||||
| int gxio_mpipe_request_notif_ring_interrupt(gxio_mpipe_context_t * context, | ||||
| 					    int inter_x, int inter_y, | ||||
| 					    int inter_ipi, int inter_event, | ||||
| 					    unsigned int ring); | ||||
| 
 | ||||
| int gxio_mpipe_enable_notif_ring_interrupt(gxio_mpipe_context_t * context, | ||||
| 					   unsigned int ring); | ||||
| 
 | ||||
| int gxio_mpipe_alloc_notif_groups(gxio_mpipe_context_t * context, | ||||
| 				  unsigned int count, unsigned int first, | ||||
| 				  unsigned int flags); | ||||
| 
 | ||||
| int gxio_mpipe_init_notif_group(gxio_mpipe_context_t * context, | ||||
| 				unsigned int group, | ||||
| 				gxio_mpipe_notif_group_bits_t bits); | ||||
| 
 | ||||
| int gxio_mpipe_alloc_buckets(gxio_mpipe_context_t * context, unsigned int count, | ||||
| 			     unsigned int first, unsigned int flags); | ||||
| 
 | ||||
| int gxio_mpipe_init_bucket(gxio_mpipe_context_t * context, unsigned int bucket, | ||||
| 			   MPIPE_LBL_INIT_DAT_BSTS_TBL_t bucket_info); | ||||
| 
 | ||||
| int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t * context, | ||||
| 				unsigned int count, unsigned int first, | ||||
| 				unsigned int flags); | ||||
| 
 | ||||
| int gxio_mpipe_init_edma_ring_aux(gxio_mpipe_context_t * context, void *mem_va, | ||||
| 				  size_t mem_size, unsigned int mem_flags, | ||||
| 				  unsigned int ring, unsigned int channel); | ||||
| 
 | ||||
| 
 | ||||
| int gxio_mpipe_commit_rules(gxio_mpipe_context_t * context, const void *blob, | ||||
| 			    size_t blob_size); | ||||
| 
 | ||||
| int gxio_mpipe_register_client_memory(gxio_mpipe_context_t * context, | ||||
| 				      unsigned int iotlb, HV_PTE pte, | ||||
| 				      unsigned int flags); | ||||
| 
 | ||||
| int gxio_mpipe_link_open_aux(gxio_mpipe_context_t * context, | ||||
| 			     _gxio_mpipe_link_name_t name, unsigned int flags); | ||||
| 
 | ||||
| int gxio_mpipe_link_close_aux(gxio_mpipe_context_t * context, int mac); | ||||
| 
 | ||||
| 
 | ||||
| int gxio_mpipe_get_timestamp_aux(gxio_mpipe_context_t * context, uint64_t * sec, | ||||
| 				 uint64_t * nsec, uint64_t * cycles); | ||||
| 
 | ||||
| int gxio_mpipe_set_timestamp_aux(gxio_mpipe_context_t * context, uint64_t sec, | ||||
| 				 uint64_t nsec, uint64_t cycles); | ||||
| 
 | ||||
| int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t * context, | ||||
| 				    int64_t nsec); | ||||
| 
 | ||||
| int gxio_mpipe_arm_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie); | ||||
| 
 | ||||
| int gxio_mpipe_close_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie); | ||||
| 
 | ||||
| int gxio_mpipe_get_mmio_base(gxio_mpipe_context_t * context, HV_PTE *base); | ||||
| 
 | ||||
| int gxio_mpipe_check_mmio_offset(gxio_mpipe_context_t * context, | ||||
| 				 unsigned long offset, unsigned long size); | ||||
| 
 | ||||
| #endif /* !__GXIO_MPIPE_LINUX_RPC_H__ */ | ||||
							
								
								
									
										46
									
								
								arch/tile/include/gxio/iorpc_mpipe_info.h
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										46
									
								
								arch/tile/include/gxio/iorpc_mpipe_info.h
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,46 @@ | ||||
| /*
 | ||||
|  * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||||
|  * | ||||
|  *   This program is free software; you can redistribute it and/or | ||||
|  *   modify it under the terms of the GNU General Public License | ||||
|  *   as published by the Free Software Foundation, version 2. | ||||
|  * | ||||
|  *   This program is distributed in the hope that it will be useful, but | ||||
|  *   WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||||
|  *   NON INFRINGEMENT.  See the GNU General Public License for | ||||
|  *   more details. | ||||
|  */ | ||||
| 
 | ||||
| /* This file is machine-generated; DO NOT EDIT! */ | ||||
| #ifndef __GXIO_MPIPE_INFO_LINUX_RPC_H__ | ||||
| #define __GXIO_MPIPE_INFO_LINUX_RPC_H__ | ||||
| 
 | ||||
| #include <hv/iorpc.h> | ||||
| 
 | ||||
| #include <hv/drv_mpipe_intf.h> | ||||
| #include <asm/page.h> | ||||
| #include <gxio/kiorpc.h> | ||||
| #include <gxio/mpipe.h> | ||||
| #include <linux/string.h> | ||||
| #include <linux/module.h> | ||||
| #include <asm/pgtable.h> | ||||
| 
 | ||||
| 
 | ||||
| #define GXIO_MPIPE_INFO_OP_ENUMERATE_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1251) | ||||
| #define GXIO_MPIPE_INFO_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000) | ||||
| #define GXIO_MPIPE_INFO_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001) | ||||
| 
 | ||||
| 
 | ||||
| int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t * context, | ||||
| 				  unsigned int idx, | ||||
| 				  _gxio_mpipe_link_name_t * name, | ||||
| 				  _gxio_mpipe_link_mac_t * mac); | ||||
| 
 | ||||
| int gxio_mpipe_info_get_mmio_base(gxio_mpipe_info_context_t * context, | ||||
| 				  HV_PTE *base); | ||||
| 
 | ||||
| int gxio_mpipe_info_check_mmio_offset(gxio_mpipe_info_context_t * context, | ||||
| 				      unsigned long offset, unsigned long size); | ||||
| 
 | ||||
| #endif /* !__GXIO_MPIPE_INFO_LINUX_RPC_H__ */ | ||||
							
								
								
									
										97
									
								
								arch/tile/include/gxio/iorpc_trio.h
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										97
									
								
								arch/tile/include/gxio/iorpc_trio.h
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,97 @@ | ||||
| /*
 | ||||
|  * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||||
|  * | ||||
|  *   This program is free software; you can redistribute it and/or | ||||
|  *   modify it under the terms of the GNU General Public License | ||||
|  *   as published by the Free Software Foundation, version 2. | ||||
|  * | ||||
|  *   This program is distributed in the hope that it will be useful, but | ||||
|  *   WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||||
|  *   NON INFRINGEMENT.  See the GNU General Public License for | ||||
|  *   more details. | ||||
|  */ | ||||
| 
 | ||||
| /* This file is machine-generated; DO NOT EDIT! */ | ||||
| #ifndef __GXIO_TRIO_LINUX_RPC_H__ | ||||
| #define __GXIO_TRIO_LINUX_RPC_H__ | ||||
| 
 | ||||
| #include <hv/iorpc.h> | ||||
| 
 | ||||
| #include <hv/drv_trio_intf.h> | ||||
| #include <gxio/trio.h> | ||||
| #include <gxio/kiorpc.h> | ||||
| #include <linux/string.h> | ||||
| #include <linux/module.h> | ||||
| #include <asm/pgtable.h> | ||||
| 
 | ||||
| #define GXIO_TRIO_OP_ALLOC_ASIDS       IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1400) | ||||
| 
 | ||||
| #define GXIO_TRIO_OP_ALLOC_MEMORY_MAPS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1402) | ||||
| 
 | ||||
| #define GXIO_TRIO_OP_ALLOC_PIO_REGIONS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x140e) | ||||
| #define GXIO_TRIO_OP_INIT_PIO_REGION_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x140f) | ||||
| 
 | ||||
| #define GXIO_TRIO_OP_INIT_MEMORY_MAP_MMU_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1417) | ||||
| #define GXIO_TRIO_OP_GET_PORT_PROPERTY IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1418) | ||||
| #define GXIO_TRIO_OP_CONFIG_LEGACY_INTR IORPC_OPCODE(IORPC_FORMAT_KERNEL_INTERRUPT, 0x1419) | ||||
| #define GXIO_TRIO_OP_CONFIG_MSI_INTR   IORPC_OPCODE(IORPC_FORMAT_KERNEL_INTERRUPT, 0x141a) | ||||
| 
 | ||||
| #define GXIO_TRIO_OP_SET_MPS_MRS       IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x141c) | ||||
| #define GXIO_TRIO_OP_FORCE_RC_LINK_UP  IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x141d) | ||||
| #define GXIO_TRIO_OP_FORCE_EP_LINK_UP  IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x141e) | ||||
| #define GXIO_TRIO_OP_GET_MMIO_BASE     IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000) | ||||
| #define GXIO_TRIO_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001) | ||||
| 
 | ||||
| int gxio_trio_alloc_asids(gxio_trio_context_t * context, unsigned int count, | ||||
| 			  unsigned int first, unsigned int flags); | ||||
| 
 | ||||
| 
 | ||||
| int gxio_trio_alloc_memory_maps(gxio_trio_context_t * context, | ||||
| 				unsigned int count, unsigned int first, | ||||
| 				unsigned int flags); | ||||
| 
 | ||||
| 
 | ||||
| int gxio_trio_alloc_pio_regions(gxio_trio_context_t * context, | ||||
| 				unsigned int count, unsigned int first, | ||||
| 				unsigned int flags); | ||||
| 
 | ||||
| int gxio_trio_init_pio_region_aux(gxio_trio_context_t * context, | ||||
| 				  unsigned int pio_region, unsigned int mac, | ||||
| 				  uint32_t bus_address_hi, unsigned int flags); | ||||
| 
 | ||||
| 
 | ||||
| int gxio_trio_init_memory_map_mmu_aux(gxio_trio_context_t * context, | ||||
| 				      unsigned int map, unsigned long va, | ||||
| 				      uint64_t size, unsigned int asid, | ||||
| 				      unsigned int mac, uint64_t bus_address, | ||||
| 				      unsigned int node, | ||||
| 				      unsigned int order_mode); | ||||
| 
 | ||||
| int gxio_trio_get_port_property(gxio_trio_context_t * context, | ||||
| 				struct pcie_trio_ports_property *trio_ports); | ||||
| 
 | ||||
| int gxio_trio_config_legacy_intr(gxio_trio_context_t * context, int inter_x, | ||||
| 				 int inter_y, int inter_ipi, int inter_event, | ||||
| 				 unsigned int mac, unsigned int intx); | ||||
| 
 | ||||
| int gxio_trio_config_msi_intr(gxio_trio_context_t * context, int inter_x, | ||||
| 			      int inter_y, int inter_ipi, int inter_event, | ||||
| 			      unsigned int mac, unsigned int mem_map, | ||||
| 			      uint64_t mem_map_base, uint64_t mem_map_limit, | ||||
| 			      unsigned int asid); | ||||
| 
 | ||||
| 
 | ||||
| int gxio_trio_set_mps_mrs(gxio_trio_context_t * context, uint16_t mps, | ||||
| 			  uint16_t mrs, unsigned int mac); | ||||
| 
 | ||||
| int gxio_trio_force_rc_link_up(gxio_trio_context_t * context, unsigned int mac); | ||||
| 
 | ||||
| int gxio_trio_force_ep_link_up(gxio_trio_context_t * context, unsigned int mac); | ||||
| 
 | ||||
| int gxio_trio_get_mmio_base(gxio_trio_context_t * context, HV_PTE *base); | ||||
| 
 | ||||
| int gxio_trio_check_mmio_offset(gxio_trio_context_t * context, | ||||
| 				unsigned long offset, unsigned long size); | ||||
| 
 | ||||
| #endif /* !__GXIO_TRIO_LINUX_RPC_H__ */ | ||||
							
								
								
									
										46
									
								
								arch/tile/include/gxio/iorpc_usb_host.h
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										46
									
								
								arch/tile/include/gxio/iorpc_usb_host.h
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,46 @@ | ||||
| /*
 | ||||
|  * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||||
|  * | ||||
|  *   This program is free software; you can redistribute it and/or | ||||
|  *   modify it under the terms of the GNU General Public License | ||||
|  *   as published by the Free Software Foundation, version 2. | ||||
|  * | ||||
|  *   This program is distributed in the hope that it will be useful, but | ||||
|  *   WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||||
|  *   NON INFRINGEMENT.  See the GNU General Public License for | ||||
|  *   more details. | ||||
|  */ | ||||
| 
 | ||||
| /* This file is machine-generated; DO NOT EDIT! */ | ||||
| #ifndef __GXIO_USB_HOST_LINUX_RPC_H__ | ||||
| #define __GXIO_USB_HOST_LINUX_RPC_H__ | ||||
| 
 | ||||
| #include <hv/iorpc.h> | ||||
| 
 | ||||
| #include <hv/drv_usb_host_intf.h> | ||||
| #include <asm/page.h> | ||||
| #include <gxio/kiorpc.h> | ||||
| #include <gxio/usb_host.h> | ||||
| #include <linux/string.h> | ||||
| #include <linux/module.h> | ||||
| #include <asm/pgtable.h> | ||||
| 
 | ||||
| #define GXIO_USB_HOST_OP_CFG_INTERRUPT IORPC_OPCODE(IORPC_FORMAT_KERNEL_INTERRUPT, 0x1800) | ||||
| #define GXIO_USB_HOST_OP_REGISTER_CLIENT_MEMORY IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1801) | ||||
| #define GXIO_USB_HOST_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000) | ||||
| #define GXIO_USB_HOST_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001) | ||||
| 
 | ||||
| int gxio_usb_host_cfg_interrupt(gxio_usb_host_context_t * context, int inter_x, | ||||
| 				int inter_y, int inter_ipi, int inter_event); | ||||
| 
 | ||||
| int gxio_usb_host_register_client_memory(gxio_usb_host_context_t * context, | ||||
| 					 HV_PTE pte, unsigned int flags); | ||||
| 
 | ||||
| int gxio_usb_host_get_mmio_base(gxio_usb_host_context_t * context, | ||||
| 				HV_PTE *base); | ||||
| 
 | ||||
| int gxio_usb_host_check_mmio_offset(gxio_usb_host_context_t * context, | ||||
| 				    unsigned long offset, unsigned long size); | ||||
| 
 | ||||
| #endif /* !__GXIO_USB_HOST_LINUX_RPC_H__ */ | ||||
							
								
								
									
										29
									
								
								arch/tile/include/gxio/kiorpc.h
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										29
									
								
								arch/tile/include/gxio/kiorpc.h
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,29 @@ | ||||
| /*
 | ||||
|  * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||||
|  * | ||||
|  *   This program is free software; you can redistribute it and/or | ||||
|  *   modify it under the terms of the GNU General Public License | ||||
|  *   as published by the Free Software Foundation, version 2. | ||||
|  * | ||||
|  *   This program is distributed in the hope that it will be useful, but | ||||
|  *   WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||||
|  *   NON INFRINGEMENT.  See the GNU General Public License for | ||||
|  *   more details. | ||||
|  * | ||||
|  * Support routines for kernel IORPC drivers. | ||||
|  */ | ||||
| 
 | ||||
| #ifndef _GXIO_KIORPC_H | ||||
| #define _GXIO_KIORPC_H | ||||
| 
 | ||||
| #include <linux/types.h> | ||||
| #include <asm/page.h> | ||||
| #include <arch/chip.h> | ||||
| 
 | ||||
| #if CHIP_HAS_MMIO() | ||||
| void __iomem *iorpc_ioremap(int hv_fd, resource_size_t offset, | ||||
| 			    unsigned long size); | ||||
| #endif | ||||
| 
 | ||||
| #endif /* _GXIO_KIORPC_H */ | ||||
							
								
								
									
										1736
									
								
								arch/tile/include/gxio/mpipe.h
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										1736
									
								
								arch/tile/include/gxio/mpipe.h
									
									
									
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							
							
								
								
									
										298
									
								
								arch/tile/include/gxio/trio.h
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										298
									
								
								arch/tile/include/gxio/trio.h
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,298 @@ | ||||
| /*
 | ||||
|  * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||||
|  * | ||||
|  *   This program is free software; you can redistribute it and/or | ||||
|  *   modify it under the terms of the GNU General Public License | ||||
|  *   as published by the Free Software Foundation, version 2. | ||||
|  * | ||||
|  *   This program is distributed in the hope that it will be useful, but | ||||
|  *   WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||||
|  *   NON INFRINGEMENT.  See the GNU General Public License for | ||||
|  *   more details. | ||||
|  */ | ||||
| 
 | ||||
| /*
 | ||||
|  * | ||||
|  * An API for allocating, configuring, and manipulating TRIO hardware | ||||
|  * resources | ||||
|  */ | ||||
| 
 | ||||
| /*
 | ||||
|  * | ||||
|  * The TILE-Gx TRIO shim provides connections to external devices via | ||||
|  * PCIe or other transaction IO standards.  The gxio_trio_ API, | ||||
|  * declared in <gxio/trio.h>, allows applications to allocate and | ||||
|  * configure TRIO IO resources like DMA command rings, memory map | ||||
|  * windows, and device interrupts.  The following sections introduce | ||||
|  * the various components of the API.  We strongly recommend reading | ||||
|  * the TRIO section of the IO Device Guide (UG404) before working with | ||||
|  * this API. | ||||
|  * | ||||
|  * @section trio__ingress TRIO Ingress Hardware Resources | ||||
|  * | ||||
|  * The TRIO ingress hardware is responsible for examining incoming | ||||
|  * PCIe or StreamIO packets and choosing a processing mechanism based | ||||
|  * on the packets' bus address.  The gxio_trio_ API can be used to | ||||
|  * configure different handlers for different ranges of bus address | ||||
|  * space.  The user can configure "mapped memory" and "scatter queue" | ||||
|  * regions to match incoming packets within 4kB-aligned ranges of bus | ||||
|  * addresses.  Each range specifies a different set of mapping | ||||
|  * parameters to be applied when handling the ingress packet.  The | ||||
|  * following sections describe how to work with MapMem and scatter | ||||
|  * queue regions. | ||||
|  * | ||||
|  * @subsection trio__mapmem TRIO MapMem Regions | ||||
|  * | ||||
|  * TRIO mapped memory (or MapMem) regions allow the user to map | ||||
|  * incoming read and write requests directly to the application's | ||||
|  * memory space.  MapMem regions are allocated via | ||||
|  * gxio_trio_alloc_memory_maps().  Given an integer MapMem number, | ||||
|  * applications can use gxio_trio_init_memory_map() to specify the | ||||
|  * range of bus addresses that will match the region and the range of | ||||
|  * virtual addresses to which those packets will be applied. | ||||
|  * | ||||
|  * As with many other gxio APIs, the programmer must be sure to | ||||
|  * register memory pages that will be used with MapMem regions.  Pages | ||||
|  * can be registered with TRIO by allocating an ASID (address space | ||||
|  * identifier) and then using gxio_trio_register_page() to register up to | ||||
|  * 16 pages with the hardware.  The initialization functions for | ||||
|  * resources that require registered memory (MapMem, scatter queues, | ||||
|  * push DMA, and pull DMA) then take an 'asid' parameter in order to | ||||
|  * configure which set of registered pages is used by each resource. | ||||
|  * | ||||
|  * @subsection trio__scatter_queue TRIO Scatter Queues | ||||
|  * | ||||
|  * The TRIO shim's scatter queue regions allow users to dynamically | ||||
|  * map buffers from a large address space into a small range of bus | ||||
|  * addresses.  This is particularly helpful for PCIe endpoint devices, | ||||
|  * where the host generally limits the size of BARs to tens of | ||||
|  * megabytes. | ||||
|  * | ||||
|  * Each scatter queue consists of a memory map region, a queue of | ||||
|  * tile-side buffer VAs to be mapped to that region, and a bus-mapped | ||||
|  * "doorbell" register that the remote endpoint can write to trigger a | ||||
|  * dequeue of the current buffer VA, thus swapping in a new buffer. | ||||
|  * The VAs pushed onto a scatter queue must be 4kB aligned, so | ||||
|  * applications may need to use higher-level protocols to inform | ||||
|  * remote entities that they should apply some additional, sub-4kB | ||||
|  * offset when reading or writing the scatter queue region.  For more | ||||
|  * information, see the IO Device Guide (UG404). | ||||
|  * | ||||
|  * @section trio__egress TRIO Egress Hardware Resources | ||||
|  * | ||||
|  * The TRIO shim supports two mechanisms for egress packet generation: | ||||
|  * programmed IO (PIO) and push/pull DMA.  PIO allows applications to | ||||
|  * create MMIO mappings for PCIe or StreamIO address space, such that | ||||
|  * the application can generate word-sized read or write transactions | ||||
|  * by issuing load or store instructions.  Push and pull DMA are tuned | ||||
|  * for larger transactions; they use specialized hardware engines to | ||||
|  * transfer large blocks of data at line rate. | ||||
|  * | ||||
|  * @subsection trio__pio TRIO Programmed IO | ||||
|  * | ||||
|  * Programmed IO allows applications to create MMIO mappings for PCIe | ||||
|  * or StreamIO address space.  The hardware PIO regions support access | ||||
|  * to PCIe configuration, IO, and memory space, but the gxio_trio API | ||||
|  * only supports memory space accesses.  PIO regions are allocated | ||||
|  * with gxio_trio_alloc_pio_regions() and initialized via | ||||
|  * gxio_trio_init_pio_region().  Once a region is bound to a range of | ||||
|  * bus address via the initialization function, the application can | ||||
|  * use gxio_trio_map_pio_region() to create MMIO mappings from its VA | ||||
|  * space onto the range of bus addresses supported by the PIO region. | ||||
|  * | ||||
|  * @subsection trio_dma TRIO Push and Pull DMA | ||||
|  * | ||||
|  * The TRIO push and pull DMA engines allow users to copy blocks of | ||||
|  * data between application memory and the bus.  Push DMA generates | ||||
|  * write packets that copy from application memory to the bus and pull | ||||
|  * DMA generates read packets that copy from the bus into application | ||||
|  * memory.  The DMA engines are managed via an API that is very | ||||
|  * similar to the mPIPE eDMA interface.  For a detailed explanation of | ||||
|  * the eDMA queue API, see @ref gxio_mpipe_wrappers. | ||||
|  * | ||||
|  * Push and pull DMA queues are allocated via | ||||
|  * gxio_trio_alloc_push_dma_ring() / gxio_trio_alloc_pull_dma_ring(). | ||||
|  * Once allocated, users generally use a ::gxio_trio_dma_queue_t | ||||
|  * object to manage the queue, providing easy wrappers for reserving | ||||
|  * command slots in the DMA command ring, filling those slots, and | ||||
|  * waiting for commands to complete.  DMA queues can be initialized | ||||
|  * via gxio_trio_init_push_dma_queue() or | ||||
|  * gxio_trio_init_pull_dma_queue(). | ||||
|  * | ||||
|  * See @ref trio/push_dma/app.c for an example of how to use push DMA. | ||||
|  * | ||||
|  * @section trio_shortcomings Plans for Future API Revisions | ||||
|  * | ||||
|  * The simulation framework is incomplete.  Future features include: | ||||
|  * | ||||
|  * - Support for reset and deallocation of resources. | ||||
|  * | ||||
|  * - Support for pull DMA. | ||||
|  * | ||||
|  * - Support for interrupt regions and user-space interrupt delivery. | ||||
|  * | ||||
|  * - Support for getting BAR mappings and reserving regions of BAR | ||||
|  *   address space. | ||||
|  */ | ||||
| #ifndef _GXIO_TRIO_H_ | ||||
| #define _GXIO_TRIO_H_ | ||||
| 
 | ||||
| #include <linux/types.h> | ||||
| 
 | ||||
| #include "common.h" | ||||
| #include "dma_queue.h" | ||||
| 
 | ||||
| #include <arch/trio_constants.h> | ||||
| #include <arch/trio.h> | ||||
| #include <arch/trio_pcie_intfc.h> | ||||
| #include <arch/trio_pcie_rc.h> | ||||
| #include <arch/trio_shm.h> | ||||
| #include <hv/drv_trio_intf.h> | ||||
| #include <hv/iorpc.h> | ||||
| 
 | ||||
| /* A context object used to manage TRIO hardware resources. */ | ||||
| typedef struct { | ||||
| 
 | ||||
| 	/* File descriptor for calling up to Linux (and thus the HV). */ | ||||
| 	int fd; | ||||
| 
 | ||||
| 	/* The VA at which the MAC MMIO registers are mapped. */ | ||||
| 	char *mmio_base_mac; | ||||
| 
 | ||||
| 	/* The VA at which the PIO config space are mapped for each PCIe MAC.
 | ||||
| 	   Gx36 has max 3 PCIe MACs per TRIO shim. */ | ||||
| 	char *mmio_base_pio_cfg[TILEGX_TRIO_PCIES]; | ||||
| 
 | ||||
| #ifdef USE_SHARED_PCIE_CONFIG_REGION | ||||
| 	/* Index of the shared PIO region for PCI config access. */ | ||||
| 	int pio_cfg_index; | ||||
| #else | ||||
| 	/* Index of the PIO region for PCI config access per MAC. */ | ||||
| 	int pio_cfg_index[TILEGX_TRIO_PCIES]; | ||||
| #endif | ||||
| 
 | ||||
| 	/*  The VA at which the push DMA MMIO registers are mapped. */ | ||||
| 	char *mmio_push_dma[TRIO_NUM_PUSH_DMA_RINGS]; | ||||
| 
 | ||||
| 	/*  The VA at which the pull DMA MMIO registers are mapped. */ | ||||
| 	char *mmio_pull_dma[TRIO_NUM_PUSH_DMA_RINGS]; | ||||
| 
 | ||||
| 	/* Application space ID. */ | ||||
| 	unsigned int asid; | ||||
| 
 | ||||
| } gxio_trio_context_t; | ||||
| 
 | ||||
| /* Command descriptor for push or pull DMA. */ | ||||
| typedef TRIO_DMA_DESC_t gxio_trio_dma_desc_t; | ||||
| 
 | ||||
| /* A convenient, thread-safe interface to an eDMA ring. */ | ||||
| typedef struct { | ||||
| 
 | ||||
| 	/* State object for tracking head and tail pointers. */ | ||||
| 	__gxio_dma_queue_t dma_queue; | ||||
| 
 | ||||
| 	/* The ring entries. */ | ||||
| 	gxio_trio_dma_desc_t *dma_descs; | ||||
| 
 | ||||
| 	/* The number of entries minus one. */ | ||||
| 	unsigned long mask_num_entries; | ||||
| 
 | ||||
| 	/* The log2() of the number of entries. */ | ||||
| 	unsigned int log2_num_entries; | ||||
| 
 | ||||
| } gxio_trio_dma_queue_t; | ||||
| 
 | ||||
| /* Initialize a TRIO context.
 | ||||
|  * | ||||
|  * This function allocates a TRIO "service domain" and maps the MMIO | ||||
|  * registers into the the caller's VA space. | ||||
|  * | ||||
|  * @param trio_index Which TRIO shim; Gx36 must pass 0. | ||||
|  * @param context Context object to be initialized. | ||||
|  */ | ||||
| extern int gxio_trio_init(gxio_trio_context_t *context, | ||||
| 			  unsigned int trio_index); | ||||
| 
 | ||||
| /* This indicates that an ASID hasn't been allocated. */ | ||||
| #define GXIO_ASID_NULL -1 | ||||
| 
 | ||||
| /* Ordering modes for map memory regions and scatter queue regions. */ | ||||
| typedef enum gxio_trio_order_mode_e { | ||||
| 	/* Writes are not ordered.  Reads always wait for previous writes. */ | ||||
| 	GXIO_TRIO_ORDER_MODE_UNORDERED = | ||||
| 		TRIO_MAP_MEM_SETUP__ORDER_MODE_VAL_UNORDERED, | ||||
| 	/* Both writes and reads wait for previous transactions to complete. */ | ||||
| 	GXIO_TRIO_ORDER_MODE_STRICT = | ||||
| 		TRIO_MAP_MEM_SETUP__ORDER_MODE_VAL_STRICT, | ||||
| 	/* Writes are ordered unless the incoming packet has the
 | ||||
| 	   relaxed-ordering attributes set. */ | ||||
| 	GXIO_TRIO_ORDER_MODE_OBEY_PACKET = | ||||
| 		TRIO_MAP_MEM_SETUP__ORDER_MODE_VAL_REL_ORD | ||||
| } gxio_trio_order_mode_t; | ||||
| 
 | ||||
| /* Initialize a memory mapping region.
 | ||||
|  * | ||||
|  * @param context An initialized TRIO context. | ||||
|  * @param map A Memory map region allocated by gxio_trio_alloc_memory_map(). | ||||
|  * @param target_mem VA of backing memory, should be registered via | ||||
|  *   gxio_trio_register_page() and aligned to 4kB. | ||||
|  * @param target_size Length of the memory mapping, must be a multiple | ||||
|  * of 4kB. | ||||
|  * @param asid ASID to be used for Tile-side address translation. | ||||
|  * @param mac MAC number. | ||||
|  * @param bus_address Bus address at which the mapping starts. | ||||
|  * @param order_mode Memory ordering mode for this mapping. | ||||
|  * @return Zero on success, else ::GXIO_TRIO_ERR_BAD_MEMORY_MAP, | ||||
|  * GXIO_TRIO_ERR_BAD_ASID, or ::GXIO_TRIO_ERR_BAD_BUS_RANGE. | ||||
|  */ | ||||
| extern int gxio_trio_init_memory_map(gxio_trio_context_t *context, | ||||
| 				     unsigned int map, void *target_mem, | ||||
| 				     size_t target_size, unsigned int asid, | ||||
| 				     unsigned int mac, uint64_t bus_address, | ||||
| 				     gxio_trio_order_mode_t order_mode); | ||||
| 
 | ||||
| /* Flags that can be passed to resource allocation functions. */ | ||||
| enum gxio_trio_alloc_flags_e { | ||||
| 	GXIO_TRIO_ALLOC_FIXED = HV_TRIO_ALLOC_FIXED, | ||||
| }; | ||||
| 
 | ||||
| /* Flags that can be passed to memory registration functions. */ | ||||
| enum gxio_trio_mem_flags_e { | ||||
| 	/* Do not fill L3 when writing, and invalidate lines upon egress. */ | ||||
| 	GXIO_TRIO_MEM_FLAG_NT_HINT = IORPC_MEM_BUFFER_FLAG_NT_HINT, | ||||
| 
 | ||||
| 	/* L3 cache fills should only populate IO cache ways. */ | ||||
| 	GXIO_TRIO_MEM_FLAG_IO_PIN = IORPC_MEM_BUFFER_FLAG_IO_PIN, | ||||
| }; | ||||
| 
 | ||||
| /* Flag indicating a request generator uses a special traffic
 | ||||
|     class. */ | ||||
| #define GXIO_TRIO_FLAG_TRAFFIC_CLASS(N) HV_TRIO_FLAG_TC(N) | ||||
| 
 | ||||
| /* Flag indicating a request generator uses a virtual function
 | ||||
|     number. */ | ||||
| #define GXIO_TRIO_FLAG_VFUNC(N) HV_TRIO_FLAG_VFUNC(N) | ||||
| 
 | ||||
| /*****************************************************************
 | ||||
|  *                       Memory Registration                      * | ||||
|  ******************************************************************/ | ||||
| 
 | ||||
| /* Allocate Application Space Identifiers (ASIDs).  Each ASID can
 | ||||
|  * register up to 16 page translations.  ASIDs are used by memory map | ||||
|  * regions, scatter queues, and DMA queues to translate application | ||||
|  * VAs into memory system PAs. | ||||
|  * | ||||
|  * @param context An initialized TRIO context. | ||||
|  * @param count Number of ASIDs required. | ||||
|  * @param first Index of first ASID if ::GXIO_TRIO_ALLOC_FIXED flag | ||||
|  *   is set, otherwise ignored. | ||||
|  * @param flags Flag bits, including bits from ::gxio_trio_alloc_flags_e. | ||||
|  * @return Index of first ASID, or ::GXIO_TRIO_ERR_NO_ASID if allocation | ||||
|  *   failed. | ||||
|  */ | ||||
| extern int gxio_trio_alloc_asids(gxio_trio_context_t *context, | ||||
| 				 unsigned int count, unsigned int first, | ||||
| 				 unsigned int flags); | ||||
| 
 | ||||
| #endif /* ! _GXIO_TRIO_H_ */ | ||||
							
								
								
									
										87
									
								
								arch/tile/include/gxio/usb_host.h
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										87
									
								
								arch/tile/include/gxio/usb_host.h
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,87 @@ | ||||
| /*
 | ||||
|  * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||||
|  * | ||||
|  *   This program is free software; you can redistribute it and/or | ||||
|  *   modify it under the terms of the GNU General Public License | ||||
|  *   as published by the Free Software Foundation, version 2. | ||||
|  * | ||||
|  *   This program is distributed in the hope that it will be useful, but | ||||
|  *   WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||||
|  *   NON INFRINGEMENT.  See the GNU General Public License for | ||||
|  *   more details. | ||||
|  */ | ||||
| #ifndef _GXIO_USB_H_ | ||||
| #define _GXIO_USB_H_ | ||||
| 
 | ||||
| #include "common.h" | ||||
| 
 | ||||
| #include <hv/drv_usb_host_intf.h> | ||||
| #include <hv/iorpc.h> | ||||
| 
 | ||||
| /*
 | ||||
|  * | ||||
|  * An API for manipulating general-purpose I/O pins. | ||||
|  */ | ||||
| 
 | ||||
| /*
 | ||||
|  * | ||||
|  * The USB shim allows access to the processor's Universal Serial Bus | ||||
|  * connections. | ||||
|  */ | ||||
| 
 | ||||
| /* A context object used to manage USB hardware resources. */ | ||||
| typedef struct { | ||||
| 
 | ||||
| 	/* File descriptor for calling up to the hypervisor. */ | ||||
| 	int fd; | ||||
| 
 | ||||
| 	/* The VA at which our MMIO registers are mapped. */ | ||||
| 	char *mmio_base; | ||||
| } gxio_usb_host_context_t; | ||||
| 
 | ||||
| /* Initialize a USB context.
 | ||||
|  * | ||||
|  *  A properly initialized context must be obtained before any of the other | ||||
|  *  gxio_usb_host routines may be used. | ||||
|  * | ||||
|  * @param context Pointer to a gxio_usb_host_context_t, which will be | ||||
|  *  initialized by this routine, if it succeeds. | ||||
|  * @param usb_index Index of the USB shim to use. | ||||
|  * @param is_ehci Nonzero to use the EHCI interface; zero to use the OHCI | ||||
|  *  intereface. | ||||
|  * @return Zero if the context was successfully initialized, else a | ||||
|  *  GXIO_ERR_xxx error code. | ||||
|  */ | ||||
| extern int gxio_usb_host_init(gxio_usb_host_context_t * context, int usb_index, | ||||
| 			      int is_ehci); | ||||
| 
 | ||||
| /* Destroy a USB context.
 | ||||
|  * | ||||
|  *  Once destroyed, a context may not be used with any gxio_usb_host routines | ||||
|  *  other than gxio_usb_host_init().  After this routine returns, no further | ||||
|  *  interrupts or signals requested on this context will be delivered.  The | ||||
|  *  state and configuration of the pins which had been attached to this | ||||
|  *  context are unchanged by this operation. | ||||
|  * | ||||
|  * @param context Pointer to a gxio_usb_host_context_t. | ||||
|  * @return Zero if the context was successfully destroyed, else a | ||||
|  *  GXIO_ERR_xxx error code. | ||||
|  */ | ||||
| extern int gxio_usb_host_destroy(gxio_usb_host_context_t * context); | ||||
| 
 | ||||
| /* Retrieve the address of the shim's MMIO registers.
 | ||||
|  * | ||||
|  * @param context Pointer to a properly initialized gxio_usb_host_context_t. | ||||
|  * @return The address of the shim's MMIO registers. | ||||
|  */ | ||||
| extern void *gxio_usb_host_get_reg_start(gxio_usb_host_context_t * context); | ||||
| 
 | ||||
| /* Retrieve the length of the shim's MMIO registers.
 | ||||
|  * | ||||
|  * @param context Pointer to a properly initialized gxio_usb_host_context_t. | ||||
|  * @return The length of the shim's MMIO registers. | ||||
|  */ | ||||
| extern size_t gxio_usb_host_get_reg_len(gxio_usb_host_context_t * context); | ||||
| 
 | ||||
| #endif /* _GXIO_USB_H_ */ | ||||
							
								
								
									
										602
									
								
								arch/tile/include/hv/drv_mpipe_intf.h
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										602
									
								
								arch/tile/include/hv/drv_mpipe_intf.h
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,602 @@ | ||||
| /*
 | ||||
|  * Copyright 2011 Tilera Corporation. All Rights Reserved. | ||||
|  * | ||||
|  *   This program is free software; you can redistribute it and/or | ||||
|  *   modify it under the terms of the GNU General Public License | ||||
|  *   as published by the Free Software Foundation, version 2. | ||||
|  * | ||||
|  *   This program is distributed in the hope that it will be useful, but | ||||
|  *   WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||||
|  *   NON INFRINGEMENT.  See the GNU General Public License for | ||||
|  *   more details. | ||||
|  */ | ||||
| 
 | ||||
| /**
 | ||||
|  * Interface definitions for the mpipe driver. | ||||
|  */ | ||||
| 
 | ||||
| #ifndef _SYS_HV_DRV_MPIPE_INTF_H | ||||
| #define _SYS_HV_DRV_MPIPE_INTF_H | ||||
| 
 | ||||
| #include <arch/mpipe.h> | ||||
| #include <arch/mpipe_constants.h> | ||||
| 
 | ||||
| 
 | ||||
| /** Number of buffer stacks (32). */ | ||||
| #define HV_MPIPE_NUM_BUFFER_STACKS \ | ||||
|   (MPIPE_MMIO_INIT_DAT_GX36_1__BUFFER_STACK_MASK_WIDTH) | ||||
| 
 | ||||
| /** Number of NotifRings (256). */ | ||||
| #define HV_MPIPE_NUM_NOTIF_RINGS (MPIPE_NUM_NOTIF_RINGS) | ||||
| 
 | ||||
| /** Number of NotifGroups (32). */ | ||||
| #define HV_MPIPE_NUM_NOTIF_GROUPS (MPIPE_NUM_NOTIF_GROUPS) | ||||
| 
 | ||||
| /** Number of buckets (4160). */ | ||||
| #define HV_MPIPE_NUM_BUCKETS (MPIPE_NUM_BUCKETS) | ||||
| 
 | ||||
| /** Number of "lo" buckets (4096). */ | ||||
| #define HV_MPIPE_NUM_LO_BUCKETS 4096 | ||||
| 
 | ||||
| /** Number of "hi" buckets (64). */ | ||||
| #define HV_MPIPE_NUM_HI_BUCKETS \ | ||||
|   (HV_MPIPE_NUM_BUCKETS - HV_MPIPE_NUM_LO_BUCKETS) | ||||
| 
 | ||||
| /** Number of edma rings (24). */ | ||||
| #define HV_MPIPE_NUM_EDMA_RINGS \ | ||||
|   (MPIPE_MMIO_INIT_DAT_GX36_1__EDMA_POST_MASK_WIDTH) | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
| /** A flag bit indicating a fixed resource allocation. */ | ||||
| #define HV_MPIPE_ALLOC_FIXED 0x01 | ||||
| 
 | ||||
| /** Offset for the config register MMIO region. */ | ||||
| #define HV_MPIPE_CONFIG_MMIO_OFFSET \ | ||||
|   (MPIPE_MMIO_ADDR__REGION_VAL_CFG << MPIPE_MMIO_ADDR__REGION_SHIFT) | ||||
| 
 | ||||
| /** Size of the config register MMIO region. */ | ||||
| #define HV_MPIPE_CONFIG_MMIO_SIZE (64 * 1024) | ||||
| 
 | ||||
| /** Offset for the config register MMIO region. */ | ||||
| #define HV_MPIPE_FAST_MMIO_OFFSET \ | ||||
|   (MPIPE_MMIO_ADDR__REGION_VAL_IDMA << MPIPE_MMIO_ADDR__REGION_SHIFT) | ||||
| 
 | ||||
| /** Size of the fast register MMIO region (IDMA, EDMA, buffer stack). */ | ||||
| #define HV_MPIPE_FAST_MMIO_SIZE \ | ||||
|   ((MPIPE_MMIO_ADDR__REGION_VAL_BSM + 1 - MPIPE_MMIO_ADDR__REGION_VAL_IDMA) \ | ||||
|    << MPIPE_MMIO_ADDR__REGION_SHIFT) | ||||
| 
 | ||||
| 
 | ||||
| /*
 | ||||
|  * Each type of resource allocation comes in quantized chunks, where | ||||
|  * XXX_BITS is the number of chunks, and XXX_RES_PER_BIT is the number | ||||
|  * of resources in each chunk. | ||||
|  */ | ||||
| 
 | ||||
| /** Number of buffer stack chunks available (32). */ | ||||
| #define HV_MPIPE_ALLOC_BUFFER_STACKS_BITS \ | ||||
|   MPIPE_MMIO_INIT_DAT_GX36_1__BUFFER_STACK_MASK_WIDTH | ||||
| 
 | ||||
| /** Granularity of buffer stack allocation (1). */ | ||||
| #define HV_MPIPE_ALLOC_BUFFER_STACKS_RES_PER_BIT \ | ||||
|   (HV_MPIPE_NUM_BUFFER_STACKS / HV_MPIPE_ALLOC_BUFFER_STACKS_BITS) | ||||
| 
 | ||||
| /** Number of NotifRing chunks available (32). */ | ||||
| #define HV_MPIPE_ALLOC_NOTIF_RINGS_BITS \ | ||||
|   MPIPE_MMIO_INIT_DAT_GX36_0__NOTIF_RING_MASK_WIDTH | ||||
| 
 | ||||
| /** Granularity of NotifRing allocation (8). */ | ||||
| #define HV_MPIPE_ALLOC_NOTIF_RINGS_RES_PER_BIT \ | ||||
|   (HV_MPIPE_NUM_NOTIF_RINGS / HV_MPIPE_ALLOC_NOTIF_RINGS_BITS) | ||||
| 
 | ||||
| /** Number of NotifGroup chunks available (32). */ | ||||
| #define HV_MPIPE_ALLOC_NOTIF_GROUPS_BITS \ | ||||
|   HV_MPIPE_NUM_NOTIF_GROUPS | ||||
| 
 | ||||
| /** Granularity of NotifGroup allocation (1). */ | ||||
| #define HV_MPIPE_ALLOC_NOTIF_GROUPS_RES_PER_BIT \ | ||||
|   (HV_MPIPE_NUM_NOTIF_GROUPS / HV_MPIPE_ALLOC_NOTIF_GROUPS_BITS) | ||||
| 
 | ||||
| /** Number of lo bucket chunks available (16). */ | ||||
| #define HV_MPIPE_ALLOC_LO_BUCKETS_BITS \ | ||||
|   MPIPE_MMIO_INIT_DAT_GX36_0__BUCKET_RELEASE_MASK_LO_WIDTH | ||||
| 
 | ||||
| /** Granularity of lo bucket allocation (256). */ | ||||
| #define HV_MPIPE_ALLOC_LO_BUCKETS_RES_PER_BIT \ | ||||
|   (HV_MPIPE_NUM_LO_BUCKETS / HV_MPIPE_ALLOC_LO_BUCKETS_BITS) | ||||
| 
 | ||||
| /** Number of hi bucket chunks available (16). */ | ||||
| #define HV_MPIPE_ALLOC_HI_BUCKETS_BITS \ | ||||
|   MPIPE_MMIO_INIT_DAT_GX36_0__BUCKET_RELEASE_MASK_HI_WIDTH | ||||
| 
 | ||||
| /** Granularity of hi bucket allocation (4). */ | ||||
| #define HV_MPIPE_ALLOC_HI_BUCKETS_RES_PER_BIT \ | ||||
|   (HV_MPIPE_NUM_HI_BUCKETS / HV_MPIPE_ALLOC_HI_BUCKETS_BITS) | ||||
| 
 | ||||
| /** Number of eDMA ring chunks available (24). */ | ||||
| #define HV_MPIPE_ALLOC_EDMA_RINGS_BITS \ | ||||
|   MPIPE_MMIO_INIT_DAT_GX36_1__EDMA_POST_MASK_WIDTH | ||||
| 
 | ||||
| /** Granularity of eDMA ring allocation (1). */ | ||||
| #define HV_MPIPE_ALLOC_EDMA_RINGS_RES_PER_BIT \ | ||||
|   (HV_MPIPE_NUM_EDMA_RINGS / HV_MPIPE_ALLOC_EDMA_RINGS_BITS) | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
| /** Bit vector encoding which NotifRings are in a NotifGroup. */ | ||||
| typedef struct | ||||
| { | ||||
|   /** The actual bits. */ | ||||
|   uint64_t ring_mask[4]; | ||||
| 
 | ||||
| } gxio_mpipe_notif_group_bits_t; | ||||
| 
 | ||||
| 
 | ||||
| /** Another name for MPIPE_LBL_INIT_DAT_BSTS_TBL_t. */ | ||||
| typedef MPIPE_LBL_INIT_DAT_BSTS_TBL_t gxio_mpipe_bucket_info_t; | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
| /** Eight buffer stack ids. */ | ||||
| typedef struct | ||||
| { | ||||
|   /** The stacks. */ | ||||
|   uint8_t stacks[8]; | ||||
| 
 | ||||
| } gxio_mpipe_rules_stacks_t; | ||||
| 
 | ||||
| 
 | ||||
| /** A destination mac address. */ | ||||
| typedef struct | ||||
| { | ||||
|   /** The octets. */ | ||||
|   uint8_t octets[6]; | ||||
| 
 | ||||
| } gxio_mpipe_rules_dmac_t; | ||||
| 
 | ||||
| 
 | ||||
| /** A vlan. */ | ||||
| typedef uint16_t gxio_mpipe_rules_vlan_t; | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
| /** Maximum number of characters in a link name. */ | ||||
| #define GXIO_MPIPE_LINK_NAME_LEN  32 | ||||
| 
 | ||||
| 
 | ||||
| /** Structure holding a link name.  Only needed, and only typedef'ed,
 | ||||
|  *  because the IORPC stub generator only handles types which are single | ||||
|  *  words coming before the parameter name. */ | ||||
| typedef struct | ||||
| { | ||||
|   /** The name itself. */ | ||||
|   char name[GXIO_MPIPE_LINK_NAME_LEN]; | ||||
| } | ||||
| _gxio_mpipe_link_name_t; | ||||
| 
 | ||||
| /** Maximum number of characters in a symbol name. */ | ||||
| #define GXIO_MPIPE_SYMBOL_NAME_LEN  128 | ||||
| 
 | ||||
| 
 | ||||
| /** Structure holding a symbol name.  Only needed, and only typedef'ed,
 | ||||
|  *  because the IORPC stub generator only handles types which are single | ||||
|  *  words coming before the parameter name. */ | ||||
| typedef struct | ||||
| { | ||||
|   /** The name itself. */ | ||||
|   char name[GXIO_MPIPE_SYMBOL_NAME_LEN]; | ||||
| } | ||||
| _gxio_mpipe_symbol_name_t; | ||||
| 
 | ||||
| 
 | ||||
| /** Structure holding a MAC address. */ | ||||
| typedef struct | ||||
| { | ||||
|   /** The address. */ | ||||
|   uint8_t mac[6]; | ||||
| } | ||||
| _gxio_mpipe_link_mac_t; | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
| /** Request shared data permission -- that is, the ability to send and
 | ||||
|  *  receive packets -- on the specified link.  Other processes may also | ||||
|  *  request shared data permission on the same link. | ||||
|  * | ||||
|  *  No more than one of ::GXIO_MPIPE_LINK_DATA, ::GXIO_MPIPE_LINK_NO_DATA, | ||||
|  *  or ::GXIO_MPIPE_LINK_EXCL_DATA may be specifed in a gxio_mpipe_link_open() | ||||
|  *  call.  If none are specified, ::GXIO_MPIPE_LINK_DATA is assumed. | ||||
|  */ | ||||
| #define GXIO_MPIPE_LINK_DATA               0x00000001UL | ||||
| 
 | ||||
| /** Do not request data permission on the specified link.
 | ||||
|  * | ||||
|  *  No more than one of ::GXIO_MPIPE_LINK_DATA, ::GXIO_MPIPE_LINK_NO_DATA, | ||||
|  *  or ::GXIO_MPIPE_LINK_EXCL_DATA may be specifed in a gxio_mpipe_link_open() | ||||
|  *  call.  If none are specified, ::GXIO_MPIPE_LINK_DATA is assumed. | ||||
|  */ | ||||
| #define GXIO_MPIPE_LINK_NO_DATA            0x00000002UL | ||||
| 
 | ||||
| /** Request exclusive data permission -- that is, the ability to send and
 | ||||
|  *  receive packets -- on the specified link.  No other processes may | ||||
|  *  request data permission on this link, and if any process already has | ||||
|  *  data permission on it, this open will fail. | ||||
|  * | ||||
|  *  No more than one of ::GXIO_MPIPE_LINK_DATA, ::GXIO_MPIPE_LINK_NO_DATA, | ||||
|  *  or ::GXIO_MPIPE_LINK_EXCL_DATA may be specifed in a gxio_mpipe_link_open() | ||||
|  *  call.  If none are specified, ::GXIO_MPIPE_LINK_DATA is assumed. | ||||
|  */ | ||||
| #define GXIO_MPIPE_LINK_EXCL_DATA          0x00000004UL | ||||
| 
 | ||||
| /** Request shared stats permission -- that is, the ability to read and write
 | ||||
|  *  registers which contain link statistics, and to get link attributes -- | ||||
|  *  on the specified link.  Other processes may also request shared stats | ||||
|  *  permission on the same link. | ||||
|  * | ||||
|  *  No more than one of ::GXIO_MPIPE_LINK_STATS, ::GXIO_MPIPE_LINK_NO_STATS, | ||||
|  *  or ::GXIO_MPIPE_LINK_EXCL_STATS may be specifed in a gxio_mpipe_link_open() | ||||
|  *  call.  If none are specified, ::GXIO_MPIPE_LINK_STATS is assumed. | ||||
|  */ | ||||
| #define GXIO_MPIPE_LINK_STATS              0x00000008UL | ||||
| 
 | ||||
| /** Do not request stats permission on the specified link.
 | ||||
|  * | ||||
|  *  No more than one of ::GXIO_MPIPE_LINK_STATS, ::GXIO_MPIPE_LINK_NO_STATS, | ||||
|  *  or ::GXIO_MPIPE_LINK_EXCL_STATS may be specifed in a gxio_mpipe_link_open() | ||||
|  *  call.  If none are specified, ::GXIO_MPIPE_LINK_STATS is assumed. | ||||
|  */ | ||||
| #define GXIO_MPIPE_LINK_NO_STATS           0x00000010UL | ||||
| 
 | ||||
| /** Request exclusive stats permission -- that is, the ability to read and
 | ||||
|  *  write registers which contain link statistics, and to get link | ||||
|  *  attributes -- on the specified link.  No other processes may request | ||||
|  *  stats permission on this link, and if any process already | ||||
|  *  has stats permission on it, this open will fail. | ||||
|  * | ||||
|  *  Requesting exclusive stats permission is normally a very bad idea, since | ||||
|  *  it prevents programs like mpipe-stat from providing information on this | ||||
|  *  link.  Applications should only do this if they use MAC statistics | ||||
|  *  registers, and cannot tolerate any of the clear-on-read registers being | ||||
|  *  reset by other statistics programs. | ||||
|  * | ||||
|  *  No more than one of ::GXIO_MPIPE_LINK_STATS, ::GXIO_MPIPE_LINK_NO_STATS, | ||||
|  *  or ::GXIO_MPIPE_LINK_EXCL_STATS may be specifed in a gxio_mpipe_link_open() | ||||
|  *  call.  If none are specified, ::GXIO_MPIPE_LINK_STATS is assumed. | ||||
|  */ | ||||
| #define GXIO_MPIPE_LINK_EXCL_STATS         0x00000020UL | ||||
| 
 | ||||
| /** Request shared control permission -- that is, the ability to modify link
 | ||||
|  *  attributes, and read and write MAC and MDIO registers -- on the | ||||
|  *  specified link.  Other processes may also request shared control | ||||
|  *  permission on the same link. | ||||
|  * | ||||
|  *  No more than one of ::GXIO_MPIPE_LINK_CTL, ::GXIO_MPIPE_LINK_NO_CTL, | ||||
|  *  or ::GXIO_MPIPE_LINK_EXCL_CTL may be specifed in a gxio_mpipe_link_open() | ||||
|  *  call.  If none are specified, ::GXIO_MPIPE_LINK_CTL is assumed. | ||||
|  */ | ||||
| #define GXIO_MPIPE_LINK_CTL                0x00000040UL | ||||
| 
 | ||||
| /** Do not request control permission on the specified link.
 | ||||
|  * | ||||
|  *  No more than one of ::GXIO_MPIPE_LINK_CTL, ::GXIO_MPIPE_LINK_NO_CTL, | ||||
|  *  or ::GXIO_MPIPE_LINK_EXCL_CTL may be specifed in a gxio_mpipe_link_open() | ||||
|  *  call.  If none are specified, ::GXIO_MPIPE_LINK_CTL is assumed. | ||||
|  */ | ||||
| #define GXIO_MPIPE_LINK_NO_CTL             0x00000080UL | ||||
| 
 | ||||
| /** Request exclusive control permission -- that is, the ability to modify
 | ||||
|  *  link attributes, and read and write MAC and MDIO registers -- on the | ||||
|  *  specified link.  No other processes may request control permission on | ||||
|  *  this link, and if any process already has control permission on it, | ||||
|  *  this open will fail. | ||||
|  * | ||||
|  *  Requesting exclusive control permission is not always a good idea, since | ||||
|  *  it prevents programs like mpipe-link from configuring the link. | ||||
|  * | ||||
|  *  No more than one of ::GXIO_MPIPE_LINK_CTL, ::GXIO_MPIPE_LINK_NO_CTL, | ||||
|  *  or ::GXIO_MPIPE_LINK_EXCL_CTL may be specifed in a gxio_mpipe_link_open() | ||||
|  *  call.  If none are specified, ::GXIO_MPIPE_LINK_CTL is assumed. | ||||
|  */ | ||||
| #define GXIO_MPIPE_LINK_EXCL_CTL           0x00000100UL | ||||
| 
 | ||||
| /** Set the desired state of the link to up, allowing any speeds which are
 | ||||
|  *  supported by the link hardware, as part of this open operation; do not | ||||
|  *  change the desired state of the link when it is closed or the process | ||||
|  *  exits.  No more than one of ::GXIO_MPIPE_LINK_AUTO_UP, | ||||
|  *  ::GXIO_MPIPE_LINK_AUTO_UPDOWN, ::GXIO_MPIPE_LINK_AUTO_DOWN, or | ||||
|  *  ::GXIO_MPIPE_LINK_AUTO_NONE may be specifed in a gxio_mpipe_link_open() | ||||
|  *  call.  If none are specified, ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed. | ||||
|  */ | ||||
| #define GXIO_MPIPE_LINK_AUTO_UP            0x00000200UL | ||||
| 
 | ||||
| /** Set the desired state of the link to up, allowing any speeds which are
 | ||||
|  *  supported by the link hardware, as part of this open operation; when the | ||||
|  *  link is closed or this process exits, if no other process has the link | ||||
|  *  open, set the desired state of the link to down.  No more than one of | ||||
|  *  ::GXIO_MPIPE_LINK_AUTO_UP, ::GXIO_MPIPE_LINK_AUTO_UPDOWN, | ||||
|  *  ::GXIO_MPIPE_LINK_AUTO_DOWN, or ::GXIO_MPIPE_LINK_AUTO_NONE may be | ||||
|  *  specifed in a gxio_mpipe_link_open() call.  If none are specified, | ||||
|  *  ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed. | ||||
|  */ | ||||
| #define GXIO_MPIPE_LINK_AUTO_UPDOWN        0x00000400UL | ||||
| 
 | ||||
| /** Do not change the desired state of the link as part of the open
 | ||||
|  *  operation; when the link is closed or this process exits, if no other | ||||
|  *  process has the link open, set the desired state of the link to down. | ||||
|  *  No more than one of ::GXIO_MPIPE_LINK_AUTO_UP, | ||||
|  *  ::GXIO_MPIPE_LINK_AUTO_UPDOWN, ::GXIO_MPIPE_LINK_AUTO_DOWN, or | ||||
|  *  ::GXIO_MPIPE_LINK_AUTO_NONE may be specifed in a gxio_mpipe_link_open() | ||||
|  *  call.  If none are specified, ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed. | ||||
|  */ | ||||
| #define GXIO_MPIPE_LINK_AUTO_DOWN          0x00000800UL | ||||
| 
 | ||||
| /** Do not change the desired state of the link as part of the open
 | ||||
|  *  operation; do not change the desired state of the link when it is | ||||
|  *  closed or the process exits.  No more than one of | ||||
|  *  ::GXIO_MPIPE_LINK_AUTO_UP, ::GXIO_MPIPE_LINK_AUTO_UPDOWN, | ||||
|  *  ::GXIO_MPIPE_LINK_AUTO_DOWN, or ::GXIO_MPIPE_LINK_AUTO_NONE may be | ||||
|  *  specifed in a gxio_mpipe_link_open() call.  If none are specified, | ||||
|  *  ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed. | ||||
|  */ | ||||
| #define GXIO_MPIPE_LINK_AUTO_NONE          0x00001000UL | ||||
| 
 | ||||
| /** Request that this open call not complete until the network link is up.
 | ||||
|  *  The process will wait as long as necessary for this to happen; | ||||
|  *  applications which wish to abandon waiting for the link after a | ||||
|  *  specific time period should not specify this flag when opening a link, | ||||
|  *  but should instead call gxio_mpipe_link_wait() afterward.  The link | ||||
|  *  must be opened with stats permission.  Note that this flag by itself | ||||
|  *  does not change the desired link state; if other open flags or previous | ||||
|  *  link state changes have not requested a desired state of up, the open | ||||
|  *  call will never complete.  This flag is not available to kernel | ||||
|  *  clients. | ||||
|  */ | ||||
| #define GXIO_MPIPE_LINK_WAIT               0x00002000UL | ||||
| 
 | ||||
| 
 | ||||
| /*
 | ||||
|  * Note: link attributes must fit in 24 bits, since we use the top 8 bits | ||||
|  * of the IORPC offset word for the channel number. | ||||
|  */ | ||||
| 
 | ||||
| /** Determine whether jumbo frames may be received.  If this attribute's
 | ||||
|  *  value value is nonzero, the MAC will accept frames of up to 10240 bytes. | ||||
|  *  If the value is zero, the MAC will only accept frames of up to 1544 | ||||
|  *  bytes.  The default value is zero. */ | ||||
| #define GXIO_MPIPE_LINK_RECEIVE_JUMBO      0x010000 | ||||
| 
 | ||||
| /** Determine whether to send pause frames on this link if the mPIPE packet
 | ||||
|  *  FIFO is nearly full.  If the value is zero, pause frames are not sent. | ||||
|  *  If the value is nonzero, it is the delay value which will be sent in any | ||||
|  *  pause frames which are output, in units of 512 bit times. | ||||
|  * | ||||
|  *  Bear in mind that in almost all circumstances, the mPIPE packet FIFO | ||||
|  *  will never fill up, since mPIPE will empty it as fast as or faster than | ||||
|  *  the incoming data rate, by either delivering or dropping packets.  The | ||||
|  *  only situation in which this is not true is if the memory and cache | ||||
|  *  subsystem is extremely heavily loaded, and mPIPE cannot perform DMA of | ||||
|  *  packet data to memory in a timely fashion.  In particular, pause frames | ||||
|  *  will <em>not</em> be sent if packets cannot be delivered because | ||||
|  *  NotifRings are full, buckets are full, or buffers are not available in | ||||
|  *  a buffer stack. */ | ||||
| #define GXIO_MPIPE_LINK_SEND_PAUSE         0x020000 | ||||
| 
 | ||||
| /** Determine whether to suspend output on the receipt of pause frames.
 | ||||
|  *  If the value is nonzero, mPIPE shim will suspend output on the link's | ||||
|  *  channel when a pause frame is received.  If the value is zero, pause | ||||
|  *  frames will be ignored.  The default value is zero. */ | ||||
| #define GXIO_MPIPE_LINK_RECEIVE_PAUSE      0x030000 | ||||
| 
 | ||||
| /** Interface MAC address.  The value is a 6-byte MAC address, in the least
 | ||||
|  *  significant 48 bits of the value; in other words, an address which would | ||||
|  *  be printed as '12:34:56:78:90:AB' in IEEE 802 canonical format would | ||||
|  *  be returned as 0x12345678ab. | ||||
|  * | ||||
|  *  Depending upon the overall system design, a MAC address may or may not | ||||
|  *  be available for each interface.  Note that the interface's MAC address | ||||
|  *  does not limit the packets received on its channel, although the | ||||
|  *  classifier's rules could be configured to do that.  Similarly, the MAC | ||||
|  *  address is not used when transmitting packets, although applications | ||||
|  *  could certainly decide to use the assigned address as a source MAC | ||||
|  *  address when doing so.  This attribute may only be retrieved with | ||||
|  *  gxio_mpipe_link_get_attr(); it may not be modified. | ||||
|  */ | ||||
| #define GXIO_MPIPE_LINK_MAC                0x040000 | ||||
| 
 | ||||
| /** Determine whether to discard egress packets on link down. If this value
 | ||||
|  *  is nonzero, packets sent on this link while the link is down will be | ||||
|  *  discarded.  If this value is zero, no packets will be sent on this link | ||||
|  *  while it is down.  The default value is one. */ | ||||
| #define GXIO_MPIPE_LINK_DISCARD_IF_DOWN    0x050000 | ||||
| 
 | ||||
| /** Possible link state.  The value is a combination of link state flags,
 | ||||
|  *  ORed together, that indicate link modes which are actually supported by | ||||
|  *  the hardware.  This attribute may only be retrieved with | ||||
|  *  gxio_mpipe_link_get_attr(); it may not be modified. */ | ||||
| #define GXIO_MPIPE_LINK_POSSIBLE_STATE     0x060000 | ||||
| 
 | ||||
| /** Current link state.  The value is a combination of link state flags,
 | ||||
|  *  ORed together, that indicate the current state of the hardware.  If the | ||||
|  *  link is down, the value ANDed with ::GXIO_MPIPE_LINK_SPEED will be zero; | ||||
|  *  if the link is up, the value ANDed with ::GXIO_MPIPE_LINK_SPEED will | ||||
|  *  result in exactly one of the speed values, indicating the current speed. | ||||
|  *  This attribute may only be retrieved with gxio_mpipe_link_get_attr(); it | ||||
|  *  may not be modified. */ | ||||
| #define GXIO_MPIPE_LINK_CURRENT_STATE      0x070000 | ||||
| 
 | ||||
| /** Desired link state. The value is a conbination of flags, which specify
 | ||||
|  *  the desired state for the link.  With gxio_mpipe_link_set_attr(), this | ||||
|  *  will, in the background, attempt to bring up the link using whichever of | ||||
|  *  the requested flags are reasonable, or take down the link if the flags | ||||
|  *  are zero.  The actual link up or down operation may happen after this | ||||
|  *  call completes.  If the link state changes in the future, the system | ||||
|  *  will continue to try to get back to the desired link state; for | ||||
|  *  instance, if the link is brought up successfully, and then the network | ||||
|  *  cable is disconnected, the link will go down.  However, the desired | ||||
|  *  state of the link is still up, so if the cable is reconnected, the link | ||||
|  *  will be brought up again. | ||||
|  * | ||||
|  *  With gxio_mpipe_link_set_attr(), this will indicate the desired state | ||||
|  *  for the link, as set with a previous gxio_mpipe_link_set_attr() call, | ||||
|  *  or implicitly by a gxio_mpipe_link_open() or link close operation. | ||||
|  *  This may not reflect the current state of the link; to get that, use | ||||
|  *  ::GXIO_MPIPE_LINK_CURRENT_STATE. | ||||
|  */ | ||||
| #define GXIO_MPIPE_LINK_DESIRED_STATE      0x080000 | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
| /** Link can run, should run, or is running at 10 Mbps. */ | ||||
| #define GXIO_MPIPE_LINK_10M        0x0000000000000001UL | ||||
| 
 | ||||
| /** Link can run, should run, or is running at 100 Mbps. */ | ||||
| #define GXIO_MPIPE_LINK_100M       0x0000000000000002UL | ||||
| 
 | ||||
| /** Link can run, should run, or is running at 1 Gbps. */ | ||||
| #define GXIO_MPIPE_LINK_1G         0x0000000000000004UL | ||||
| 
 | ||||
| /** Link can run, should run, or is running at 10 Gbps. */ | ||||
| #define GXIO_MPIPE_LINK_10G        0x0000000000000008UL | ||||
| 
 | ||||
| /** Link can run, should run, or is running at 20 Gbps. */ | ||||
| #define GXIO_MPIPE_LINK_20G        0x0000000000000010UL | ||||
| 
 | ||||
| /** Link can run, should run, or is running at 25 Gbps. */ | ||||
| #define GXIO_MPIPE_LINK_25G        0x0000000000000020UL | ||||
| 
 | ||||
| /** Link can run, should run, or is running at 50 Gbps. */ | ||||
| #define GXIO_MPIPE_LINK_50G        0x0000000000000040UL | ||||
| 
 | ||||
| /** Link should run at the highest speed supported by the link and by
 | ||||
|  *  the device connected to the link.  Only usable as a value for | ||||
|  *  the link's desired state; never returned as a value for the current | ||||
|  *  or possible states. */ | ||||
| #define GXIO_MPIPE_LINK_ANYSPEED   0x0000000000000800UL | ||||
| 
 | ||||
| /** All legal link speeds.  This value is provided for use in extracting
 | ||||
|  *  the speed-related subset of the link state flags; it is not intended | ||||
|  *  to be set directly as a value for one of the GXIO_MPIPE_LINK_xxx_STATE | ||||
|  *  attributes.  A link is up or is requested to be up if its current or | ||||
|  *  desired state, respectively, ANDED with this value, is nonzero. */ | ||||
| #define GXIO_MPIPE_LINK_SPEED_MASK 0x0000000000000FFFUL | ||||
| 
 | ||||
| /** Link can run, should run, or is running in MAC loopback mode.  This
 | ||||
|  *  loops transmitted packets back to the receiver, inside the Tile | ||||
|  *  Processor. */ | ||||
| #define GXIO_MPIPE_LINK_LOOP_MAC   0x0000000000001000UL | ||||
| 
 | ||||
| /** Link can run, should run, or is running in PHY loopback mode.  This
 | ||||
|  *  loops transmitted packets back to the receiver, inside the external | ||||
|  *  PHY chip. */ | ||||
| #define GXIO_MPIPE_LINK_LOOP_PHY   0x0000000000002000UL | ||||
| 
 | ||||
| /** Link can run, should run, or is running in external loopback mode.
 | ||||
|  *  This requires that an external loopback plug be installed on the | ||||
|  *  Ethernet port.  Note that only some links require that this be | ||||
|  *  configured via the gxio_mpipe_link routines; other links can do | ||||
|  *  external loopack with the plug and no special configuration. */ | ||||
| #define GXIO_MPIPE_LINK_LOOP_EXT   0x0000000000004000UL | ||||
| 
 | ||||
| /** All legal loopback types. */ | ||||
| #define GXIO_MPIPE_LINK_LOOP_MASK  0x000000000000F000UL | ||||
| 
 | ||||
| /** Link can run, should run, or is running in full-duplex mode.
 | ||||
|  *  If neither ::GXIO_MPIPE_LINK_FDX nor ::GXIO_MPIPE_LINK_HDX are | ||||
|  *  specified in a set of desired state flags, both are assumed. */ | ||||
| #define GXIO_MPIPE_LINK_FDX        0x0000000000010000UL | ||||
| 
 | ||||
| /** Link can run, should run, or is running in half-duplex mode.
 | ||||
|  *  If neither ::GXIO_MPIPE_LINK_FDX nor ::GXIO_MPIPE_LINK_HDX are | ||||
|  *  specified in a set of desired state flags, both are assumed. */ | ||||
| #define GXIO_MPIPE_LINK_HDX        0x0000000000020000UL | ||||
| 
 | ||||
| 
 | ||||
| /** An individual rule. */ | ||||
| typedef struct | ||||
| { | ||||
|   /** The total size. */ | ||||
|   uint16_t size; | ||||
| 
 | ||||
|   /** The priority. */ | ||||
|   int16_t priority; | ||||
| 
 | ||||
|   /** The "headroom" in each buffer. */ | ||||
|   uint8_t headroom; | ||||
| 
 | ||||
|   /** The "tailroom" in each buffer. */ | ||||
|   uint8_t tailroom; | ||||
| 
 | ||||
|   /** The "capacity" of the largest buffer. */ | ||||
|   uint16_t capacity; | ||||
| 
 | ||||
|   /** The mask for converting a flow hash into a bucket. */ | ||||
|   uint16_t bucket_mask; | ||||
| 
 | ||||
|   /** The offset for converting a flow hash into a bucket. */ | ||||
|   uint16_t bucket_first; | ||||
| 
 | ||||
|   /** The buffer stack ids. */ | ||||
|   gxio_mpipe_rules_stacks_t stacks; | ||||
| 
 | ||||
|   /** The actual channels. */ | ||||
|   uint32_t channel_bits; | ||||
| 
 | ||||
|   /** The number of dmacs. */ | ||||
|   uint16_t num_dmacs; | ||||
| 
 | ||||
|   /** The number of vlans. */ | ||||
|   uint16_t num_vlans; | ||||
| 
 | ||||
|   /** The actual dmacs and vlans. */ | ||||
|   uint8_t dmacs_and_vlans[]; | ||||
| 
 | ||||
| } gxio_mpipe_rules_rule_t; | ||||
| 
 | ||||
| 
 | ||||
| /** A list of classifier rules. */ | ||||
| typedef struct | ||||
| { | ||||
|   /** The offset to the end of the current rule. */ | ||||
|   uint16_t tail; | ||||
| 
 | ||||
|   /** The offset to the start of the current rule. */ | ||||
|   uint16_t head; | ||||
| 
 | ||||
|   /** The actual rules. */ | ||||
|   uint8_t rules[4096 - 4]; | ||||
| 
 | ||||
| } gxio_mpipe_rules_list_t; | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
| /** mPIPE statistics structure. These counters include all relevant
 | ||||
|  *  events occurring on all links within the mPIPE shim. */ | ||||
| typedef struct | ||||
| { | ||||
|   /** Number of ingress packets dropped for any reason. */ | ||||
|   uint64_t ingress_drops; | ||||
|   /** Number of ingress packets dropped because a buffer stack was empty. */ | ||||
|   uint64_t ingress_drops_no_buf; | ||||
|   /** Number of ingress packets dropped or truncated due to lack of space in
 | ||||
|    *  the iPkt buffer. */ | ||||
|   uint64_t ingress_drops_ipkt; | ||||
|   /** Number of ingress packets dropped by the classifier or load balancer */ | ||||
|   uint64_t ingress_drops_cls_lb; | ||||
|   /** Total number of ingress packets. */ | ||||
|   uint64_t ingress_packets; | ||||
|   /** Total number of egress packets. */ | ||||
|   uint64_t egress_packets; | ||||
|   /** Total number of ingress bytes. */ | ||||
|   uint64_t ingress_bytes; | ||||
|   /** Total number of egress bytes. */ | ||||
|   uint64_t egress_bytes; | ||||
| } | ||||
| gxio_mpipe_stats_t; | ||||
| 
 | ||||
| 
 | ||||
| #endif /* _SYS_HV_DRV_MPIPE_INTF_H */ | ||||
							
								
								
									
										195
									
								
								arch/tile/include/hv/drv_trio_intf.h
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										195
									
								
								arch/tile/include/hv/drv_trio_intf.h
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,195 @@ | ||||
| /*
 | ||||
|  * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||||
|  * | ||||
|  *   This program is free software; you can redistribute it and/or | ||||
|  *   modify it under the terms of the GNU General Public License | ||||
|  *   as published by the Free Software Foundation, version 2. | ||||
|  * | ||||
|  *   This program is distributed in the hope that it will be useful, but | ||||
|  *   WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||||
|  *   NON INFRINGEMENT.  See the GNU General Public License for | ||||
|  *   more details. | ||||
|  */ | ||||
| 
 | ||||
| /**
 | ||||
|  * Interface definitions for the trio driver. | ||||
|  */ | ||||
| 
 | ||||
| #ifndef _SYS_HV_DRV_TRIO_INTF_H | ||||
| #define _SYS_HV_DRV_TRIO_INTF_H | ||||
| 
 | ||||
| #include <arch/trio.h> | ||||
| 
 | ||||
| /** The vendor ID for all Tilera processors. */ | ||||
| #define TILERA_VENDOR_ID 0x1a41 | ||||
| 
 | ||||
| /** The device ID for the Gx36 processor. */ | ||||
| #define TILERA_GX36_DEV_ID 0x0200 | ||||
| 
 | ||||
| /** Device ID for our internal bridge when running as RC. */ | ||||
| #define TILERA_GX36_RC_DEV_ID 0x2000 | ||||
| 
 | ||||
| /** Maximum number of TRIO interfaces. */ | ||||
| #define TILEGX_NUM_TRIO         2 | ||||
| 
 | ||||
| /** Gx36 has max 3 PCIe MACs per TRIO interface. */ | ||||
| #define TILEGX_TRIO_PCIES       3 | ||||
| 
 | ||||
| /** Specify port properties for a PCIe MAC. */ | ||||
| struct pcie_port_property | ||||
| { | ||||
|   /** If true, the link can be configured in PCIe root complex mode. */ | ||||
|   uint8_t allow_rc: 1; | ||||
| 
 | ||||
|   /** If true, the link can be configured in PCIe endpoint mode. */ | ||||
|   uint8_t allow_ep: 1; | ||||
| 
 | ||||
|   /** If true, the link can be configured in StreamIO mode. */ | ||||
|   uint8_t allow_sio: 1; | ||||
| 
 | ||||
|   /** If true, the link is allowed to support 1-lane operation. Software
 | ||||
|    *  will not consider it an error if the link comes up as a x1 link. */ | ||||
|   uint8_t allow_x1: 1; | ||||
| 
 | ||||
|   /** If true, the link is allowed to support 2-lane operation. Software
 | ||||
|    *  will not consider it an error if the link comes up as a x2 link. */ | ||||
|   uint8_t allow_x2: 1; | ||||
| 
 | ||||
|   /** If true, the link is allowed to support 4-lane operation. Software
 | ||||
|    *  will not consider it an error if the link comes up as a x4 link. */ | ||||
|   uint8_t allow_x4: 1; | ||||
| 
 | ||||
|   /** If true, the link is allowed to support 8-lane operation. Software
 | ||||
|    *  will not consider it an error if the link comes up as a x8 link. */ | ||||
|   uint8_t allow_x8: 1; | ||||
| 
 | ||||
|   /** Reserved. */ | ||||
|   uint8_t reserved: 1; | ||||
| 
 | ||||
| }; | ||||
| 
 | ||||
| /** Configurations can be issued to configure a char stream interrupt. */ | ||||
| typedef enum pcie_stream_intr_config_sel_e | ||||
| { | ||||
|   /** Interrupt configuration for memory map regions. */ | ||||
|   MEM_MAP_SEL, | ||||
| 
 | ||||
|   /** Interrupt configuration for push DMAs. */ | ||||
|   PUSH_DMA_SEL, | ||||
| 
 | ||||
|   /** Interrupt configuration for pull DMAs. */ | ||||
|   PULL_DMA_SEL, | ||||
| } | ||||
| pcie_stream_intr_config_sel_t; | ||||
| 
 | ||||
| 
 | ||||
| /** The mmap file offset (PA) of the TRIO config region. */ | ||||
| #define HV_TRIO_CONFIG_OFFSET                                        \ | ||||
|   ((unsigned long long)TRIO_MMIO_ADDRESS_SPACE__REGION_VAL_CFG <<   \ | ||||
|     TRIO_MMIO_ADDRESS_SPACE__REGION_SHIFT) | ||||
| 
 | ||||
| /** The maximum size of the TRIO config region. */ | ||||
| #define HV_TRIO_CONFIG_SIZE                                 \ | ||||
|   (1ULL << TRIO_CFG_REGION_ADDR__REGION_SHIFT) | ||||
| 
 | ||||
| /** Size of the config region mapped into client. We can't use
 | ||||
|  *  TRIO_MMIO_ADDRESS_SPACE__OFFSET_WIDTH because it | ||||
|  *  will require the kernel to allocate 4GB VA space | ||||
|  *  from the VMALLOC region which has a total range | ||||
|  *  of 4GB. | ||||
|  */ | ||||
| #define HV_TRIO_CONFIG_IOREMAP_SIZE                            \ | ||||
|   ((uint64_t) 1 << TRIO_CFG_REGION_ADDR__PROT_SHIFT) | ||||
| 
 | ||||
| /** The mmap file offset (PA) of a scatter queue region. */ | ||||
| #define HV_TRIO_SQ_OFFSET(queue)                                        \ | ||||
|   (((unsigned long long)TRIO_MMIO_ADDRESS_SPACE__REGION_VAL_MAP_SQ <<   \ | ||||
|     TRIO_MMIO_ADDRESS_SPACE__REGION_SHIFT) |                            \ | ||||
|    ((queue) << TRIO_MAP_SQ_REGION_ADDR__SQ_SEL_SHIFT)) | ||||
| 
 | ||||
| /** The maximum size of a scatter queue region. */ | ||||
| #define HV_TRIO_SQ_SIZE                                 \ | ||||
|   (1ULL << TRIO_MAP_SQ_REGION_ADDR__SQ_SEL_SHIFT) | ||||
| 
 | ||||
| 
 | ||||
| /** The "hardware MMIO region" of the first PIO region. */ | ||||
| #define HV_TRIO_FIRST_PIO_REGION 8 | ||||
| 
 | ||||
| /** The mmap file offset (PA) of a PIO region. */ | ||||
| #define HV_TRIO_PIO_OFFSET(region)                           \ | ||||
|   (((unsigned long long)(region) + HV_TRIO_FIRST_PIO_REGION) \ | ||||
|    << TRIO_PIO_REGIONS_ADDR__REGION_SHIFT) | ||||
| 
 | ||||
| /** The maximum size of a PIO region. */ | ||||
| #define HV_TRIO_PIO_SIZE (1ULL << TRIO_PIO_REGIONS_ADDR__ADDR_WIDTH) | ||||
| 
 | ||||
| 
 | ||||
| /** The mmap file offset (PA) of a push DMA region. */ | ||||
| #define HV_TRIO_PUSH_DMA_OFFSET(ring)                                   \ | ||||
|   (((unsigned long long)TRIO_MMIO_ADDRESS_SPACE__REGION_VAL_PUSH_DMA << \ | ||||
|     TRIO_MMIO_ADDRESS_SPACE__REGION_SHIFT) |                            \ | ||||
|    ((ring) << TRIO_PUSH_DMA_REGION_ADDR__RING_SEL_SHIFT)) | ||||
| 
 | ||||
| /** The mmap file offset (PA) of a pull DMA region. */ | ||||
| #define HV_TRIO_PULL_DMA_OFFSET(ring)                                   \ | ||||
|   (((unsigned long long)TRIO_MMIO_ADDRESS_SPACE__REGION_VAL_PULL_DMA << \ | ||||
|     TRIO_MMIO_ADDRESS_SPACE__REGION_SHIFT) |                            \ | ||||
|    ((ring) << TRIO_PULL_DMA_REGION_ADDR__RING_SEL_SHIFT)) | ||||
| 
 | ||||
| /** The maximum size of a DMA region. */ | ||||
| #define HV_TRIO_DMA_REGION_SIZE                         \ | ||||
|   (1ULL << TRIO_PUSH_DMA_REGION_ADDR__RING_SEL_SHIFT) | ||||
| 
 | ||||
| 
 | ||||
| /** The mmap file offset (PA) of a Mem-Map interrupt region. */ | ||||
| #define HV_TRIO_MEM_MAP_INTR_OFFSET(map)                                 \ | ||||
|   (((unsigned long long)TRIO_MMIO_ADDRESS_SPACE__REGION_VAL_MAP_MEM <<   \ | ||||
|     TRIO_MMIO_ADDRESS_SPACE__REGION_SHIFT) |                            \ | ||||
|    ((map) << TRIO_MAP_MEM_REGION_ADDR__MAP_SEL_SHIFT)) | ||||
| 
 | ||||
| /** The maximum size of a Mem-Map interrupt region. */ | ||||
| #define HV_TRIO_MEM_MAP_INTR_SIZE                                 \ | ||||
|   (1ULL << TRIO_MAP_MEM_REGION_ADDR__MAP_SEL_SHIFT) | ||||
| 
 | ||||
| 
 | ||||
| /** A flag bit indicating a fixed resource allocation. */ | ||||
| #define HV_TRIO_ALLOC_FIXED 0x01 | ||||
| 
 | ||||
| /** TRIO requires that all mappings have 4kB aligned start addresses. */ | ||||
| #define HV_TRIO_PAGE_SHIFT 12 | ||||
| 
 | ||||
| /** TRIO requires that all mappings have 4kB aligned start addresses. */ | ||||
| #define HV_TRIO_PAGE_SIZE (1ull << HV_TRIO_PAGE_SHIFT) | ||||
| 
 | ||||
| 
 | ||||
| /* Specify all PCIe port properties for a TRIO. */ | ||||
| struct pcie_trio_ports_property | ||||
| { | ||||
|   struct pcie_port_property ports[TILEGX_TRIO_PCIES]; | ||||
| }; | ||||
| 
 | ||||
| /* Flags indicating traffic class. */ | ||||
| #define HV_TRIO_FLAG_TC_SHIFT 4 | ||||
| #define HV_TRIO_FLAG_TC_RMASK 0xf | ||||
| #define HV_TRIO_FLAG_TC(N) \ | ||||
|   ((((N) & HV_TRIO_FLAG_TC_RMASK) + 1) << HV_TRIO_FLAG_TC_SHIFT) | ||||
| 
 | ||||
| /* Flags indicating virtual functions. */ | ||||
| #define HV_TRIO_FLAG_VFUNC_SHIFT 8 | ||||
| #define HV_TRIO_FLAG_VFUNC_RMASK 0xff | ||||
| #define HV_TRIO_FLAG_VFUNC(N) \ | ||||
|   ((((N) & HV_TRIO_FLAG_VFUNC_RMASK) + 1) << HV_TRIO_FLAG_VFUNC_SHIFT) | ||||
| 
 | ||||
| 
 | ||||
| /* Flag indicating an ordered PIO region. */ | ||||
| #define HV_TRIO_PIO_FLAG_ORDERED (1 << 16) | ||||
| 
 | ||||
| /* Flags indicating special types of PIO regions. */ | ||||
| #define HV_TRIO_PIO_FLAG_SPACE_SHIFT 17 | ||||
| #define HV_TRIO_PIO_FLAG_SPACE_MASK (0x3 << HV_TRIO_PIO_FLAG_SPACE_SHIFT) | ||||
| #define HV_TRIO_PIO_FLAG_CONFIG_SPACE (0x1 << HV_TRIO_PIO_FLAG_SPACE_SHIFT) | ||||
| #define HV_TRIO_PIO_FLAG_IO_SPACE (0x2 << HV_TRIO_PIO_FLAG_SPACE_SHIFT) | ||||
| 
 | ||||
| 
 | ||||
| #endif /* _SYS_HV_DRV_TRIO_INTF_H */ | ||||
							
								
								
									
										39
									
								
								arch/tile/include/hv/drv_usb_host_intf.h
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										39
									
								
								arch/tile/include/hv/drv_usb_host_intf.h
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,39 @@ | ||||
| /*
 | ||||
|  * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||||
|  * | ||||
|  *   This program is free software; you can redistribute it and/or | ||||
|  *   modify it under the terms of the GNU General Public License | ||||
|  *   as published by the Free Software Foundation, version 2. | ||||
|  * | ||||
|  *   This program is distributed in the hope that it will be useful, but | ||||
|  *   WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||||
|  *   NON INFRINGEMENT.  See the GNU General Public License for | ||||
|  *   more details. | ||||
|  */ | ||||
| 
 | ||||
| /**
 | ||||
|  * Interface definitions for the USB host driver. | ||||
|  */ | ||||
| 
 | ||||
| #ifndef _SYS_HV_DRV_USB_HOST_INTF_H | ||||
| #define _SYS_HV_DRV_USB_HOST_INTF_H | ||||
| 
 | ||||
| #include <arch/usb_host.h> | ||||
| 
 | ||||
| 
 | ||||
| /** Offset for the EHCI register MMIO region. */ | ||||
| #define HV_USB_HOST_MMIO_OFFSET_EHCI ((uint64_t) USB_HOST_HCCAPBASE_REG) | ||||
| 
 | ||||
| /** Offset for the OHCI register MMIO region. */ | ||||
| #define HV_USB_HOST_MMIO_OFFSET_OHCI ((uint64_t) USB_HOST_OHCD_HC_REVISION_REG) | ||||
| 
 | ||||
| /** Size of the register MMIO region.  This turns out to be the same for
 | ||||
|  *  both EHCI and OHCI. */ | ||||
| #define HV_USB_HOST_MMIO_SIZE ((uint64_t) 0x1000) | ||||
| 
 | ||||
| /** The number of service domains supported by the USB host shim. */ | ||||
| #define HV_USB_HOST_NUM_SVC_DOM 1 | ||||
| 
 | ||||
| 
 | ||||
| #endif /* _SYS_HV_DRV_USB_HOST_INTF_H */ | ||||
							
								
								
									
										714
									
								
								arch/tile/include/hv/iorpc.h
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										714
									
								
								arch/tile/include/hv/iorpc.h
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,714 @@ | ||||
| /*
 | ||||
|  * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||||
|  * | ||||
|  *   This program is free software; you can redistribute it and/or | ||||
|  *   modify it under the terms of the GNU General Public License | ||||
|  *   as published by the Free Software Foundation, version 2. | ||||
|  * | ||||
|  *   This program is distributed in the hope that it will be useful, but | ||||
|  *   WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||||
|  *   NON INFRINGEMENT.  See the GNU General Public License for | ||||
|  *   more details. | ||||
|  */ | ||||
| #ifndef _HV_IORPC_H_ | ||||
| #define _HV_IORPC_H_ | ||||
| 
 | ||||
| /**
 | ||||
|  * | ||||
|  * Error codes and struct definitions for the IO RPC library. | ||||
|  * | ||||
|  * The hypervisor's IO RPC component provides a convenient way for | ||||
|  * driver authors to proxy system calls between user space, linux, and | ||||
|  * the hypervisor driver.  The core of the system is a set of Python | ||||
|  * files that take ".idl" files as input and generates the following | ||||
|  * source code: | ||||
|  * | ||||
|  * - _rpc_call() routines for use in userspace IO libraries.  These | ||||
|  * routines take an argument list specified in the .idl file, pack the | ||||
|  * arguments in to a buffer, and read or write that buffer via the | ||||
|  * Linux iorpc driver. | ||||
|  * | ||||
|  * - dispatch_read() and dispatch_write() routines that hypervisor | ||||
|  * drivers can use to implement most of their dev_pread() and | ||||
|  * dev_pwrite() methods.  These routines decode the incoming parameter | ||||
|  * blob, permission check and translate parameters where appropriate, | ||||
|  * and then invoke a callback routine for whichever RPC call has | ||||
|  * arrived.  The driver simply implements the set of callback | ||||
|  * routines. | ||||
|  * | ||||
|  * The IO RPC system also includes the Linux 'iorpc' driver, which | ||||
|  * proxies calls between the userspace library and the hypervisor | ||||
|  * driver.  The Linux driver is almost entirely device agnostic; it | ||||
|  * watches for special flags indicating cases where a memory buffer | ||||
|  * address might need to be translated, etc.  As a result, driver | ||||
|  * writers can avoid many of the problem cases related to registering | ||||
|  * hardware resources like memory pages or interrupts.  However, the | ||||
|  * drivers must be careful to obey the conventions documented below in | ||||
|  * order to work properly with the generic Linux iorpc driver. | ||||
|  * | ||||
|  * @section iorpc_domains Service Domains | ||||
|  * | ||||
|  * All iorpc-based drivers must support a notion of service domains. | ||||
|  * A service domain is basically an application context - state | ||||
|  * indicating resources that are allocated to that particular app | ||||
|  * which it may access and (perhaps) other applications may not | ||||
|  * access.  Drivers can support any number of service domains they | ||||
|  * choose.  In some cases the design is limited by a number of service | ||||
|  * domains supported by the IO hardware; in other cases the service | ||||
|  * domains are a purely software concept and the driver chooses a | ||||
|  * maximum number of domains based on how much state memory it is | ||||
|  * willing to preallocate. | ||||
|  * | ||||
|  * For example, the mPIPE driver only supports as many service domains | ||||
|  * as are supported by the mPIPE hardware.  This limitation is | ||||
|  * required because the hardware implements its own MMIO protection | ||||
|  * scheme to allow large MMIO mappings while still protecting small | ||||
|  * register ranges within the page that should only be accessed by the | ||||
|  * hypervisor. | ||||
|  * | ||||
|  * In contrast, drivers with no hardware service domain limitations | ||||
|  * (for instance the TRIO shim) can implement an arbitrary number of | ||||
|  * service domains.  In these cases, each service domain is limited to | ||||
|  * a carefully restricted set of legal MMIO addresses if necessary to | ||||
|  * keep one application from corrupting another application's state. | ||||
|  * | ||||
|  * @section iorpc_conventions System Call Conventions | ||||
|  * | ||||
|  * The driver's open routine is responsible for allocating a new | ||||
|  * service domain for each hv_dev_open() call.  By convention, the | ||||
|  * return value from open() should be the service domain number on | ||||
|  * success, or GXIO_ERR_NO_SVC_DOM if no more service domains are | ||||
|  * available. | ||||
|  * | ||||
|  * The implementations of hv_dev_pread() and hv_dev_pwrite() are | ||||
|  * responsible for validating the devhdl value passed up by the | ||||
|  * client.  Since the device handle returned by hv_dev_open() should | ||||
|  * embed the positive service domain number, drivers should make sure | ||||
|  * that DRV_HDL2BITS(devhdl) is a legal service domain.  If the client | ||||
|  * passes an illegal service domain number, the routine should return | ||||
|  * GXIO_ERR_INVAL_SVC_DOM.  Once the service domain number has been | ||||
|  * validated, the driver can copy to/from the client buffer and call | ||||
|  * the dispatch_read() or dispatch_write() methods created by the RPC | ||||
|  * generator. | ||||
|  * | ||||
|  * The hv_dev_close() implementation should reset all service domain | ||||
|  * state and put the service domain back on a free list for | ||||
|  * reallocation by a future application.  In most cases, this will | ||||
|  * require executing a hardware reset or drain flow and denying any | ||||
|  * MMIO regions that were created for the service domain. | ||||
|  * | ||||
|  * @section iorpc_data Special Data Types | ||||
|  * | ||||
|  * The .idl file syntax allows the creation of syscalls with special | ||||
|  * parameters that require permission checks or translations as part | ||||
|  * of the system call path.  Because of limitations in the code | ||||
|  * generator, APIs are generally limited to just one of these special | ||||
|  * parameters per system call, and they are sometimes required to be | ||||
|  * the first or last parameter to the call.  Special parameters | ||||
|  * include: | ||||
|  * | ||||
|  * @subsection iorpc_mem_buffer MEM_BUFFER | ||||
|  * | ||||
|  * The MEM_BUFFER() datatype allows user space to "register" memory | ||||
|  * buffers with a device.  Registering memory accomplishes two tasks: | ||||
|  * Linux keeps track of all buffers that might be modified by a | ||||
|  * hardware device, and the hardware device drivers bind registered | ||||
|  * buffers to particular hardware resources like ingress NotifRings. | ||||
|  * The MEM_BUFFER() idl syntax can take extra flags like ALIGN_64KB, | ||||
|  * ALIGN_SELF_SIZE, and FLAGS indicating that memory buffers must have | ||||
|  * certain alignment or that the user should be able to pass a "memory | ||||
|  * flags" word specifying attributes like nt_hint or IO cache pinning. | ||||
|  * The parser will accept multiple MEM_BUFFER() flags. | ||||
|  * | ||||
|  * Implementations must obey the following conventions when | ||||
|  * registering memory buffers via the iorpc flow.  These rules are a | ||||
|  * result of the Linux driver implementation, which needs to keep | ||||
|  * track of how many times a particular page has been registered with | ||||
|  * the hardware so that it can release the page when all those | ||||
|  * registrations are cleared. | ||||
|  * | ||||
|  * - Memory registrations that refer to a resource which has already | ||||
|  * been bound must return GXIO_ERR_ALREADY_INIT.  Thus, it is an | ||||
|  * error to register memory twice without resetting (i.e. closing) the | ||||
|  * resource in between.  This convention keeps the Linux driver from | ||||
|  * having to track which particular devices a page is bound to. | ||||
|  * | ||||
|  * - At present, a memory registration is only cleared when the | ||||
|  * service domain is reset.  In this case, the Linux driver simply | ||||
|  * closes the HV device file handle and then decrements the reference | ||||
|  * counts of all pages that were previously registered with the | ||||
|  * device. | ||||
|  * | ||||
|  * - In the future, we may add a mechanism for unregistering memory. | ||||
|  * One possible implementation would require that the user specify | ||||
|  * which buffer is currently registered.  The HV would then verify | ||||
|  * that that page was actually the one currently mapped and return | ||||
|  * success or failure to Linux, which would then only decrement the | ||||
|  * page reference count if the addresses were mapped.  Another scheme | ||||
|  * might allow Linux to pass a token to the HV to be returned when the | ||||
|  * resource is unmapped. | ||||
|  * | ||||
|  * @subsection iorpc_interrupt INTERRUPT | ||||
|  * | ||||
|  * The INTERRUPT .idl datatype allows the client to bind hardware | ||||
|  * interrupts to a particular combination of IPI parameters - CPU, IPI | ||||
|  * PL, and event bit number.  This data is passed via a special | ||||
|  * datatype so that the Linux driver can validate the CPU and PL and | ||||
|  * the HV generic iorpc code can translate client CPUs to real CPUs. | ||||
|  * | ||||
|  * @subsection iorpc_pollfd_setup POLLFD_SETUP | ||||
|  * | ||||
|  * The POLLFD_SETUP .idl datatype allows the client to set up hardware | ||||
|  * interrupt bindings which are received by Linux but which are made | ||||
|  * visible to user processes as state transitions on a file descriptor; | ||||
|  * this allows user processes to use Linux primitives, such as poll(), to | ||||
|  * await particular hardware events.  This data is passed via a special | ||||
|  * datatype so that the Linux driver may recognize the pollable file | ||||
|  * descriptor and translate it to a set of interrupt target information, | ||||
|  * and so that the HV generic iorpc code can translate client CPUs to real | ||||
|  * CPUs. | ||||
|  * | ||||
|  * @subsection iorpc_pollfd POLLFD | ||||
|  * | ||||
|  * The POLLFD .idl datatype allows manipulation of hardware interrupt | ||||
|  * bindings set up via the POLLFD_SETUP datatype; common operations are | ||||
|  * resetting the state of the requested interrupt events, and unbinding any | ||||
|  * bound interrupts.  This data is passed via a special datatype so that | ||||
|  * the Linux driver may recognize the pollable file descriptor and | ||||
|  * translate it to an interrupt identifier previously supplied by the | ||||
|  * hypervisor as the result of an earlier pollfd_setup operation. | ||||
|  * | ||||
|  * @subsection iorpc_blob BLOB | ||||
|  * | ||||
|  * The BLOB .idl datatype allows the client to write an arbitrary | ||||
|  * length string of bytes up to the hypervisor driver.  This can be | ||||
|  * useful for passing up large, arbitrarily structured data like | ||||
|  * classifier programs.  The iorpc stack takes care of validating the | ||||
|  * buffer VA and CPA as the data passes up to the hypervisor.  Unlike | ||||
|  * MEM_BUFFER(), the buffer is not registered - Linux does not bump | ||||
|  * page refcounts and the HV driver should not reuse the buffer once | ||||
|  * the system call is complete. | ||||
|  * | ||||
|  * @section iorpc_translation Translating User Space Calls | ||||
|  * | ||||
|  * The ::iorpc_offset structure describes the formatting of the offset | ||||
|  * that is passed to pread() or pwrite() as part of the generated RPC code. | ||||
|  * When the user calls up to Linux, the rpc code fills in all the fields of | ||||
|  * the offset, including a 16-bit opcode, a 16 bit format indicator, and 32 | ||||
|  * bits of user-specified "sub-offset".  The opcode indicates which syscall | ||||
|  * is being requested.  The format indicates whether there is a "prefix | ||||
|  * struct" at the start of the memory buffer passed to pwrite(), and if so | ||||
|  * what data is in that prefix struct.  These prefix structs are used to | ||||
|  * implement special datatypes like MEM_BUFFER() and INTERRUPT - we arrange | ||||
|  * to put data that needs translation and permission checks at the start of | ||||
|  * the buffer so that the Linux driver and generic portions of the HV iorpc | ||||
|  * code can easily access the data.  The 32 bits of user-specified | ||||
|  * "sub-offset" are most useful for pread() calls where the user needs to | ||||
|  * also pass in a few bits indicating which register to read, etc. | ||||
|  * | ||||
|  * The Linux iorpc driver watches for system calls that contain prefix | ||||
|  * structs so that it can translate parameters and bump reference | ||||
|  * counts as appropriate.  It does not (currently) have any knowledge | ||||
|  * of the per-device opcodes - it doesn't care what operation you're | ||||
|  * doing to mPIPE, so long as it can do all the generic book-keeping. | ||||
|  * The hv/iorpc.h header file defines all of the generic encoding bits | ||||
|  * needed to translate iorpc calls without knowing which particular | ||||
|  * opcode is being issued. | ||||
|  * | ||||
|  * @section iorpc_globals Global iorpc Calls | ||||
|  * | ||||
|  * Implementing mmap() required adding some special iorpc syscalls | ||||
|  * that are only called by the Linux driver, never by userspace. | ||||
|  * These include get_mmio_base() and check_mmio_offset().  These | ||||
|  * routines are described in globals.idl and must be included in every | ||||
|  * iorpc driver.  By providing these routines in every driver, Linux's | ||||
|  * mmap implementation can easily get the PTE bits it needs and | ||||
|  * validate the PA offset without needing to know the per-device | ||||
|  * opcodes to perform those tasks. | ||||
|  * | ||||
|  * @section iorpc_kernel Supporting gxio APIs in the Kernel | ||||
|  * | ||||
|  * The iorpc code generator also supports generation of kernel code | ||||
|  * implementing the gxio APIs.  This capability is currently used by | ||||
|  * the mPIPE network driver, and will likely be used by the TRIO root | ||||
|  * complex and endpoint drivers and perhaps an in-kernel crypto | ||||
|  * driver.  Each driver that wants to instantiate iorpc calls in the | ||||
|  * kernel needs to generate a kernel version of the generate rpc code | ||||
|  * and (probably) copy any related gxio source files into the kernel. | ||||
|  * The mPIPE driver provides a good example of this pattern. | ||||
|  */ | ||||
| 
 | ||||
| #ifdef __KERNEL__ | ||||
| #include <linux/stddef.h> | ||||
| #else | ||||
| #include <stddef.h> | ||||
| #endif | ||||
| 
 | ||||
| #if defined(__HV__) | ||||
| #include <hv/hypervisor.h> | ||||
| #elif defined(__KERNEL__) | ||||
| #include "hypervisor.h" | ||||
| #include <linux/types.h> | ||||
| #else | ||||
| #include <stdint.h> | ||||
| #endif | ||||
| 
 | ||||
| 
 | ||||
| /** Code indicating translation services required within the RPC path.
 | ||||
|  * These indicate whether there is a translatable struct at the start | ||||
|  * of the RPC buffer and what information that struct contains. | ||||
|  */ | ||||
| enum iorpc_format_e | ||||
| { | ||||
|   /** No translation required, no prefix struct. */ | ||||
|   IORPC_FORMAT_NONE, | ||||
| 
 | ||||
|   /** No translation required, no prefix struct, no access to this
 | ||||
|    *  operation from user space. */ | ||||
|   IORPC_FORMAT_NONE_NOUSER, | ||||
| 
 | ||||
|   /** Prefix struct contains user VA and size. */ | ||||
|   IORPC_FORMAT_USER_MEM, | ||||
| 
 | ||||
|   /** Prefix struct contains CPA, size, and homing bits. */ | ||||
|   IORPC_FORMAT_KERNEL_MEM, | ||||
| 
 | ||||
|   /** Prefix struct contains interrupt. */ | ||||
|   IORPC_FORMAT_KERNEL_INTERRUPT, | ||||
| 
 | ||||
|   /** Prefix struct contains user-level interrupt. */ | ||||
|   IORPC_FORMAT_USER_INTERRUPT, | ||||
| 
 | ||||
|   /** Prefix struct contains pollfd_setup (interrupt information). */ | ||||
|   IORPC_FORMAT_KERNEL_POLLFD_SETUP, | ||||
| 
 | ||||
|   /** Prefix struct contains user-level pollfd_setup (file descriptor). */ | ||||
|   IORPC_FORMAT_USER_POLLFD_SETUP, | ||||
| 
 | ||||
|   /** Prefix struct contains pollfd (interrupt cookie). */ | ||||
|   IORPC_FORMAT_KERNEL_POLLFD, | ||||
| 
 | ||||
|   /** Prefix struct contains user-level pollfd (file descriptor). */ | ||||
|   IORPC_FORMAT_USER_POLLFD, | ||||
| }; | ||||
| 
 | ||||
| 
 | ||||
| /** Generate an opcode given format and code. */ | ||||
| #define IORPC_OPCODE(FORMAT, CODE) (((FORMAT) << 16) | (CODE)) | ||||
| 
 | ||||
| /** The offset passed through the read() and write() system calls
 | ||||
|     combines an opcode with 32 bits of user-specified offset. */ | ||||
| union iorpc_offset | ||||
| { | ||||
| #ifndef __BIG_ENDIAN__ | ||||
|   uint64_t offset;              /**< All bits. */ | ||||
| 
 | ||||
|   struct | ||||
|   { | ||||
|     uint16_t code;              /**< RPC code. */ | ||||
|     uint16_t format;            /**< iorpc_format_e */ | ||||
|     uint32_t sub_offset;        /**< caller-specified offset. */ | ||||
|   }; | ||||
| 
 | ||||
|   uint32_t opcode;              /**< Opcode combines code & format. */ | ||||
| #else | ||||
|   uint64_t offset;              /**< All bits. */ | ||||
| 
 | ||||
|   struct | ||||
|   { | ||||
|     uint32_t sub_offset;        /**< caller-specified offset. */ | ||||
|     uint16_t format;            /**< iorpc_format_e */ | ||||
|     uint16_t code;              /**< RPC code. */ | ||||
|   }; | ||||
| 
 | ||||
|   struct | ||||
|   { | ||||
|     uint32_t padding; | ||||
|     uint32_t opcode;              /**< Opcode combines code & format. */ | ||||
|   }; | ||||
| #endif | ||||
| }; | ||||
| 
 | ||||
| 
 | ||||
| /** Homing and cache hinting bits that can be used by IO devices. */ | ||||
| struct iorpc_mem_attr | ||||
| { | ||||
|   unsigned int lotar_x:4;       /**< lotar X bits (or Gx page_mask). */ | ||||
|   unsigned int lotar_y:4;       /**< lotar Y bits (or Gx page_offset). */ | ||||
|   unsigned int hfh:1;           /**< Uses hash-for-home. */ | ||||
|   unsigned int nt_hint:1;       /**< Non-temporal hint. */ | ||||
|   unsigned int io_pin:1;        /**< Only fill 'IO' cache ways. */ | ||||
| }; | ||||
| 
 | ||||
| /** Set the nt_hint bit. */ | ||||
| #define IORPC_MEM_BUFFER_FLAG_NT_HINT (1 << 0) | ||||
| 
 | ||||
| /** Set the IO pin bit. */ | ||||
| #define IORPC_MEM_BUFFER_FLAG_IO_PIN (1 << 1) | ||||
| 
 | ||||
| 
 | ||||
| /** A structure used to describe memory registration.  Different
 | ||||
|     protection levels describe memory differently, so this union | ||||
|     contains all the different possible descriptions.  As a request | ||||
|     moves up the call chain, each layer translates from one | ||||
|     description format to the next.  In particular, the Linux iorpc | ||||
|     driver translates user VAs into CPAs and homing parameters. */ | ||||
| union iorpc_mem_buffer | ||||
| { | ||||
|   struct | ||||
|   { | ||||
|     uint64_t va;                /**< User virtual address. */ | ||||
|     uint64_t size;              /**< Buffer size. */ | ||||
|     unsigned int flags;         /**< nt_hint, IO pin. */ | ||||
|   } | ||||
|   user;                         /**< Buffer as described by user apps. */ | ||||
| 
 | ||||
|   struct | ||||
|   { | ||||
|     unsigned long long cpa;     /**< Client physical address. */ | ||||
| #if defined(__KERNEL__) || defined(__HV__) | ||||
|     size_t size;                /**< Buffer size. */ | ||||
|     HV_PTE pte;                 /**< PTE describing memory homing. */ | ||||
| #else | ||||
|     uint64_t size; | ||||
|     uint64_t pte; | ||||
| #endif | ||||
|     unsigned int flags;         /**< nt_hint, IO pin. */ | ||||
|   } | ||||
|   kernel;                       /**< Buffer as described by kernel. */ | ||||
| 
 | ||||
|   struct | ||||
|   { | ||||
|     unsigned long long pa;      /**< Physical address. */ | ||||
|     size_t size;                /**< Buffer size. */ | ||||
|     struct iorpc_mem_attr attr;      /**< Homing and locality hint bits. */ | ||||
|   } | ||||
|   hv;                           /**< Buffer parameters for HV driver. */ | ||||
| }; | ||||
| 
 | ||||
| 
 | ||||
| /** A structure used to describe interrupts.  The format differs slightly
 | ||||
|  *  for user and kernel interrupts.  As with the mem_buffer_t, translation | ||||
|  *  between the formats is done at each level. */ | ||||
| union iorpc_interrupt | ||||
| { | ||||
|   struct | ||||
|   { | ||||
|     int cpu;   /**< CPU. */ | ||||
|     int event; /**< evt_num */ | ||||
|   } | ||||
|   user;        /**< Interrupt as described by user applications. */ | ||||
| 
 | ||||
|   struct | ||||
|   { | ||||
|     int x;     /**< X coord. */ | ||||
|     int y;     /**< Y coord. */ | ||||
|     int ipi;   /**< int_num */ | ||||
|     int event; /**< evt_num */ | ||||
|   } | ||||
|   kernel;      /**< Interrupt as described by the kernel. */ | ||||
| 
 | ||||
| }; | ||||
| 
 | ||||
| 
 | ||||
| /** A structure used to describe interrupts used with poll().  The format
 | ||||
|  *  differs significantly for requests from user to kernel, and kernel to | ||||
|  *  hypervisor.  As with the mem_buffer_t, translation between the formats | ||||
|  *  is done at each level. */ | ||||
| union iorpc_pollfd_setup | ||||
| { | ||||
|   struct | ||||
|   { | ||||
|     int fd;    /**< Pollable file descriptor. */ | ||||
|   } | ||||
|   user;        /**< pollfd_setup as described by user applications. */ | ||||
| 
 | ||||
|   struct | ||||
|   { | ||||
|     int x;     /**< X coord. */ | ||||
|     int y;     /**< Y coord. */ | ||||
|     int ipi;   /**< int_num */ | ||||
|     int event; /**< evt_num */ | ||||
|   } | ||||
|   kernel;      /**< pollfd_setup as described by the kernel. */ | ||||
| 
 | ||||
| }; | ||||
| 
 | ||||
| 
 | ||||
| /** A structure used to describe previously set up interrupts used with
 | ||||
|  *  poll().  The format differs significantly for requests from user to | ||||
|  *  kernel, and kernel to hypervisor.  As with the mem_buffer_t, translation | ||||
|  *  between the formats is done at each level. */ | ||||
| union iorpc_pollfd | ||||
| { | ||||
|   struct | ||||
|   { | ||||
|     int fd;    /**< Pollable file descriptor. */ | ||||
|   } | ||||
|   user;        /**< pollfd as described by user applications. */ | ||||
| 
 | ||||
|   struct | ||||
|   { | ||||
|     int cookie; /**< hv cookie returned by the pollfd_setup operation. */ | ||||
|   } | ||||
|   kernel;      /**< pollfd as described by the kernel. */ | ||||
| 
 | ||||
| }; | ||||
| 
 | ||||
| 
 | ||||
| /** The various iorpc devices use error codes from -1100 to -1299.
 | ||||
|  * | ||||
|  * This range is distinct from netio (-700 to -799), the hypervisor | ||||
|  * (-800 to -899), tilepci (-900 to -999), ilib (-1000 to -1099), | ||||
|  * gxcr (-1300 to -1399) and gxpci (-1400 to -1499). | ||||
|  */ | ||||
| enum gxio_err_e { | ||||
| 
 | ||||
|   /** Largest iorpc error number. */ | ||||
|   GXIO_ERR_MAX = -1101, | ||||
| 
 | ||||
| 
 | ||||
|   /********************************************************/ | ||||
|   /*                   Generic Error Codes                */ | ||||
|   /********************************************************/ | ||||
| 
 | ||||
|   /** Bad RPC opcode - possible version incompatibility. */ | ||||
|   GXIO_ERR_OPCODE = -1101, | ||||
| 
 | ||||
|   /** Invalid parameter. */ | ||||
|   GXIO_ERR_INVAL = -1102, | ||||
| 
 | ||||
|   /** Memory buffer did not meet alignment requirements. */ | ||||
|   GXIO_ERR_ALIGNMENT = -1103, | ||||
| 
 | ||||
|   /** Memory buffers must be coherent and cacheable. */ | ||||
|   GXIO_ERR_COHERENCE = -1104, | ||||
| 
 | ||||
|   /** Resource already initialized. */ | ||||
|   GXIO_ERR_ALREADY_INIT = -1105, | ||||
| 
 | ||||
|   /** No service domains available. */ | ||||
|   GXIO_ERR_NO_SVC_DOM = -1106, | ||||
| 
 | ||||
|   /** Illegal service domain number. */ | ||||
|   GXIO_ERR_INVAL_SVC_DOM = -1107, | ||||
| 
 | ||||
|   /** Illegal MMIO address. */ | ||||
|   GXIO_ERR_MMIO_ADDRESS = -1108, | ||||
| 
 | ||||
|   /** Illegal interrupt binding. */ | ||||
|   GXIO_ERR_INTERRUPT = -1109, | ||||
| 
 | ||||
|   /** Unreasonable client memory. */ | ||||
|   GXIO_ERR_CLIENT_MEMORY = -1110, | ||||
| 
 | ||||
|   /** No more IOTLB entries. */ | ||||
|   GXIO_ERR_IOTLB_ENTRY = -1111, | ||||
| 
 | ||||
|   /** Invalid memory size. */ | ||||
|   GXIO_ERR_INVAL_MEMORY_SIZE = -1112, | ||||
| 
 | ||||
|   /** Unsupported operation. */ | ||||
|   GXIO_ERR_UNSUPPORTED_OP = -1113, | ||||
| 
 | ||||
|   /** Insufficient DMA credits. */ | ||||
|   GXIO_ERR_DMA_CREDITS = -1114, | ||||
| 
 | ||||
|   /** Operation timed out. */ | ||||
|   GXIO_ERR_TIMEOUT = -1115, | ||||
| 
 | ||||
|   /** No such device or object. */ | ||||
|   GXIO_ERR_NO_DEVICE = -1116, | ||||
| 
 | ||||
|   /** Device or resource busy. */ | ||||
|   GXIO_ERR_BUSY = -1117, | ||||
| 
 | ||||
|   /** I/O error. */ | ||||
|   GXIO_ERR_IO = -1118, | ||||
| 
 | ||||
|   /** Permissions error. */ | ||||
|   GXIO_ERR_PERM = -1119, | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
|   /********************************************************/ | ||||
|   /*                 Test Device Error Codes              */ | ||||
|   /********************************************************/ | ||||
| 
 | ||||
|   /** Illegal register number. */ | ||||
|   GXIO_TEST_ERR_REG_NUMBER = -1120, | ||||
| 
 | ||||
|   /** Illegal buffer slot. */ | ||||
|   GXIO_TEST_ERR_BUFFER_SLOT = -1121, | ||||
| 
 | ||||
| 
 | ||||
|   /********************************************************/ | ||||
|   /*                    MPIPE Error Codes                 */ | ||||
|   /********************************************************/ | ||||
| 
 | ||||
| 
 | ||||
|   /** Invalid buffer size. */ | ||||
|   GXIO_MPIPE_ERR_INVAL_BUFFER_SIZE = -1131, | ||||
| 
 | ||||
|   /** Cannot allocate buffer stack. */ | ||||
|   GXIO_MPIPE_ERR_NO_BUFFER_STACK = -1140, | ||||
| 
 | ||||
|   /** Invalid buffer stack number. */ | ||||
|   GXIO_MPIPE_ERR_BAD_BUFFER_STACK = -1141, | ||||
| 
 | ||||
|   /** Cannot allocate NotifRing. */ | ||||
|   GXIO_MPIPE_ERR_NO_NOTIF_RING = -1142, | ||||
| 
 | ||||
|   /** Invalid NotifRing number. */ | ||||
|   GXIO_MPIPE_ERR_BAD_NOTIF_RING = -1143, | ||||
| 
 | ||||
|   /** Cannot allocate NotifGroup. */ | ||||
|   GXIO_MPIPE_ERR_NO_NOTIF_GROUP = -1144, | ||||
| 
 | ||||
|   /** Invalid NotifGroup number. */ | ||||
|   GXIO_MPIPE_ERR_BAD_NOTIF_GROUP = -1145, | ||||
| 
 | ||||
|   /** Cannot allocate bucket. */ | ||||
|   GXIO_MPIPE_ERR_NO_BUCKET = -1146, | ||||
| 
 | ||||
|   /** Invalid bucket number. */ | ||||
|   GXIO_MPIPE_ERR_BAD_BUCKET = -1147, | ||||
| 
 | ||||
|   /** Cannot allocate eDMA ring. */ | ||||
|   GXIO_MPIPE_ERR_NO_EDMA_RING = -1148, | ||||
| 
 | ||||
|   /** Invalid eDMA ring number. */ | ||||
|   GXIO_MPIPE_ERR_BAD_EDMA_RING = -1149, | ||||
| 
 | ||||
|   /** Invalid channel number. */ | ||||
|   GXIO_MPIPE_ERR_BAD_CHANNEL = -1150, | ||||
| 
 | ||||
|   /** Bad configuration. */ | ||||
|   GXIO_MPIPE_ERR_BAD_CONFIG = -1151, | ||||
| 
 | ||||
|   /** Empty iqueue. */ | ||||
|   GXIO_MPIPE_ERR_IQUEUE_EMPTY = -1152, | ||||
| 
 | ||||
|   /** Empty rules. */ | ||||
|   GXIO_MPIPE_ERR_RULES_EMPTY = -1160, | ||||
| 
 | ||||
|   /** Full rules. */ | ||||
|   GXIO_MPIPE_ERR_RULES_FULL = -1161, | ||||
| 
 | ||||
|   /** Corrupt rules. */ | ||||
|   GXIO_MPIPE_ERR_RULES_CORRUPT = -1162, | ||||
| 
 | ||||
|   /** Invalid rules. */ | ||||
|   GXIO_MPIPE_ERR_RULES_INVALID = -1163, | ||||
| 
 | ||||
|   /** Classifier is too big. */ | ||||
|   GXIO_MPIPE_ERR_CLASSIFIER_TOO_BIG = -1170, | ||||
| 
 | ||||
|   /** Classifier is too complex. */ | ||||
|   GXIO_MPIPE_ERR_CLASSIFIER_TOO_COMPLEX = -1171, | ||||
| 
 | ||||
|   /** Classifier has bad header. */ | ||||
|   GXIO_MPIPE_ERR_CLASSIFIER_BAD_HEADER = -1172, | ||||
| 
 | ||||
|   /** Classifier has bad contents. */ | ||||
|   GXIO_MPIPE_ERR_CLASSIFIER_BAD_CONTENTS = -1173, | ||||
| 
 | ||||
|   /** Classifier encountered invalid symbol. */ | ||||
|   GXIO_MPIPE_ERR_CLASSIFIER_INVAL_SYMBOL = -1174, | ||||
| 
 | ||||
|   /** Classifier encountered invalid bounds. */ | ||||
|   GXIO_MPIPE_ERR_CLASSIFIER_INVAL_BOUNDS = -1175, | ||||
| 
 | ||||
|   /** Classifier encountered invalid relocation. */ | ||||
|   GXIO_MPIPE_ERR_CLASSIFIER_INVAL_RELOCATION = -1176, | ||||
| 
 | ||||
|   /** Classifier encountered undefined symbol. */ | ||||
|   GXIO_MPIPE_ERR_CLASSIFIER_UNDEF_SYMBOL = -1177, | ||||
| 
 | ||||
| 
 | ||||
|   /********************************************************/ | ||||
|   /*                    TRIO  Error Codes                 */ | ||||
|   /********************************************************/ | ||||
| 
 | ||||
|   /** Cannot allocate memory map region. */ | ||||
|   GXIO_TRIO_ERR_NO_MEMORY_MAP = -1180, | ||||
| 
 | ||||
|   /** Invalid memory map region number. */ | ||||
|   GXIO_TRIO_ERR_BAD_MEMORY_MAP = -1181, | ||||
| 
 | ||||
|   /** Cannot allocate scatter queue. */ | ||||
|   GXIO_TRIO_ERR_NO_SCATTER_QUEUE = -1182, | ||||
| 
 | ||||
|   /** Invalid scatter queue number. */ | ||||
|   GXIO_TRIO_ERR_BAD_SCATTER_QUEUE = -1183, | ||||
| 
 | ||||
|   /** Cannot allocate push DMA ring. */ | ||||
|   GXIO_TRIO_ERR_NO_PUSH_DMA_RING = -1184, | ||||
| 
 | ||||
|   /** Invalid push DMA ring index. */ | ||||
|   GXIO_TRIO_ERR_BAD_PUSH_DMA_RING = -1185, | ||||
| 
 | ||||
|   /** Cannot allocate pull DMA ring. */ | ||||
|   GXIO_TRIO_ERR_NO_PULL_DMA_RING = -1186, | ||||
| 
 | ||||
|   /** Invalid pull DMA ring index. */ | ||||
|   GXIO_TRIO_ERR_BAD_PULL_DMA_RING = -1187, | ||||
| 
 | ||||
|   /** Cannot allocate PIO region. */ | ||||
|   GXIO_TRIO_ERR_NO_PIO = -1188, | ||||
| 
 | ||||
|   /** Invalid PIO region index. */ | ||||
|   GXIO_TRIO_ERR_BAD_PIO = -1189, | ||||
| 
 | ||||
|   /** Cannot allocate ASID. */ | ||||
|   GXIO_TRIO_ERR_NO_ASID = -1190, | ||||
| 
 | ||||
|   /** Invalid ASID. */ | ||||
|   GXIO_TRIO_ERR_BAD_ASID = -1191, | ||||
| 
 | ||||
| 
 | ||||
|   /********************************************************/ | ||||
|   /*                    MICA Error Codes                  */ | ||||
|   /********************************************************/ | ||||
| 
 | ||||
|   /** No such accelerator type. */ | ||||
|   GXIO_MICA_ERR_BAD_ACCEL_TYPE = -1220, | ||||
| 
 | ||||
|   /** Cannot allocate context. */ | ||||
|   GXIO_MICA_ERR_NO_CONTEXT = -1221, | ||||
| 
 | ||||
|   /** PKA command queue is full, can't add another command. */ | ||||
|   GXIO_MICA_ERR_PKA_CMD_QUEUE_FULL = -1222, | ||||
| 
 | ||||
|   /** PKA result queue is empty, can't get a result from the queue. */ | ||||
|   GXIO_MICA_ERR_PKA_RESULT_QUEUE_EMPTY = -1223, | ||||
| 
 | ||||
|   /********************************************************/ | ||||
|   /*                    GPIO Error Codes                  */ | ||||
|   /********************************************************/ | ||||
| 
 | ||||
|   /** Pin not available.  Either the physical pin does not exist, or
 | ||||
|    *  it is reserved by the hypervisor for system usage. */ | ||||
|   GXIO_GPIO_ERR_PIN_UNAVAILABLE = -1240, | ||||
| 
 | ||||
|   /** Pin busy.  The pin exists, and is available for use via GXIO, but
 | ||||
|    *  it has been attached by some other process or driver. */ | ||||
|   GXIO_GPIO_ERR_PIN_BUSY = -1241, | ||||
| 
 | ||||
|   /** Cannot access unattached pin.  One or more of the pins being
 | ||||
|    *  manipulated by this call are not attached to the requesting | ||||
|    *  context. */ | ||||
|   GXIO_GPIO_ERR_PIN_UNATTACHED = -1242, | ||||
| 
 | ||||
|   /** Invalid I/O mode for pin.  The wiring of the pin in the system
 | ||||
|    *  is such that the I/O mode or electrical control parameters | ||||
|    *  requested could cause damage. */ | ||||
|   GXIO_GPIO_ERR_PIN_INVALID_MODE = -1243, | ||||
| 
 | ||||
|   /** Smallest iorpc error number. */ | ||||
|   GXIO_ERR_MIN = -1299 | ||||
| }; | ||||
| 
 | ||||
| 
 | ||||
| #endif /* !_HV_IORPC_H_ */ | ||||
| @ -14,4 +14,9 @@ obj-$(CONFIG_SMP)		+= smpboot.o smp.o tlb.o | ||||
| obj-$(CONFIG_MODULES)		+= module.o | ||||
| obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o | ||||
| obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel_$(BITS).o | ||||
| ifdef CONFIG_TILEGX | ||||
| obj-$(CONFIG_PCI)		+= pci_gx.o | ||||
| else | ||||
| obj-$(CONFIG_PCI)		+= pci.o | ||||
| endif | ||||
| obj-$(CONFIG_TILE_USB)		+= usb.o | ||||
|  | ||||
| @ -14,6 +14,7 @@ | ||||
| 
 | ||||
| #include <linux/mm.h> | ||||
| #include <linux/dma-mapping.h> | ||||
| #include <linux/swiotlb.h> | ||||
| #include <linux/vmalloc.h> | ||||
| #include <linux/export.h> | ||||
| #include <asm/tlbflush.h> | ||||
| @ -22,13 +23,18 @@ | ||||
| /* Generic DMA mapping functions: */ | ||||
| 
 | ||||
| /*
 | ||||
|  * Allocate what Linux calls "coherent" memory, which for us just | ||||
|  * means uncached. | ||||
|  * Allocate what Linux calls "coherent" memory.  On TILEPro this is | ||||
|  * uncached memory; on TILE-Gx it is hash-for-home memory. | ||||
|  */ | ||||
| void *dma_alloc_coherent(struct device *dev, | ||||
| 			 size_t size, | ||||
| 			 dma_addr_t *dma_handle, | ||||
| 			 gfp_t gfp) | ||||
| #ifdef __tilepro__ | ||||
| #define PAGE_HOME_DMA PAGE_HOME_UNCACHED | ||||
| #else | ||||
| #define PAGE_HOME_DMA PAGE_HOME_HASH | ||||
| #endif | ||||
| 
 | ||||
| static void *tile_dma_alloc_coherent(struct device *dev, size_t size, | ||||
| 				     dma_addr_t *dma_handle, gfp_t gfp, | ||||
| 				     struct dma_attrs *attrs) | ||||
| { | ||||
| 	u64 dma_mask = dev->coherent_dma_mask ?: DMA_BIT_MASK(32); | ||||
| 	int node = dev_to_node(dev); | ||||
| @ -39,39 +45,42 @@ void *dma_alloc_coherent(struct device *dev, | ||||
| 	gfp |= __GFP_ZERO; | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * By forcing NUMA node 0 for 32-bit masks we ensure that the | ||||
| 	 * high 32 bits of the resulting PA will be zero.  If the mask | ||||
| 	 * size is, e.g., 24, we may still not be able to guarantee a | ||||
| 	 * suitable memory address, in which case we will return NULL. | ||||
| 	 * But such devices are uncommon. | ||||
| 	 * If the mask specifies that the memory be in the first 4 GB, then | ||||
| 	 * we force the allocation to come from the DMA zone.  We also | ||||
| 	 * force the node to 0 since that's the only node where the DMA | ||||
| 	 * zone isn't empty.  If the mask size is smaller than 32 bits, we | ||||
| 	 * may still not be able to guarantee a suitable memory address, in | ||||
| 	 * which case we will return NULL.  But such devices are uncommon. | ||||
| 	 */ | ||||
| 	if (dma_mask <= DMA_BIT_MASK(32)) | ||||
| 	if (dma_mask <= DMA_BIT_MASK(32)) { | ||||
| 		gfp |= GFP_DMA; | ||||
| 		node = 0; | ||||
| 	} | ||||
| 
 | ||||
| 	pg = homecache_alloc_pages_node(node, gfp, order, PAGE_HOME_UNCACHED); | ||||
| 	pg = homecache_alloc_pages_node(node, gfp, order, PAGE_HOME_DMA); | ||||
| 	if (pg == NULL) | ||||
| 		return NULL; | ||||
| 
 | ||||
| 	addr = page_to_phys(pg); | ||||
| 	if (addr + size > dma_mask) { | ||||
| 		homecache_free_pages(addr, order); | ||||
| 		__homecache_free_pages(pg, order); | ||||
| 		return NULL; | ||||
| 	} | ||||
| 
 | ||||
| 	*dma_handle = addr; | ||||
| 
 | ||||
| 	return page_address(pg); | ||||
| } | ||||
| EXPORT_SYMBOL(dma_alloc_coherent); | ||||
| 
 | ||||
| /*
 | ||||
|  * Free memory that was allocated with dma_alloc_coherent. | ||||
|  * Free memory that was allocated with tile_dma_alloc_coherent. | ||||
|  */ | ||||
| void dma_free_coherent(struct device *dev, size_t size, | ||||
| 		  void *vaddr, dma_addr_t dma_handle) | ||||
| static void tile_dma_free_coherent(struct device *dev, size_t size, | ||||
| 				   void *vaddr, dma_addr_t dma_handle, | ||||
| 				   struct dma_attrs *attrs) | ||||
| { | ||||
| 	homecache_free_pages((unsigned long)vaddr, get_order(size)); | ||||
| } | ||||
| EXPORT_SYMBOL(dma_free_coherent); | ||||
| 
 | ||||
| /*
 | ||||
|  * The map routines "map" the specified address range for DMA | ||||
| @ -87,52 +96,112 @@ EXPORT_SYMBOL(dma_free_coherent); | ||||
|  * can count on nothing having been touched. | ||||
|  */ | ||||
| 
 | ||||
| /* Flush a PA range from cache page by page. */ | ||||
| static void __dma_map_pa_range(dma_addr_t dma_addr, size_t size) | ||||
| /* Set up a single page for DMA access. */ | ||||
| static void __dma_prep_page(struct page *page, unsigned long offset, | ||||
| 			    size_t size, enum dma_data_direction direction) | ||||
| { | ||||
| 	/*
 | ||||
| 	 * Flush the page from cache if necessary. | ||||
| 	 * On tilegx, data is delivered to hash-for-home L3; on tilepro, | ||||
| 	 * data is delivered direct to memory. | ||||
| 	 * | ||||
| 	 * NOTE: If we were just doing DMA_TO_DEVICE we could optimize | ||||
| 	 * this to be a "flush" not a "finv" and keep some of the | ||||
| 	 * state in cache across the DMA operation, but it doesn't seem | ||||
| 	 * worth creating the necessary flush_buffer_xxx() infrastructure. | ||||
| 	 */ | ||||
| 	int home = page_home(page); | ||||
| 	switch (home) { | ||||
| 	case PAGE_HOME_HASH: | ||||
| #ifdef __tilegx__ | ||||
| 		return; | ||||
| #endif | ||||
| 		break; | ||||
| 	case PAGE_HOME_UNCACHED: | ||||
| #ifdef __tilepro__ | ||||
| 		return; | ||||
| #endif | ||||
| 		break; | ||||
| 	case PAGE_HOME_IMMUTABLE: | ||||
| 		/* Should be going to the device only. */ | ||||
| 		BUG_ON(direction == DMA_FROM_DEVICE || | ||||
| 		       direction == DMA_BIDIRECTIONAL); | ||||
| 		return; | ||||
| 	case PAGE_HOME_INCOHERENT: | ||||
| 		/* Incoherent anyway, so no need to work hard here. */ | ||||
| 		return; | ||||
| 	default: | ||||
| 		BUG_ON(home < 0 || home >= NR_CPUS); | ||||
| 		break; | ||||
| 	} | ||||
| 	homecache_finv_page(page); | ||||
| 
 | ||||
| #ifdef DEBUG_ALIGNMENT | ||||
| 	/* Warn if the region isn't cacheline aligned. */ | ||||
| 	if (offset & (L2_CACHE_BYTES - 1) || (size & (L2_CACHE_BYTES - 1))) | ||||
| 		pr_warn("Unaligned DMA to non-hfh memory: PA %#llx/%#lx\n", | ||||
| 			PFN_PHYS(page_to_pfn(page)) + offset, size); | ||||
| #endif | ||||
| } | ||||
| 
 | ||||
| /* Make the page ready to be read by the core. */ | ||||
| static void __dma_complete_page(struct page *page, unsigned long offset, | ||||
| 				size_t size, enum dma_data_direction direction) | ||||
| { | ||||
| #ifdef __tilegx__ | ||||
| 	switch (page_home(page)) { | ||||
| 	case PAGE_HOME_HASH: | ||||
| 		/* I/O device delivered data the way the cpu wanted it. */ | ||||
| 		break; | ||||
| 	case PAGE_HOME_INCOHERENT: | ||||
| 		/* Incoherent anyway, so no need to work hard here. */ | ||||
| 		break; | ||||
| 	case PAGE_HOME_IMMUTABLE: | ||||
| 		/* Extra read-only copies are not a problem. */ | ||||
| 		break; | ||||
| 	default: | ||||
| 		/* Flush the bogus hash-for-home I/O entries to memory. */ | ||||
| 		homecache_finv_map_page(page, PAGE_HOME_HASH); | ||||
| 		break; | ||||
| 	} | ||||
| #endif | ||||
| } | ||||
| 
 | ||||
| static void __dma_prep_pa_range(dma_addr_t dma_addr, size_t size, | ||||
| 				enum dma_data_direction direction) | ||||
| { | ||||
| 	struct page *page = pfn_to_page(PFN_DOWN(dma_addr)); | ||||
| 	size_t bytesleft = PAGE_SIZE - (dma_addr & (PAGE_SIZE - 1)); | ||||
| 	unsigned long offset = dma_addr & (PAGE_SIZE - 1); | ||||
| 	size_t bytes = min(size, (size_t)(PAGE_SIZE - offset)); | ||||
| 
 | ||||
| 	while ((ssize_t)size > 0) { | ||||
| 		/* Flush the page. */ | ||||
| 		homecache_flush_cache(page++, 0); | ||||
| 
 | ||||
| 		/* Figure out if we need to continue on the next page. */ | ||||
| 		size -= bytesleft; | ||||
| 		bytesleft = PAGE_SIZE; | ||||
| 	while (size != 0) { | ||||
| 		__dma_prep_page(page, offset, bytes, direction); | ||||
| 		size -= bytes; | ||||
| 		++page; | ||||
| 		offset = 0; | ||||
| 		bytes = min((size_t)PAGE_SIZE, size); | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  * dma_map_single can be passed any memory address, and there appear | ||||
|  * to be no alignment constraints. | ||||
|  * | ||||
|  * There is a chance that the start of the buffer will share a cache | ||||
|  * line with some other data that has been touched in the meantime. | ||||
|  */ | ||||
| dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size, | ||||
| 	       enum dma_data_direction direction) | ||||
| static void __dma_complete_pa_range(dma_addr_t dma_addr, size_t size, | ||||
| 				    enum dma_data_direction direction) | ||||
| { | ||||
| 	dma_addr_t dma_addr = __pa(ptr); | ||||
| 	struct page *page = pfn_to_page(PFN_DOWN(dma_addr)); | ||||
| 	unsigned long offset = dma_addr & (PAGE_SIZE - 1); | ||||
| 	size_t bytes = min(size, (size_t)(PAGE_SIZE - offset)); | ||||
| 
 | ||||
| 	BUG_ON(!valid_dma_direction(direction)); | ||||
| 	WARN_ON(size == 0); | ||||
| 
 | ||||
| 	__dma_map_pa_range(dma_addr, size); | ||||
| 
 | ||||
| 	return dma_addr; | ||||
| 	while (size != 0) { | ||||
| 		__dma_complete_page(page, offset, bytes, direction); | ||||
| 		size -= bytes; | ||||
| 		++page; | ||||
| 		offset = 0; | ||||
| 		bytes = min((size_t)PAGE_SIZE, size); | ||||
| 	} | ||||
| } | ||||
| EXPORT_SYMBOL(dma_map_single); | ||||
| 
 | ||||
| void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, | ||||
| 		 enum dma_data_direction direction) | ||||
| { | ||||
| 	BUG_ON(!valid_dma_direction(direction)); | ||||
| } | ||||
| EXPORT_SYMBOL(dma_unmap_single); | ||||
| 
 | ||||
| int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, | ||||
| 	   enum dma_data_direction direction) | ||||
| static int tile_dma_map_sg(struct device *dev, struct scatterlist *sglist, | ||||
| 			   int nents, enum dma_data_direction direction, | ||||
| 			   struct dma_attrs *attrs) | ||||
| { | ||||
| 	struct scatterlist *sg; | ||||
| 	int i; | ||||
| @ -143,73 +212,89 @@ int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, | ||||
| 
 | ||||
| 	for_each_sg(sglist, sg, nents, i) { | ||||
| 		sg->dma_address = sg_phys(sg); | ||||
| 		__dma_map_pa_range(sg->dma_address, sg->length); | ||||
| 		__dma_prep_pa_range(sg->dma_address, sg->length, direction); | ||||
| #ifdef CONFIG_NEED_SG_DMA_LENGTH | ||||
| 		sg->dma_length = sg->length; | ||||
| #endif | ||||
| 	} | ||||
| 
 | ||||
| 	return nents; | ||||
| } | ||||
| EXPORT_SYMBOL(dma_map_sg); | ||||
| 
 | ||||
| void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, | ||||
| 	     enum dma_data_direction direction) | ||||
| static void tile_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, | ||||
| 			      int nents, enum dma_data_direction direction, | ||||
| 			      struct dma_attrs *attrs) | ||||
| { | ||||
| 	BUG_ON(!valid_dma_direction(direction)); | ||||
| } | ||||
| EXPORT_SYMBOL(dma_unmap_sg); | ||||
| 	struct scatterlist *sg; | ||||
| 	int i; | ||||
| 
 | ||||
| dma_addr_t dma_map_page(struct device *dev, struct page *page, | ||||
| 			unsigned long offset, size_t size, | ||||
| 			enum dma_data_direction direction) | ||||
| 	BUG_ON(!valid_dma_direction(direction)); | ||||
| 	for_each_sg(sglist, sg, nents, i) { | ||||
| 		sg->dma_address = sg_phys(sg); | ||||
| 		__dma_complete_pa_range(sg->dma_address, sg->length, | ||||
| 					direction); | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| static dma_addr_t tile_dma_map_page(struct device *dev, struct page *page, | ||||
| 				    unsigned long offset, size_t size, | ||||
| 				    enum dma_data_direction direction, | ||||
| 				    struct dma_attrs *attrs) | ||||
| { | ||||
| 	BUG_ON(!valid_dma_direction(direction)); | ||||
| 
 | ||||
| 	BUG_ON(offset + size > PAGE_SIZE); | ||||
| 	homecache_flush_cache(page, 0); | ||||
| 	__dma_prep_page(page, offset, size, direction); | ||||
| 
 | ||||
| 	return page_to_pa(page) + offset; | ||||
| } | ||||
| EXPORT_SYMBOL(dma_map_page); | ||||
| 
 | ||||
| void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, | ||||
| 	       enum dma_data_direction direction) | ||||
| static void tile_dma_unmap_page(struct device *dev, dma_addr_t dma_address, | ||||
| 				size_t size, enum dma_data_direction direction, | ||||
| 				struct dma_attrs *attrs) | ||||
| { | ||||
| 	BUG_ON(!valid_dma_direction(direction)); | ||||
| } | ||||
| EXPORT_SYMBOL(dma_unmap_page); | ||||
| 
 | ||||
| void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, | ||||
| 			     size_t size, enum dma_data_direction direction) | ||||
| 	__dma_complete_page(pfn_to_page(PFN_DOWN(dma_address)), | ||||
| 			    dma_address & PAGE_OFFSET, size, direction); | ||||
| } | ||||
| 
 | ||||
| static void tile_dma_sync_single_for_cpu(struct device *dev, | ||||
| 					 dma_addr_t dma_handle, | ||||
| 					 size_t size, | ||||
| 					 enum dma_data_direction direction) | ||||
| { | ||||
| 	BUG_ON(!valid_dma_direction(direction)); | ||||
| } | ||||
| EXPORT_SYMBOL(dma_sync_single_for_cpu); | ||||
| 
 | ||||
| void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, | ||||
| 				size_t size, enum dma_data_direction direction) | ||||
| 	__dma_complete_pa_range(dma_handle, size, direction); | ||||
| } | ||||
| 
 | ||||
| static void tile_dma_sync_single_for_device(struct device *dev, | ||||
| 					    dma_addr_t dma_handle, size_t size, | ||||
| 					    enum dma_data_direction direction) | ||||
| { | ||||
| 	unsigned long start = PFN_DOWN(dma_handle); | ||||
| 	unsigned long end = PFN_DOWN(dma_handle + size - 1); | ||||
| 	unsigned long i; | ||||
| 	__dma_prep_pa_range(dma_handle, size, direction); | ||||
| } | ||||
| 
 | ||||
| static void tile_dma_sync_sg_for_cpu(struct device *dev, | ||||
| 				     struct scatterlist *sglist, int nelems, | ||||
| 				     enum dma_data_direction direction) | ||||
| { | ||||
| 	struct scatterlist *sg; | ||||
| 	int i; | ||||
| 
 | ||||
| 	BUG_ON(!valid_dma_direction(direction)); | ||||
| 	for (i = start; i <= end; ++i) | ||||
| 		homecache_flush_cache(pfn_to_page(i), 0); | ||||
| } | ||||
| EXPORT_SYMBOL(dma_sync_single_for_device); | ||||
| 	WARN_ON(nelems == 0 || sglist->length == 0); | ||||
| 
 | ||||
| void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, | ||||
| 		    enum dma_data_direction direction) | ||||
| { | ||||
| 	BUG_ON(!valid_dma_direction(direction)); | ||||
| 	WARN_ON(nelems == 0 || sg[0].length == 0); | ||||
| 	for_each_sg(sglist, sg, nelems, i) { | ||||
| 		dma_sync_single_for_cpu(dev, sg->dma_address, | ||||
| 					sg_dma_len(sg), direction); | ||||
| 	} | ||||
| } | ||||
| EXPORT_SYMBOL(dma_sync_sg_for_cpu); | ||||
| 
 | ||||
| /*
 | ||||
|  * Flush and invalidate cache for scatterlist. | ||||
|  */ | ||||
| void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, | ||||
| 			    int nelems, enum dma_data_direction direction) | ||||
| static void tile_dma_sync_sg_for_device(struct device *dev, | ||||
| 					struct scatterlist *sglist, int nelems, | ||||
| 					enum dma_data_direction direction) | ||||
| { | ||||
| 	struct scatterlist *sg; | ||||
| 	int i; | ||||
| @ -222,31 +307,280 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, | ||||
| 					   sg_dma_len(sg), direction); | ||||
| 	} | ||||
| } | ||||
| EXPORT_SYMBOL(dma_sync_sg_for_device); | ||||
| 
 | ||||
| void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, | ||||
| 				   unsigned long offset, size_t size, | ||||
| 				   enum dma_data_direction direction) | ||||
| static inline int | ||||
| tile_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | ||||
| { | ||||
| 	dma_sync_single_for_cpu(dev, dma_handle + offset, size, direction); | ||||
| 	return 0; | ||||
| } | ||||
| EXPORT_SYMBOL(dma_sync_single_range_for_cpu); | ||||
| 
 | ||||
| void dma_sync_single_range_for_device(struct device *dev, | ||||
| 				      dma_addr_t dma_handle, | ||||
| 				      unsigned long offset, size_t size, | ||||
| 				      enum dma_data_direction direction) | ||||
| static inline int | ||||
| tile_dma_supported(struct device *dev, u64 mask) | ||||
| { | ||||
| 	dma_sync_single_for_device(dev, dma_handle + offset, size, direction); | ||||
| 	return 1; | ||||
| } | ||||
| 
 | ||||
| static struct dma_map_ops tile_default_dma_map_ops = { | ||||
| 	.alloc = tile_dma_alloc_coherent, | ||||
| 	.free = tile_dma_free_coherent, | ||||
| 	.map_page = tile_dma_map_page, | ||||
| 	.unmap_page = tile_dma_unmap_page, | ||||
| 	.map_sg = tile_dma_map_sg, | ||||
| 	.unmap_sg = tile_dma_unmap_sg, | ||||
| 	.sync_single_for_cpu = tile_dma_sync_single_for_cpu, | ||||
| 	.sync_single_for_device = tile_dma_sync_single_for_device, | ||||
| 	.sync_sg_for_cpu = tile_dma_sync_sg_for_cpu, | ||||
| 	.sync_sg_for_device = tile_dma_sync_sg_for_device, | ||||
| 	.mapping_error = tile_dma_mapping_error, | ||||
| 	.dma_supported = tile_dma_supported | ||||
| }; | ||||
| 
 | ||||
| struct dma_map_ops *tile_dma_map_ops = &tile_default_dma_map_ops; | ||||
| EXPORT_SYMBOL(tile_dma_map_ops); | ||||
| 
 | ||||
| /* Generic PCI DMA mapping functions */ | ||||
| 
 | ||||
| static void *tile_pci_dma_alloc_coherent(struct device *dev, size_t size, | ||||
| 					 dma_addr_t *dma_handle, gfp_t gfp, | ||||
| 					 struct dma_attrs *attrs) | ||||
| { | ||||
| 	int node = dev_to_node(dev); | ||||
| 	int order = get_order(size); | ||||
| 	struct page *pg; | ||||
| 	dma_addr_t addr; | ||||
| 
 | ||||
| 	gfp |= __GFP_ZERO; | ||||
| 
 | ||||
| 	pg = homecache_alloc_pages_node(node, gfp, order, PAGE_HOME_DMA); | ||||
| 	if (pg == NULL) | ||||
| 		return NULL; | ||||
| 
 | ||||
| 	addr = page_to_phys(pg); | ||||
| 
 | ||||
| 	*dma_handle = phys_to_dma(dev, addr); | ||||
| 
 | ||||
| 	return page_address(pg); | ||||
| } | ||||
| EXPORT_SYMBOL(dma_sync_single_range_for_device); | ||||
| 
 | ||||
| /*
 | ||||
|  * dma_alloc_noncoherent() returns non-cacheable memory, so there's no | ||||
|  * need to do any flushing here. | ||||
|  * Free memory that was allocated with tile_pci_dma_alloc_coherent. | ||||
|  */ | ||||
| void dma_cache_sync(struct device *dev, void *vaddr, size_t size, | ||||
| 		    enum dma_data_direction direction) | ||||
| static void tile_pci_dma_free_coherent(struct device *dev, size_t size, | ||||
| 				       void *vaddr, dma_addr_t dma_handle, | ||||
| 				       struct dma_attrs *attrs) | ||||
| { | ||||
| 	homecache_free_pages((unsigned long)vaddr, get_order(size)); | ||||
| } | ||||
| EXPORT_SYMBOL(dma_cache_sync); | ||||
| 
 | ||||
| static int tile_pci_dma_map_sg(struct device *dev, struct scatterlist *sglist, | ||||
| 			       int nents, enum dma_data_direction direction, | ||||
| 			       struct dma_attrs *attrs) | ||||
| { | ||||
| 	struct scatterlist *sg; | ||||
| 	int i; | ||||
| 
 | ||||
| 	BUG_ON(!valid_dma_direction(direction)); | ||||
| 
 | ||||
| 	WARN_ON(nents == 0 || sglist->length == 0); | ||||
| 
 | ||||
| 	for_each_sg(sglist, sg, nents, i) { | ||||
| 		sg->dma_address = sg_phys(sg); | ||||
| 		__dma_prep_pa_range(sg->dma_address, sg->length, direction); | ||||
| 
 | ||||
| 		sg->dma_address = phys_to_dma(dev, sg->dma_address); | ||||
| #ifdef CONFIG_NEED_SG_DMA_LENGTH | ||||
| 		sg->dma_length = sg->length; | ||||
| #endif | ||||
| 	} | ||||
| 
 | ||||
| 	return nents; | ||||
| } | ||||
| 
 | ||||
| static void tile_pci_dma_unmap_sg(struct device *dev, | ||||
| 				  struct scatterlist *sglist, int nents, | ||||
| 				  enum dma_data_direction direction, | ||||
| 				  struct dma_attrs *attrs) | ||||
| { | ||||
| 	struct scatterlist *sg; | ||||
| 	int i; | ||||
| 
 | ||||
| 	BUG_ON(!valid_dma_direction(direction)); | ||||
| 	for_each_sg(sglist, sg, nents, i) { | ||||
| 		sg->dma_address = sg_phys(sg); | ||||
| 		__dma_complete_pa_range(sg->dma_address, sg->length, | ||||
| 					direction); | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| static dma_addr_t tile_pci_dma_map_page(struct device *dev, struct page *page, | ||||
| 					unsigned long offset, size_t size, | ||||
| 					enum dma_data_direction direction, | ||||
| 					struct dma_attrs *attrs) | ||||
| { | ||||
| 	BUG_ON(!valid_dma_direction(direction)); | ||||
| 
 | ||||
| 	BUG_ON(offset + size > PAGE_SIZE); | ||||
| 	__dma_prep_page(page, offset, size, direction); | ||||
| 
 | ||||
| 	return phys_to_dma(dev, page_to_pa(page) + offset); | ||||
| } | ||||
| 
 | ||||
| static void tile_pci_dma_unmap_page(struct device *dev, dma_addr_t dma_address, | ||||
| 				    size_t size, | ||||
| 				    enum dma_data_direction direction, | ||||
| 				    struct dma_attrs *attrs) | ||||
| { | ||||
| 	BUG_ON(!valid_dma_direction(direction)); | ||||
| 
 | ||||
| 	dma_address = dma_to_phys(dev, dma_address); | ||||
| 
 | ||||
| 	__dma_complete_page(pfn_to_page(PFN_DOWN(dma_address)), | ||||
| 			    dma_address & PAGE_OFFSET, size, direction); | ||||
| } | ||||
| 
 | ||||
| static void tile_pci_dma_sync_single_for_cpu(struct device *dev, | ||||
| 					     dma_addr_t dma_handle, | ||||
| 					     size_t size, | ||||
| 					     enum dma_data_direction direction) | ||||
| { | ||||
| 	BUG_ON(!valid_dma_direction(direction)); | ||||
| 
 | ||||
| 	dma_handle = dma_to_phys(dev, dma_handle); | ||||
| 
 | ||||
| 	__dma_complete_pa_range(dma_handle, size, direction); | ||||
| } | ||||
| 
 | ||||
| static void tile_pci_dma_sync_single_for_device(struct device *dev, | ||||
| 						dma_addr_t dma_handle, | ||||
| 						size_t size, | ||||
| 						enum dma_data_direction | ||||
| 						direction) | ||||
| { | ||||
| 	dma_handle = dma_to_phys(dev, dma_handle); | ||||
| 
 | ||||
| 	__dma_prep_pa_range(dma_handle, size, direction); | ||||
| } | ||||
| 
 | ||||
| static void tile_pci_dma_sync_sg_for_cpu(struct device *dev, | ||||
| 					 struct scatterlist *sglist, | ||||
| 					 int nelems, | ||||
| 					 enum dma_data_direction direction) | ||||
| { | ||||
| 	struct scatterlist *sg; | ||||
| 	int i; | ||||
| 
 | ||||
| 	BUG_ON(!valid_dma_direction(direction)); | ||||
| 	WARN_ON(nelems == 0 || sglist->length == 0); | ||||
| 
 | ||||
| 	for_each_sg(sglist, sg, nelems, i) { | ||||
| 		dma_sync_single_for_cpu(dev, sg->dma_address, | ||||
| 					sg_dma_len(sg), direction); | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| static void tile_pci_dma_sync_sg_for_device(struct device *dev, | ||||
| 					    struct scatterlist *sglist, | ||||
| 					    int nelems, | ||||
| 					    enum dma_data_direction direction) | ||||
| { | ||||
| 	struct scatterlist *sg; | ||||
| 	int i; | ||||
| 
 | ||||
| 	BUG_ON(!valid_dma_direction(direction)); | ||||
| 	WARN_ON(nelems == 0 || sglist->length == 0); | ||||
| 
 | ||||
| 	for_each_sg(sglist, sg, nelems, i) { | ||||
| 		dma_sync_single_for_device(dev, sg->dma_address, | ||||
| 					   sg_dma_len(sg), direction); | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| static inline int | ||||
| tile_pci_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | ||||
| { | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| static inline int | ||||
| tile_pci_dma_supported(struct device *dev, u64 mask) | ||||
| { | ||||
| 	return 1; | ||||
| } | ||||
| 
 | ||||
| static struct dma_map_ops tile_pci_default_dma_map_ops = { | ||||
| 	.alloc = tile_pci_dma_alloc_coherent, | ||||
| 	.free = tile_pci_dma_free_coherent, | ||||
| 	.map_page = tile_pci_dma_map_page, | ||||
| 	.unmap_page = tile_pci_dma_unmap_page, | ||||
| 	.map_sg = tile_pci_dma_map_sg, | ||||
| 	.unmap_sg = tile_pci_dma_unmap_sg, | ||||
| 	.sync_single_for_cpu = tile_pci_dma_sync_single_for_cpu, | ||||
| 	.sync_single_for_device = tile_pci_dma_sync_single_for_device, | ||||
| 	.sync_sg_for_cpu = tile_pci_dma_sync_sg_for_cpu, | ||||
| 	.sync_sg_for_device = tile_pci_dma_sync_sg_for_device, | ||||
| 	.mapping_error = tile_pci_dma_mapping_error, | ||||
| 	.dma_supported = tile_pci_dma_supported | ||||
| }; | ||||
| 
 | ||||
| struct dma_map_ops *gx_pci_dma_map_ops = &tile_pci_default_dma_map_ops; | ||||
| EXPORT_SYMBOL(gx_pci_dma_map_ops); | ||||
| 
 | ||||
| /* PCI DMA mapping functions for legacy PCI devices */ | ||||
| 
 | ||||
| #ifdef CONFIG_SWIOTLB | ||||
| static void *tile_swiotlb_alloc_coherent(struct device *dev, size_t size, | ||||
| 					 dma_addr_t *dma_handle, gfp_t gfp, | ||||
| 					 struct dma_attrs *attrs) | ||||
| { | ||||
| 	gfp |= GFP_DMA; | ||||
| 	return swiotlb_alloc_coherent(dev, size, dma_handle, gfp); | ||||
| } | ||||
| 
 | ||||
| static void tile_swiotlb_free_coherent(struct device *dev, size_t size, | ||||
| 				       void *vaddr, dma_addr_t dma_addr, | ||||
| 				       struct dma_attrs *attrs) | ||||
| { | ||||
| 	swiotlb_free_coherent(dev, size, vaddr, dma_addr); | ||||
| } | ||||
| 
 | ||||
| static struct dma_map_ops pci_swiotlb_dma_ops = { | ||||
| 	.alloc = tile_swiotlb_alloc_coherent, | ||||
| 	.free = tile_swiotlb_free_coherent, | ||||
| 	.map_page = swiotlb_map_page, | ||||
| 	.unmap_page = swiotlb_unmap_page, | ||||
| 	.map_sg = swiotlb_map_sg_attrs, | ||||
| 	.unmap_sg = swiotlb_unmap_sg_attrs, | ||||
| 	.sync_single_for_cpu = swiotlb_sync_single_for_cpu, | ||||
| 	.sync_single_for_device = swiotlb_sync_single_for_device, | ||||
| 	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, | ||||
| 	.sync_sg_for_device = swiotlb_sync_sg_for_device, | ||||
| 	.dma_supported = swiotlb_dma_supported, | ||||
| 	.mapping_error = swiotlb_dma_mapping_error, | ||||
| }; | ||||
| 
 | ||||
| struct dma_map_ops *gx_legacy_pci_dma_map_ops = &pci_swiotlb_dma_ops; | ||||
| #else | ||||
| struct dma_map_ops *gx_legacy_pci_dma_map_ops; | ||||
| #endif | ||||
| EXPORT_SYMBOL(gx_legacy_pci_dma_map_ops); | ||||
| 
 | ||||
| #ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK | ||||
| int dma_set_coherent_mask(struct device *dev, u64 mask) | ||||
| { | ||||
| 	struct dma_map_ops *dma_ops = get_dma_ops(dev); | ||||
| 
 | ||||
| 	/* Handle legacy PCI devices with limited memory addressability. */ | ||||
| 	if (((dma_ops == gx_pci_dma_map_ops) || | ||||
| 	    (dma_ops == gx_legacy_pci_dma_map_ops)) && | ||||
| 	    (mask <= DMA_BIT_MASK(32))) { | ||||
| 		if (mask > dev->archdata.max_direct_dma_addr) | ||||
| 			mask = dev->archdata.max_direct_dma_addr; | ||||
| 	} | ||||
| 
 | ||||
| 	if (!dma_supported(dev, mask)) | ||||
| 		return -EIO; | ||||
| 	dev->coherent_dma_mask = mask; | ||||
| 	return 0; | ||||
| } | ||||
| EXPORT_SYMBOL(dma_set_coherent_mask); | ||||
| #endif | ||||
|  | ||||
							
								
								
									
										1543
									
								
								arch/tile/kernel/pci_gx.c
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										1543
									
								
								arch/tile/kernel/pci_gx.c
									
									
									
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							| @ -23,6 +23,7 @@ | ||||
| #include <linux/irq.h> | ||||
| #include <linux/kexec.h> | ||||
| #include <linux/pci.h> | ||||
| #include <linux/swiotlb.h> | ||||
| #include <linux/initrd.h> | ||||
| #include <linux/io.h> | ||||
| #include <linux/highmem.h> | ||||
| @ -109,7 +110,7 @@ static unsigned int __initdata maxnodemem_pfn[MAX_NUMNODES] = { | ||||
| }; | ||||
| static nodemask_t __initdata isolnodes; | ||||
| 
 | ||||
| #ifdef CONFIG_PCI | ||||
| #if defined(CONFIG_PCI) && !defined(__tilegx__) | ||||
| enum { DEFAULT_PCI_RESERVE_MB = 64 }; | ||||
| static unsigned int __initdata pci_reserve_mb = DEFAULT_PCI_RESERVE_MB; | ||||
| unsigned long __initdata pci_reserve_start_pfn = -1U; | ||||
| @ -160,7 +161,7 @@ static int __init setup_isolnodes(char *str) | ||||
| } | ||||
| early_param("isolnodes", setup_isolnodes); | ||||
| 
 | ||||
| #ifdef CONFIG_PCI | ||||
| #if defined(CONFIG_PCI) && !defined(__tilegx__) | ||||
| static int __init setup_pci_reserve(char* str) | ||||
| { | ||||
| 	unsigned long mb; | ||||
| @ -171,7 +172,7 @@ static int __init setup_pci_reserve(char* str) | ||||
| 
 | ||||
| 	pci_reserve_mb = mb; | ||||
| 	pr_info("Reserving %dMB for PCIE root complex mappings\n", | ||||
| 	       pci_reserve_mb); | ||||
| 		pci_reserve_mb); | ||||
| 	return 0; | ||||
| } | ||||
| early_param("pci_reserve", setup_pci_reserve); | ||||
| @ -411,7 +412,7 @@ static void __init setup_memory(void) | ||||
| 			continue; | ||||
| 		} | ||||
| #endif | ||||
| #ifdef CONFIG_PCI | ||||
| #if defined(CONFIG_PCI) && !defined(__tilegx__) | ||||
| 		/*
 | ||||
| 		 * Blocks that overlap the pci reserved region must | ||||
| 		 * have enough space to hold the maximum percpu data | ||||
| @ -604,11 +605,9 @@ static void __init setup_bootmem_allocator_node(int i) | ||||
| 	/* Free all the space back into the allocator. */ | ||||
| 	free_bootmem(PFN_PHYS(start), PFN_PHYS(end - start)); | ||||
| 
 | ||||
| #if defined(CONFIG_PCI) | ||||
| #if defined(CONFIG_PCI) && !defined(__tilegx__) | ||||
| 	/*
 | ||||
| 	 * Throw away any memory aliased by the PCI region.  FIXME: this | ||||
| 	 * is a temporary hack to work around bug 10502, and needs to be | ||||
| 	 * fixed properly. | ||||
| 	 * Throw away any memory aliased by the PCI region. | ||||
| 	 */ | ||||
| 	if (pci_reserve_start_pfn < end && pci_reserve_end_pfn > start) | ||||
| 		reserve_bootmem(PFN_PHYS(pci_reserve_start_pfn), | ||||
| @ -658,6 +657,8 @@ static void __init zone_sizes_init(void) | ||||
| 	unsigned long zones_size[MAX_NR_ZONES] = { 0 }; | ||||
| 	int size = percpu_size(); | ||||
| 	int num_cpus = smp_height * smp_width; | ||||
| 	const unsigned long dma_end = (1UL << (32 - PAGE_SHIFT)); | ||||
| 
 | ||||
| 	int i; | ||||
| 
 | ||||
| 	for (i = 0; i < num_cpus; ++i) | ||||
| @ -729,6 +730,14 @@ static void __init zone_sizes_init(void) | ||||
| 		zones_size[ZONE_NORMAL] = end - start; | ||||
| #endif | ||||
| 
 | ||||
| 		if (start < dma_end) { | ||||
| 			zones_size[ZONE_DMA] = min(zones_size[ZONE_NORMAL], | ||||
| 						   dma_end - start); | ||||
| 			zones_size[ZONE_NORMAL] -= zones_size[ZONE_DMA]; | ||||
| 		} else { | ||||
| 			zones_size[ZONE_DMA] = 0; | ||||
| 		} | ||||
| 
 | ||||
| 		/* Take zone metadata from controller 0 if we're isolnode. */ | ||||
| 		if (node_isset(i, isolnodes)) | ||||
| 			NODE_DATA(i)->bdata = &bootmem_node_data[0]; | ||||
| @ -738,7 +747,7 @@ static void __init zone_sizes_init(void) | ||||
| 		       PFN_UP(node_percpu[i])); | ||||
| 
 | ||||
| 		/* Track the type of memory on each node */ | ||||
| 		if (zones_size[ZONE_NORMAL]) | ||||
| 		if (zones_size[ZONE_NORMAL] || zones_size[ZONE_DMA]) | ||||
| 			node_set_state(i, N_NORMAL_MEMORY); | ||||
| #ifdef CONFIG_HIGHMEM | ||||
| 		if (end != start) | ||||
| @ -1343,7 +1352,7 @@ void __init setup_arch(char **cmdline_p) | ||||
| 	setup_cpu_maps(); | ||||
| 
 | ||||
| 
 | ||||
| #ifdef CONFIG_PCI | ||||
| #if defined(CONFIG_PCI) && !defined(__tilegx__) | ||||
| 	/*
 | ||||
| 	 * Initialize the PCI structures.  This is done before memory | ||||
| 	 * setup so that we know whether or not a pci_reserve region | ||||
| @ -1372,6 +1381,10 @@ void __init setup_arch(char **cmdline_p) | ||||
| 	 * any memory using the bootmem allocator. | ||||
| 	 */ | ||||
| 
 | ||||
| #ifdef CONFIG_SWIOTLB | ||||
| 	swiotlb_init(0); | ||||
| #endif | ||||
| 
 | ||||
| 	paging_init(); | ||||
| 	setup_numa_mapping(); | ||||
| 	zone_sizes_init(); | ||||
| @ -1522,11 +1535,10 @@ static struct resource code_resource = { | ||||
| }; | ||||
| 
 | ||||
| /*
 | ||||
|  * We reserve all resources above 4GB so that PCI won't try to put | ||||
|  * mappings above 4GB; the standard allows that for some devices but | ||||
|  * the probing code trunates values to 32 bits. | ||||
|  * On Pro, we reserve all resources above 4GB so that PCI won't try to put | ||||
|  * mappings above 4GB. | ||||
|  */ | ||||
| #ifdef CONFIG_PCI | ||||
| #if defined(CONFIG_PCI) && !defined(__tilegx__) | ||||
| static struct resource* __init | ||||
| insert_non_bus_resource(void) | ||||
| { | ||||
| @ -1571,8 +1583,7 @@ static int __init request_standard_resources(void) | ||||
| 	int i; | ||||
| 	enum { CODE_DELTA = MEM_SV_INTRPT - PAGE_OFFSET }; | ||||
| 
 | ||||
| 	iomem_resource.end = -1LL; | ||||
| #ifdef CONFIG_PCI | ||||
| #if defined(CONFIG_PCI) && !defined(__tilegx__) | ||||
| 	insert_non_bus_resource(); | ||||
| #endif | ||||
| 
 | ||||
| @ -1580,7 +1591,7 @@ static int __init request_standard_resources(void) | ||||
| 		u64 start_pfn = node_start_pfn[i]; | ||||
| 		u64 end_pfn = node_end_pfn[i]; | ||||
| 
 | ||||
| #ifdef CONFIG_PCI | ||||
| #if defined(CONFIG_PCI) && !defined(__tilegx__) | ||||
| 		if (start_pfn <= pci_reserve_start_pfn && | ||||
| 		    end_pfn > pci_reserve_start_pfn) { | ||||
| 			if (end_pfn > pci_reserve_end_pfn) | ||||
|  | ||||
							
								
								
									
										69
									
								
								arch/tile/kernel/usb.c
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										69
									
								
								arch/tile/kernel/usb.c
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,69 @@ | ||||
| /*
 | ||||
|  * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||||
|  * | ||||
|  *   This program is free software; you can redistribute it and/or | ||||
|  *   modify it under the terms of the GNU General Public License | ||||
|  *   as published by the Free Software Foundation, version 2. | ||||
|  * | ||||
|  *   This program is distributed in the hope that it will be useful, but | ||||
|  *   WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||||
|  *   NON INFRINGEMENT.  See the GNU General Public License for | ||||
|  *   more details. | ||||
|  * | ||||
|  * Register the Tile-Gx USB interfaces as platform devices. | ||||
|  * | ||||
|  * The actual USB driver is just some glue (in | ||||
|  * drivers/usb/host/[eo]hci-tilegx.c) which makes the registers available | ||||
|  * to the standard kernel EHCI and OHCI drivers. | ||||
|  */ | ||||
| 
 | ||||
| #include <linux/dma-mapping.h> | ||||
| #include <linux/platform_device.h> | ||||
| #include <linux/usb/tilegx.h> | ||||
| #include <linux/types.h> | ||||
| 
 | ||||
| static u64 ehci_dmamask = DMA_BIT_MASK(32); | ||||
| 
 | ||||
| #define USB_HOST_DEF(unit, type, dmamask) \ | ||||
| 	static struct \ | ||||
| 	    tilegx_usb_platform_data tilegx_usb_platform_data_ ## type ## \ | ||||
| 		hci ## unit = { \ | ||||
| 		.dev_index = unit, \ | ||||
| 	}; \ | ||||
| 	\ | ||||
| 	static struct platform_device tilegx_usb_ ## type ## hci ## unit = { \ | ||||
| 		.name		= "tilegx-" #type "hci", \ | ||||
| 		.id		= unit, \ | ||||
| 		.dev = { \ | ||||
| 			.dma_mask		= dmamask, \ | ||||
| 			.coherent_dma_mask	= DMA_BIT_MASK(32), \ | ||||
| 			.platform_data = \ | ||||
| 				&tilegx_usb_platform_data_ ## type ## hci ## \ | ||||
| 				unit, \ | ||||
| 		}, \ | ||||
| 	}; | ||||
| 
 | ||||
| USB_HOST_DEF(0, e, &ehci_dmamask) | ||||
| USB_HOST_DEF(0, o, NULL) | ||||
| USB_HOST_DEF(1, e, &ehci_dmamask) | ||||
| USB_HOST_DEF(1, o, NULL) | ||||
| 
 | ||||
| #undef USB_HOST_DEF | ||||
| 
 | ||||
| static struct platform_device *tilegx_usb_devices[] __initdata = { | ||||
| 	&tilegx_usb_ehci0, | ||||
| 	&tilegx_usb_ehci1, | ||||
| 	&tilegx_usb_ohci0, | ||||
| 	&tilegx_usb_ohci1, | ||||
| }; | ||||
| 
 | ||||
| /** Add our set of possible USB devices. */ | ||||
| static int __init tilegx_usb_init(void) | ||||
| { | ||||
| 	platform_add_devices(tilegx_usb_devices, | ||||
| 			     ARRAY_SIZE(tilegx_usb_devices)); | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
| arch_initcall(tilegx_usb_init); | ||||
| @ -16,19 +16,6 @@ | ||||
| #include <net/checksum.h> | ||||
| #include <linux/module.h> | ||||
| 
 | ||||
| static inline unsigned int longto16(unsigned long x) | ||||
| { | ||||
| 	unsigned long ret; | ||||
| #ifdef __tilegx__ | ||||
| 	ret = __insn_v2sadu(x, 0); | ||||
| 	ret = __insn_v2sadu(ret, 0); | ||||
| #else | ||||
| 	ret = __insn_sadh_u(x, 0); | ||||
| 	ret = __insn_sadh_u(ret, 0); | ||||
| #endif | ||||
| 	return ret; | ||||
| } | ||||
| 
 | ||||
| __wsum do_csum(const unsigned char *buff, int len) | ||||
| { | ||||
| 	int odd, count; | ||||
| @ -94,7 +81,7 @@ __wsum do_csum(const unsigned char *buff, int len) | ||||
| 	} | ||||
| 	if (len & 1) | ||||
| 		result += *buff; | ||||
| 	result = longto16(result); | ||||
| 	result = csum_long(result); | ||||
| 	if (odd) | ||||
| 		result = swab16(result); | ||||
| out: | ||||
|  | ||||
| @ -64,10 +64,6 @@ early_param("noallocl2", set_noallocl2); | ||||
| 
 | ||||
| #endif | ||||
| 
 | ||||
| /* Provide no-op versions of these routines to keep flush_remote() cleaner. */ | ||||
| #define mark_caches_evicted_start() 0 | ||||
| #define mark_caches_evicted_finish(mask, timestamp) do {} while (0) | ||||
| 
 | ||||
| 
 | ||||
| /*
 | ||||
|  * Update the irq_stat for cpus that we are going to interrupt | ||||
| @ -107,7 +103,6 @@ static void hv_flush_update(const struct cpumask *cache_cpumask, | ||||
|  *    there's never any good reason for hv_flush_remote() to fail. | ||||
|  *  - Accepts a 32-bit PFN rather than a 64-bit PA, which generally | ||||
|  *    is the type that Linux wants to pass around anyway. | ||||
|  *  - Centralizes the mark_caches_evicted() handling. | ||||
|  *  - Canonicalizes that lengths of zero make cpumasks NULL. | ||||
|  *  - Handles deferring TLB flushes for dataplane tiles. | ||||
|  *  - Tracks remote interrupts in the per-cpu irq_cpustat_t. | ||||
| @ -126,7 +121,6 @@ void flush_remote(unsigned long cache_pfn, unsigned long cache_control, | ||||
| 		  HV_Remote_ASID *asids, int asidcount) | ||||
| { | ||||
| 	int rc; | ||||
| 	int timestamp = 0;  /* happy compiler */ | ||||
| 	struct cpumask cache_cpumask_copy, tlb_cpumask_copy; | ||||
| 	struct cpumask *cache_cpumask, *tlb_cpumask; | ||||
| 	HV_PhysAddr cache_pa; | ||||
| @ -157,15 +151,11 @@ void flush_remote(unsigned long cache_pfn, unsigned long cache_control, | ||||
| 	hv_flush_update(cache_cpumask, tlb_cpumask, tlb_va, tlb_length, | ||||
| 			asids, asidcount); | ||||
| 	cache_pa = (HV_PhysAddr)cache_pfn << PAGE_SHIFT; | ||||
| 	if (cache_control & HV_FLUSH_EVICT_L2) | ||||
| 		timestamp = mark_caches_evicted_start(); | ||||
| 	rc = hv_flush_remote(cache_pa, cache_control, | ||||
| 			     cpumask_bits(cache_cpumask), | ||||
| 			     tlb_va, tlb_length, tlb_pgsize, | ||||
| 			     cpumask_bits(tlb_cpumask), | ||||
| 			     asids, asidcount); | ||||
| 	if (cache_control & HV_FLUSH_EVICT_L2) | ||||
| 		mark_caches_evicted_finish(cache_cpumask, timestamp); | ||||
| 	if (rc == 0) | ||||
| 		return; | ||||
| 	cpumask_scnprintf(cache_buf, sizeof(cache_buf), &cache_cpumask_copy); | ||||
| @ -180,86 +170,87 @@ void flush_remote(unsigned long cache_pfn, unsigned long cache_control, | ||||
| 	panic("Unsafe to continue."); | ||||
| } | ||||
| 
 | ||||
| void flush_remote_page(struct page *page, int order) | ||||
| static void homecache_finv_page_va(void* va, int home) | ||||
| { | ||||
| 	int i, pages = (1 << order); | ||||
| 	for (i = 0; i < pages; ++i, ++page) { | ||||
| 		void *p = kmap_atomic(page); | ||||
| 		int hfh = 0; | ||||
| 		int home = page_home(page); | ||||
| #if CHIP_HAS_CBOX_HOME_MAP() | ||||
| 		if (home == PAGE_HOME_HASH) | ||||
| 			hfh = 1; | ||||
| 		else | ||||
| #endif | ||||
| 			BUG_ON(home < 0 || home >= NR_CPUS); | ||||
| 		finv_buffer_remote(p, PAGE_SIZE, hfh); | ||||
| 		kunmap_atomic(p); | ||||
| 	if (home == smp_processor_id()) { | ||||
| 		finv_buffer_local(va, PAGE_SIZE); | ||||
| 	} else if (home == PAGE_HOME_HASH) { | ||||
| 		finv_buffer_remote(va, PAGE_SIZE, 1); | ||||
| 	} else { | ||||
| 		BUG_ON(home < 0 || home >= NR_CPUS); | ||||
| 		finv_buffer_remote(va, PAGE_SIZE, 0); | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| void homecache_finv_map_page(struct page *page, int home) | ||||
| { | ||||
| 	unsigned long flags; | ||||
| 	unsigned long va; | ||||
| 	pte_t *ptep; | ||||
| 	pte_t pte; | ||||
| 
 | ||||
| 	if (home == PAGE_HOME_UNCACHED) | ||||
| 		return; | ||||
| 	local_irq_save(flags); | ||||
| #ifdef CONFIG_HIGHMEM | ||||
| 	va = __fix_to_virt(FIX_KMAP_BEGIN + kmap_atomic_idx_push() + | ||||
| 			   (KM_TYPE_NR * smp_processor_id())); | ||||
| #else | ||||
| 	va = __fix_to_virt(FIX_HOMECACHE_BEGIN + smp_processor_id()); | ||||
| #endif | ||||
| 	ptep = virt_to_pte(NULL, (unsigned long)va); | ||||
| 	pte = pfn_pte(page_to_pfn(page), PAGE_KERNEL); | ||||
| 	__set_pte(ptep, pte_set_home(pte, home)); | ||||
| 	homecache_finv_page_va((void *)va, home); | ||||
| 	__pte_clear(ptep); | ||||
| 	hv_flush_page(va, PAGE_SIZE); | ||||
| #ifdef CONFIG_HIGHMEM | ||||
| 	kmap_atomic_idx_pop(); | ||||
| #endif | ||||
| 	local_irq_restore(flags); | ||||
| } | ||||
| 
 | ||||
| static void homecache_finv_page_home(struct page *page, int home) | ||||
| { | ||||
| 	if (!PageHighMem(page) && home == page_home(page)) | ||||
| 		homecache_finv_page_va(page_address(page), home); | ||||
| 	else | ||||
| 		homecache_finv_map_page(page, home); | ||||
| } | ||||
| 
 | ||||
| static inline bool incoherent_home(int home) | ||||
| { | ||||
| 	return home == PAGE_HOME_IMMUTABLE || home == PAGE_HOME_INCOHERENT; | ||||
| } | ||||
| 
 | ||||
| static void homecache_finv_page_internal(struct page *page, int force_map) | ||||
| { | ||||
| 	int home = page_home(page); | ||||
| 	if (home == PAGE_HOME_UNCACHED) | ||||
| 		return; | ||||
| 	if (incoherent_home(home)) { | ||||
| 		int cpu; | ||||
| 		for_each_cpu(cpu, &cpu_cacheable_map) | ||||
| 			homecache_finv_map_page(page, cpu); | ||||
| 	} else if (force_map) { | ||||
| 		/* Force if, e.g., the normal mapping is migrating. */ | ||||
| 		homecache_finv_map_page(page, home); | ||||
| 	} else { | ||||
| 		homecache_finv_page_home(page, home); | ||||
| 	} | ||||
| 	sim_validate_lines_evicted(PFN_PHYS(page_to_pfn(page)), PAGE_SIZE); | ||||
| } | ||||
| 
 | ||||
| void homecache_finv_page(struct page *page) | ||||
| { | ||||
| 	homecache_finv_page_internal(page, 0); | ||||
| } | ||||
| 
 | ||||
| void homecache_evict(const struct cpumask *mask) | ||||
| { | ||||
| 	flush_remote(0, HV_FLUSH_EVICT_L2, mask, 0, 0, 0, NULL, NULL, 0); | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  * Return a mask of the cpus whose caches currently own these pages. | ||||
|  * The return value is whether the pages are all coherently cached | ||||
|  * (i.e. none are immutable, incoherent, or uncached). | ||||
|  */ | ||||
| static int homecache_mask(struct page *page, int pages, | ||||
| 			  struct cpumask *home_mask) | ||||
| { | ||||
| 	int i; | ||||
| 	int cached_coherently = 1; | ||||
| 	cpumask_clear(home_mask); | ||||
| 	for (i = 0; i < pages; ++i) { | ||||
| 		int home = page_home(&page[i]); | ||||
| 		if (home == PAGE_HOME_IMMUTABLE || | ||||
| 		    home == PAGE_HOME_INCOHERENT) { | ||||
| 			cpumask_copy(home_mask, cpu_possible_mask); | ||||
| 			return 0; | ||||
| 		} | ||||
| #if CHIP_HAS_CBOX_HOME_MAP() | ||||
| 		if (home == PAGE_HOME_HASH) { | ||||
| 			cpumask_or(home_mask, home_mask, &hash_for_home_map); | ||||
| 			continue; | ||||
| 		} | ||||
| #endif | ||||
| 		if (home == PAGE_HOME_UNCACHED) { | ||||
| 			cached_coherently = 0; | ||||
| 			continue; | ||||
| 		} | ||||
| 		BUG_ON(home < 0 || home >= NR_CPUS); | ||||
| 		cpumask_set_cpu(home, home_mask); | ||||
| 	} | ||||
| 	return cached_coherently; | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  * Return the passed length, or zero if it's long enough that we | ||||
|  * believe we should evict the whole L2 cache. | ||||
|  */ | ||||
| static unsigned long cache_flush_length(unsigned long length) | ||||
| { | ||||
| 	return (length >= CHIP_L2_CACHE_SIZE()) ? HV_FLUSH_EVICT_L2 : length; | ||||
| } | ||||
| 
 | ||||
| /* Flush a page out of whatever cache(s) it is in. */ | ||||
| void homecache_flush_cache(struct page *page, int order) | ||||
| { | ||||
| 	int pages = 1 << order; | ||||
| 	int length = cache_flush_length(pages * PAGE_SIZE); | ||||
| 	unsigned long pfn = page_to_pfn(page); | ||||
| 	struct cpumask home_mask; | ||||
| 
 | ||||
| 	homecache_mask(page, pages, &home_mask); | ||||
| 	flush_remote(pfn, length, &home_mask, 0, 0, 0, NULL, NULL, 0); | ||||
| 	sim_validate_lines_evicted(PFN_PHYS(pfn), pages * PAGE_SIZE); | ||||
| } | ||||
| 
 | ||||
| 
 | ||||
| /* Report the home corresponding to a given PTE. */ | ||||
| static int pte_to_home(pte_t pte) | ||||
| { | ||||
| @ -441,15 +432,8 @@ struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask, | ||||
| 	return page; | ||||
| } | ||||
| 
 | ||||
| void homecache_free_pages(unsigned long addr, unsigned int order) | ||||
| void __homecache_free_pages(struct page *page, unsigned int order) | ||||
| { | ||||
| 	struct page *page; | ||||
| 
 | ||||
| 	if (addr == 0) | ||||
| 		return; | ||||
| 
 | ||||
| 	VM_BUG_ON(!virt_addr_valid((void *)addr)); | ||||
| 	page = virt_to_page((void *)addr); | ||||
| 	if (put_page_testzero(page)) { | ||||
| 		homecache_change_page_home(page, order, initial_page_home()); | ||||
| 		if (order == 0) { | ||||
| @ -460,3 +444,13 @@ void homecache_free_pages(unsigned long addr, unsigned int order) | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
| EXPORT_SYMBOL(__homecache_free_pages); | ||||
| 
 | ||||
| void homecache_free_pages(unsigned long addr, unsigned int order) | ||||
| { | ||||
| 	if (addr != 0) { | ||||
| 		VM_BUG_ON(!virt_addr_valid((void *)addr)); | ||||
| 		__homecache_free_pages(virt_to_page((void *)addr), order); | ||||
| 	} | ||||
| } | ||||
| EXPORT_SYMBOL(homecache_free_pages); | ||||
|  | ||||
| @ -150,7 +150,21 @@ void __init shatter_pmd(pmd_t *pmd) | ||||
| 	assign_pte(pmd, pte); | ||||
| } | ||||
| 
 | ||||
| #ifdef CONFIG_HIGHMEM | ||||
| #ifdef __tilegx__ | ||||
| static pmd_t *__init get_pmd(pgd_t pgtables[], unsigned long va) | ||||
| { | ||||
| 	pud_t *pud = pud_offset(&pgtables[pgd_index(va)], va); | ||||
| 	if (pud_none(*pud)) | ||||
| 		assign_pmd(pud, alloc_pmd()); | ||||
| 	return pmd_offset(pud, va); | ||||
| } | ||||
| #else | ||||
| static pmd_t *__init get_pmd(pgd_t pgtables[], unsigned long va) | ||||
| { | ||||
| 	return pmd_offset(pud_offset(&pgtables[pgd_index(va)], va), va); | ||||
| } | ||||
| #endif | ||||
| 
 | ||||
| /*
 | ||||
|  * This function initializes a certain range of kernel virtual memory | ||||
|  * with new bootmem page tables, everywhere page tables are missing in | ||||
| @ -163,24 +177,17 @@ void __init shatter_pmd(pmd_t *pmd) | ||||
|  * checking the pgd every time. | ||||
|  */ | ||||
| static void __init page_table_range_init(unsigned long start, | ||||
| 					 unsigned long end, pgd_t *pgd_base) | ||||
| 					 unsigned long end, pgd_t *pgd) | ||||
| { | ||||
| 	pgd_t *pgd; | ||||
| 	int pgd_idx; | ||||
| 	unsigned long vaddr; | ||||
| 
 | ||||
| 	vaddr = start; | ||||
| 	pgd_idx = pgd_index(vaddr); | ||||
| 	pgd = pgd_base + pgd_idx; | ||||
| 
 | ||||
| 	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) { | ||||
| 		pmd_t *pmd = pmd_offset(pud_offset(pgd, vaddr), vaddr); | ||||
| 	start = round_down(start, PMD_SIZE); | ||||
| 	end = round_up(end, PMD_SIZE); | ||||
| 	for (vaddr = start; vaddr < end; vaddr += PMD_SIZE) { | ||||
| 		pmd_t *pmd = get_pmd(pgd, vaddr); | ||||
| 		if (pmd_none(*pmd)) | ||||
| 			assign_pte(pmd, alloc_pte()); | ||||
| 		vaddr += PMD_SIZE; | ||||
| 	} | ||||
| } | ||||
| #endif /* CONFIG_HIGHMEM */ | ||||
| 
 | ||||
| 
 | ||||
| #if CHIP_HAS_CBOX_HOME_MAP() | ||||
| @ -404,21 +411,6 @@ static inline pgprot_t ktext_set_nocache(pgprot_t prot) | ||||
| 	return prot; | ||||
| } | ||||
| 
 | ||||
| #ifndef __tilegx__ | ||||
| static pmd_t *__init get_pmd(pgd_t pgtables[], unsigned long va) | ||||
| { | ||||
| 	return pmd_offset(pud_offset(&pgtables[pgd_index(va)], va), va); | ||||
| } | ||||
| #else | ||||
| static pmd_t *__init get_pmd(pgd_t pgtables[], unsigned long va) | ||||
| { | ||||
| 	pud_t *pud = pud_offset(&pgtables[pgd_index(va)], va); | ||||
| 	if (pud_none(*pud)) | ||||
| 		assign_pmd(pud, alloc_pmd()); | ||||
| 	return pmd_offset(pud, va); | ||||
| } | ||||
| #endif | ||||
| 
 | ||||
| /* Temporary page table we use for staging. */ | ||||
| static pgd_t pgtables[PTRS_PER_PGD] | ||||
|  __attribute__((aligned(HV_PAGE_TABLE_ALIGN))); | ||||
| @ -741,16 +733,15 @@ static void __init set_non_bootmem_pages_init(void) | ||||
| 	for_each_zone(z) { | ||||
| 		unsigned long start, end; | ||||
| 		int nid = z->zone_pgdat->node_id; | ||||
| #ifdef CONFIG_HIGHMEM | ||||
| 		int idx = zone_idx(z); | ||||
| #endif | ||||
| 
 | ||||
| 		start = z->zone_start_pfn; | ||||
| 		if (start == 0) | ||||
| 			continue;  /* bootmem */ | ||||
| 		end = start + z->spanned_pages; | ||||
| 		if (idx == ZONE_NORMAL) { | ||||
| 			BUG_ON(start != node_start_pfn[nid]); | ||||
| 			start = node_free_pfn[nid]; | ||||
| 		} | ||||
| 		start = max(start, node_free_pfn[nid]); | ||||
| 		start = max(start, max_low_pfn); | ||||
| 
 | ||||
| #ifdef CONFIG_HIGHMEM | ||||
| 		if (idx == ZONE_HIGHMEM) | ||||
| 			totalhigh_pages += z->spanned_pages; | ||||
| @ -779,9 +770,6 @@ static void __init set_non_bootmem_pages_init(void) | ||||
|  */ | ||||
| void __init paging_init(void) | ||||
| { | ||||
| #ifdef CONFIG_HIGHMEM | ||||
| 	unsigned long vaddr, end; | ||||
| #endif | ||||
| #ifdef __tilegx__ | ||||
| 	pud_t *pud; | ||||
| #endif | ||||
| @ -789,14 +777,14 @@ void __init paging_init(void) | ||||
| 
 | ||||
| 	kernel_physical_mapping_init(pgd_base); | ||||
| 
 | ||||
| #ifdef CONFIG_HIGHMEM | ||||
| 	/*
 | ||||
| 	 * Fixed mappings, only the page table structure has to be | ||||
| 	 * created - mappings will be set by set_fixmap(): | ||||
| 	 */ | ||||
| 	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; | ||||
| 	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK; | ||||
| 	page_table_range_init(vaddr, end, pgd_base); | ||||
| 	page_table_range_init(fix_to_virt(__end_of_fixed_addresses - 1), | ||||
| 			      FIXADDR_TOP, pgd_base); | ||||
| 
 | ||||
| #ifdef CONFIG_HIGHMEM | ||||
| 	permanent_kmaps_init(pgd_base); | ||||
| #endif | ||||
| 
 | ||||
|  | ||||
| @ -575,13 +575,6 @@ void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size, | ||||
| } | ||||
| EXPORT_SYMBOL(ioremap_prot); | ||||
| 
 | ||||
| /* Map a PCI MMIO bus address into VA space. */ | ||||
| void __iomem *ioremap(resource_size_t phys_addr, unsigned long size) | ||||
| { | ||||
| 	panic("ioremap for PCI MMIO is not supported"); | ||||
| } | ||||
| EXPORT_SYMBOL(ioremap); | ||||
| 
 | ||||
| /* Unmap an MMIO VA mapping. */ | ||||
| void iounmap(volatile void __iomem *addr_in) | ||||
| { | ||||
|  | ||||
| @ -2143,9 +2143,9 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB, | ||||
| DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB, | ||||
| 			quirk_unhide_mch_dev6); | ||||
| 
 | ||||
| #ifdef CONFIG_TILE | ||||
| #ifdef CONFIG_TILEPRO | ||||
| /*
 | ||||
|  * The Tilera TILEmpower platform needs to set the link speed | ||||
|  * The Tilera TILEmpower tilepro platform needs to set the link speed | ||||
|  * to 2.5GT(Giga-Transfers)/s (Gen 1). The default link speed | ||||
|  * setting is 5GT/s (Gen 2). 0x98 is the Link Control2 PCIe | ||||
|  * capability register of the PEX8624 PCIe switch. The switch | ||||
| @ -2160,7 +2160,7 @@ static void __devinit quirk_tile_plx_gen1(struct pci_dev *dev) | ||||
| 	} | ||||
| } | ||||
| DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PLX, 0x8624, quirk_tile_plx_gen1); | ||||
| #endif /* CONFIG_TILE */ | ||||
| #endif /* CONFIG_TILEPRO */ | ||||
| 
 | ||||
| #ifdef CONFIG_PCI_MSI | ||||
| /* Some chipsets do not support MSI. We cannot easily rely on setting
 | ||||
|  | ||||
| @ -1349,6 +1349,11 @@ MODULE_LICENSE ("GPL"); | ||||
| #define PLATFORM_DRIVER		ehci_msm_driver | ||||
| #endif | ||||
| 
 | ||||
| #ifdef CONFIG_TILE_USB | ||||
| #include "ehci-tilegx.c" | ||||
| #define	PLATFORM_DRIVER		ehci_hcd_tilegx_driver | ||||
| #endif | ||||
| 
 | ||||
| #ifdef CONFIG_USB_EHCI_HCD_PMC_MSP | ||||
| #include "ehci-pmcmsp.c" | ||||
| #define	PLATFORM_DRIVER		ehci_hcd_msp_driver | ||||
|  | ||||
							
								
								
									
										214
									
								
								drivers/usb/host/ehci-tilegx.c
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										214
									
								
								drivers/usb/host/ehci-tilegx.c
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,214 @@ | ||||
| /*
 | ||||
|  * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||||
|  * | ||||
|  *   This program is free software; you can redistribute it and/or | ||||
|  *   modify it under the terms of the GNU General Public License | ||||
|  *   as published by the Free Software Foundation, version 2. | ||||
|  * | ||||
|  *   This program is distributed in the hope that it will be useful, but | ||||
|  *   WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||||
|  *   NON INFRINGEMENT.  See the GNU General Public License for | ||||
|  *   more details. | ||||
|  */ | ||||
| 
 | ||||
| /*
 | ||||
|  * Tilera TILE-Gx USB EHCI host controller driver. | ||||
|  */ | ||||
| 
 | ||||
| #include <linux/irq.h> | ||||
| #include <linux/platform_device.h> | ||||
| #include <linux/usb/tilegx.h> | ||||
| #include <linux/usb.h> | ||||
| 
 | ||||
| #include <asm/homecache.h> | ||||
| 
 | ||||
| #include <gxio/iorpc_usb_host.h> | ||||
| #include <gxio/usb_host.h> | ||||
| 
 | ||||
| static void tilegx_start_ehc(void) | ||||
| { | ||||
| } | ||||
| 
 | ||||
| static void tilegx_stop_ehc(void) | ||||
| { | ||||
| } | ||||
| 
 | ||||
| static int tilegx_ehci_setup(struct usb_hcd *hcd) | ||||
| { | ||||
| 	int ret = ehci_init(hcd); | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Some drivers do: | ||||
| 	 * | ||||
| 	 *   struct ehci_hcd *ehci = hcd_to_ehci(hcd); | ||||
| 	 *   ehci->need_io_watchdog = 0; | ||||
| 	 * | ||||
| 	 * here, but since this is a new driver we're going to leave the | ||||
| 	 * watchdog enabled.  Later we may try to turn it off and see | ||||
| 	 * whether we run into any problems. | ||||
| 	 */ | ||||
| 
 | ||||
| 	return ret; | ||||
| } | ||||
| 
 | ||||
| static const struct hc_driver ehci_tilegx_hc_driver = { | ||||
| 	.description		= hcd_name, | ||||
| 	.product_desc		= "Tile-Gx EHCI", | ||||
| 	.hcd_priv_size		= sizeof(struct ehci_hcd), | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Generic hardware linkage. | ||||
| 	 */ | ||||
| 	.irq			= ehci_irq, | ||||
| 	.flags			= HCD_MEMORY | HCD_USB2, | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Basic lifecycle operations. | ||||
| 	 */ | ||||
| 	.reset			= tilegx_ehci_setup, | ||||
| 	.start			= ehci_run, | ||||
| 	.stop			= ehci_stop, | ||||
| 	.shutdown		= ehci_shutdown, | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Managing I/O requests and associated device resources. | ||||
| 	 */ | ||||
| 	.urb_enqueue		= ehci_urb_enqueue, | ||||
| 	.urb_dequeue		= ehci_urb_dequeue, | ||||
| 	.endpoint_disable	= ehci_endpoint_disable, | ||||
| 	.endpoint_reset		= ehci_endpoint_reset, | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Scheduling support. | ||||
| 	 */ | ||||
| 	.get_frame_number	= ehci_get_frame, | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Root hub support. | ||||
| 	 */ | ||||
| 	.hub_status_data	= ehci_hub_status_data, | ||||
| 	.hub_control		= ehci_hub_control, | ||||
| 	.bus_suspend		= ehci_bus_suspend, | ||||
| 	.bus_resume		= ehci_bus_resume, | ||||
| 	.relinquish_port	= ehci_relinquish_port, | ||||
| 	.port_handed_over	= ehci_port_handed_over, | ||||
| 
 | ||||
| 	.clear_tt_buffer_complete	= ehci_clear_tt_buffer_complete, | ||||
| }; | ||||
| 
 | ||||
| static int ehci_hcd_tilegx_drv_probe(struct platform_device *pdev) | ||||
| { | ||||
| 	struct usb_hcd *hcd; | ||||
| 	struct ehci_hcd *ehci; | ||||
| 	struct tilegx_usb_platform_data *pdata = pdev->dev.platform_data; | ||||
| 	pte_t pte = { 0 }; | ||||
| 	int my_cpu = smp_processor_id(); | ||||
| 	int ret; | ||||
| 
 | ||||
| 	if (usb_disabled()) | ||||
| 		return -ENODEV; | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Try to initialize our GXIO context; if we can't, the device | ||||
| 	 * doesn't exist. | ||||
| 	 */ | ||||
| 	if (gxio_usb_host_init(&pdata->usb_ctx, pdata->dev_index, 1) != 0) | ||||
| 		return -ENXIO; | ||||
| 
 | ||||
| 	hcd = usb_create_hcd(&ehci_tilegx_hc_driver, &pdev->dev, | ||||
| 			     dev_name(&pdev->dev)); | ||||
| 	if (!hcd) | ||||
| 		return -ENOMEM; | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * We don't use rsrc_start to map in our registers, but seems like | ||||
| 	 * we ought to set it to something, so we use the register VA. | ||||
| 	 */ | ||||
| 	hcd->rsrc_start = | ||||
| 		(ulong) gxio_usb_host_get_reg_start(&pdata->usb_ctx); | ||||
| 	hcd->rsrc_len = gxio_usb_host_get_reg_len(&pdata->usb_ctx); | ||||
| 	hcd->regs = gxio_usb_host_get_reg_start(&pdata->usb_ctx); | ||||
| 
 | ||||
| 	tilegx_start_ehc(); | ||||
| 
 | ||||
| 	ehci = hcd_to_ehci(hcd); | ||||
| 	ehci->caps = hcd->regs; | ||||
| 	ehci->regs = | ||||
| 		hcd->regs + HC_LENGTH(ehci, readl(&ehci->caps->hc_capbase)); | ||||
| 	/* cache this readonly data; minimize chip reads */ | ||||
| 	ehci->hcs_params = readl(&ehci->caps->hcs_params); | ||||
| 
 | ||||
| 	/* Create our IRQs and register them. */ | ||||
| 	pdata->irq = create_irq(); | ||||
| 	if (pdata->irq < 0) { | ||||
| 		ret = -ENXIO; | ||||
| 		goto err_no_irq; | ||||
| 	} | ||||
| 
 | ||||
| 	tile_irq_activate(pdata->irq, TILE_IRQ_PERCPU); | ||||
| 
 | ||||
| 	/* Configure interrupts. */ | ||||
| 	ret = gxio_usb_host_cfg_interrupt(&pdata->usb_ctx, | ||||
| 					  cpu_x(my_cpu), cpu_y(my_cpu), | ||||
| 					  KERNEL_PL, pdata->irq); | ||||
| 	if (ret) { | ||||
| 		ret = -ENXIO; | ||||
| 		goto err_have_irq; | ||||
| 	} | ||||
| 
 | ||||
| 	/* Register all of our memory. */ | ||||
| 	pte = pte_set_home(pte, PAGE_HOME_HASH); | ||||
| 	ret = gxio_usb_host_register_client_memory(&pdata->usb_ctx, pte, 0); | ||||
| 	if (ret) { | ||||
| 		ret = -ENXIO; | ||||
| 		goto err_have_irq; | ||||
| 	} | ||||
| 
 | ||||
| 	ret = usb_add_hcd(hcd, pdata->irq, IRQF_SHARED); | ||||
| 	if (ret == 0) { | ||||
| 		platform_set_drvdata(pdev, hcd); | ||||
| 		return ret; | ||||
| 	} | ||||
| 
 | ||||
| err_have_irq: | ||||
| 	destroy_irq(pdata->irq); | ||||
| err_no_irq: | ||||
| 	tilegx_stop_ehc(); | ||||
| 	usb_put_hcd(hcd); | ||||
| 	gxio_usb_host_destroy(&pdata->usb_ctx); | ||||
| 	return ret; | ||||
| } | ||||
| 
 | ||||
| static int ehci_hcd_tilegx_drv_remove(struct platform_device *pdev) | ||||
| { | ||||
| 	struct usb_hcd *hcd = platform_get_drvdata(pdev); | ||||
| 	struct tilegx_usb_platform_data *pdata = pdev->dev.platform_data; | ||||
| 
 | ||||
| 	usb_remove_hcd(hcd); | ||||
| 	usb_put_hcd(hcd); | ||||
| 	tilegx_stop_ehc(); | ||||
| 	gxio_usb_host_destroy(&pdata->usb_ctx); | ||||
| 	destroy_irq(pdata->irq); | ||||
| 	platform_set_drvdata(pdev, NULL); | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| static void ehci_hcd_tilegx_drv_shutdown(struct platform_device *pdev) | ||||
| { | ||||
| 	usb_hcd_platform_shutdown(pdev); | ||||
| 	ehci_hcd_tilegx_drv_remove(pdev); | ||||
| } | ||||
| 
 | ||||
| static struct platform_driver ehci_hcd_tilegx_driver = { | ||||
| 	.probe		= ehci_hcd_tilegx_drv_probe, | ||||
| 	.remove		= ehci_hcd_tilegx_drv_remove, | ||||
| 	.shutdown	= ehci_hcd_tilegx_drv_shutdown, | ||||
| 	.driver = { | ||||
| 		.name	= "tilegx-ehci", | ||||
| 		.owner	= THIS_MODULE, | ||||
| 	} | ||||
| }; | ||||
| 
 | ||||
| MODULE_ALIAS("platform:tilegx-ehci"); | ||||
| @ -1100,6 +1100,11 @@ MODULE_LICENSE ("GPL"); | ||||
| #define PLATFORM_DRIVER		ohci_octeon_driver | ||||
| #endif | ||||
| 
 | ||||
| #ifdef CONFIG_TILE_USB | ||||
| #include "ohci-tilegx.c" | ||||
| #define PLATFORM_DRIVER		ohci_hcd_tilegx_driver | ||||
| #endif | ||||
| 
 | ||||
| #ifdef CONFIG_USB_CNS3XXX_OHCI | ||||
| #include "ohci-cns3xxx.c" | ||||
| #define PLATFORM_DRIVER		ohci_hcd_cns3xxx_driver | ||||
|  | ||||
							
								
								
									
										203
									
								
								drivers/usb/host/ohci-tilegx.c
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										203
									
								
								drivers/usb/host/ohci-tilegx.c
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,203 @@ | ||||
| /*
 | ||||
|  * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||||
|  * | ||||
|  *   This program is free software; you can redistribute it and/or | ||||
|  *   modify it under the terms of the GNU General Public License | ||||
|  *   as published by the Free Software Foundation, version 2. | ||||
|  * | ||||
|  *   This program is distributed in the hope that it will be useful, but | ||||
|  *   WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||||
|  *   NON INFRINGEMENT.  See the GNU General Public License for | ||||
|  *   more details. | ||||
|  */ | ||||
| 
 | ||||
| /*
 | ||||
|  * Tilera TILE-Gx USB OHCI host controller driver. | ||||
|  */ | ||||
| 
 | ||||
| #include <linux/irq.h> | ||||
| #include <linux/platform_device.h> | ||||
| #include <linux/usb/tilegx.h> | ||||
| #include <linux/usb.h> | ||||
| 
 | ||||
| #include <asm/homecache.h> | ||||
| 
 | ||||
| #include <gxio/iorpc_usb_host.h> | ||||
| #include <gxio/usb_host.h> | ||||
| 
 | ||||
| static void tilegx_start_ohc(void) | ||||
| { | ||||
| } | ||||
| 
 | ||||
| static void tilegx_stop_ohc(void) | ||||
| { | ||||
| } | ||||
| 
 | ||||
| static int tilegx_ohci_start(struct usb_hcd *hcd) | ||||
| { | ||||
| 	struct ohci_hcd *ohci = hcd_to_ohci(hcd); | ||||
| 	int ret; | ||||
| 
 | ||||
| 	ret = ohci_init(ohci); | ||||
| 	if (ret < 0) | ||||
| 		return ret; | ||||
| 
 | ||||
| 	ret = ohci_run(ohci); | ||||
| 	if (ret < 0) { | ||||
| 		dev_err(hcd->self.controller, "can't start %s\n", | ||||
| 			hcd->self.bus_name); | ||||
| 		ohci_stop(hcd); | ||||
| 		return ret; | ||||
| 	} | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| static const struct hc_driver ohci_tilegx_hc_driver = { | ||||
| 	.description		= hcd_name, | ||||
| 	.product_desc		= "Tile-Gx OHCI", | ||||
| 	.hcd_priv_size		= sizeof(struct ohci_hcd), | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Generic hardware linkage. | ||||
| 	 */ | ||||
| 	.irq			= ohci_irq, | ||||
| 	.flags			= HCD_MEMORY | HCD_LOCAL_MEM | HCD_USB11, | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Basic lifecycle operations. | ||||
| 	 */ | ||||
| 	.start			= tilegx_ohci_start, | ||||
| 	.stop			= ohci_stop, | ||||
| 	.shutdown		= ohci_shutdown, | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Managing I/O requests and associated device resources. | ||||
| 	 */ | ||||
| 	.urb_enqueue		= ohci_urb_enqueue, | ||||
| 	.urb_dequeue		= ohci_urb_dequeue, | ||||
| 	.endpoint_disable	= ohci_endpoint_disable, | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Scheduling support. | ||||
| 	 */ | ||||
| 	.get_frame_number	= ohci_get_frame, | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Root hub support. | ||||
| 	 */ | ||||
| 	.hub_status_data	= ohci_hub_status_data, | ||||
| 	.hub_control		= ohci_hub_control, | ||||
| 	.start_port_reset	= ohci_start_port_reset, | ||||
| }; | ||||
| 
 | ||||
| static int ohci_hcd_tilegx_drv_probe(struct platform_device *pdev) | ||||
| { | ||||
| 	struct usb_hcd *hcd; | ||||
| 	struct tilegx_usb_platform_data *pdata = pdev->dev.platform_data; | ||||
| 	pte_t pte = { 0 }; | ||||
| 	int my_cpu = smp_processor_id(); | ||||
| 	int ret; | ||||
| 
 | ||||
| 	if (usb_disabled()) | ||||
| 		return -ENODEV; | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Try to initialize our GXIO context; if we can't, the device | ||||
| 	 * doesn't exist. | ||||
| 	 */ | ||||
| 	if (gxio_usb_host_init(&pdata->usb_ctx, pdata->dev_index, 0) != 0) | ||||
| 		return -ENXIO; | ||||
| 
 | ||||
| 	hcd = usb_create_hcd(&ohci_tilegx_hc_driver, &pdev->dev, | ||||
| 			     dev_name(&pdev->dev)); | ||||
| 	if (!hcd) | ||||
| 		return -ENOMEM; | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * We don't use rsrc_start to map in our registers, but seems like | ||||
| 	 * we ought to set it to something, so we use the register VA. | ||||
| 	 */ | ||||
| 	hcd->rsrc_start = | ||||
| 		(ulong) gxio_usb_host_get_reg_start(&pdata->usb_ctx); | ||||
| 	hcd->rsrc_len = gxio_usb_host_get_reg_len(&pdata->usb_ctx); | ||||
| 	hcd->regs = gxio_usb_host_get_reg_start(&pdata->usb_ctx); | ||||
| 
 | ||||
| 	tilegx_start_ohc(); | ||||
| 
 | ||||
| 	/* Create our IRQs and register them. */ | ||||
| 	pdata->irq = create_irq(); | ||||
| 	if (pdata->irq < 0) { | ||||
| 		ret = -ENXIO; | ||||
| 		goto err_no_irq; | ||||
| 	} | ||||
| 
 | ||||
| 	tile_irq_activate(pdata->irq, TILE_IRQ_PERCPU); | ||||
| 
 | ||||
| 	/* Configure interrupts. */ | ||||
| 	ret = gxio_usb_host_cfg_interrupt(&pdata->usb_ctx, | ||||
| 					  cpu_x(my_cpu), cpu_y(my_cpu), | ||||
| 					  KERNEL_PL, pdata->irq); | ||||
| 	if (ret) { | ||||
| 		ret = -ENXIO; | ||||
| 		goto err_have_irq; | ||||
| 	} | ||||
| 
 | ||||
| 	/* Register all of our memory. */ | ||||
| 	pte = pte_set_home(pte, PAGE_HOME_HASH); | ||||
| 	ret = gxio_usb_host_register_client_memory(&pdata->usb_ctx, pte, 0); | ||||
| 	if (ret) { | ||||
| 		ret = -ENXIO; | ||||
| 		goto err_have_irq; | ||||
| 	} | ||||
| 
 | ||||
| 	ohci_hcd_init(hcd_to_ohci(hcd)); | ||||
| 
 | ||||
| 	ret = usb_add_hcd(hcd, pdata->irq, IRQF_SHARED); | ||||
| 	if (ret == 0) { | ||||
| 		platform_set_drvdata(pdev, hcd); | ||||
| 		return ret; | ||||
| 	} | ||||
| 
 | ||||
| err_have_irq: | ||||
| 	destroy_irq(pdata->irq); | ||||
| err_no_irq: | ||||
| 	tilegx_stop_ohc(); | ||||
| 	usb_put_hcd(hcd); | ||||
| 	gxio_usb_host_destroy(&pdata->usb_ctx); | ||||
| 	return ret; | ||||
| } | ||||
| 
 | ||||
| static int ohci_hcd_tilegx_drv_remove(struct platform_device *pdev) | ||||
| { | ||||
| 	struct usb_hcd *hcd = platform_get_drvdata(pdev); | ||||
| 	struct tilegx_usb_platform_data* pdata = pdev->dev.platform_data; | ||||
| 
 | ||||
| 	usb_remove_hcd(hcd); | ||||
| 	usb_put_hcd(hcd); | ||||
| 	tilegx_stop_ohc(); | ||||
| 	gxio_usb_host_destroy(&pdata->usb_ctx); | ||||
| 	destroy_irq(pdata->irq); | ||||
| 	platform_set_drvdata(pdev, NULL); | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
/*
 * Shutdown hook: quiesce the controller through the generic platform
 * HCD shutdown path, then release all tile-specific resources by
 * reusing the remove path.
 */
static void ohci_hcd_tilegx_drv_shutdown(struct platform_device *pdev)
{
	usb_hcd_platform_shutdown(pdev);
	ohci_hcd_tilegx_drv_remove(pdev);
}
| 
 | ||||
/*
 * Platform driver glue: binds to the "tilegx-ohci" platform device
 * created by the arch code.
 */
static struct platform_driver ohci_hcd_tilegx_driver = {
	.probe		= ohci_hcd_tilegx_drv_probe,
	.remove		= ohci_hcd_tilegx_drv_remove,
	.shutdown	= ohci_hcd_tilegx_drv_shutdown,
	.driver = {
		.name	= "tilegx-ohci",
		.owner	= THIS_MODULE,
	}
};

MODULE_ALIAS("platform:tilegx-ohci");
							
								
								
									
										34
									
								
								include/linux/usb/tilegx.h
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										34
									
								
								include/linux/usb/tilegx.h
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,34 @@ | ||||
/*
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * Structure to contain platform-specific data related to Tile-Gx USB
 * controllers.
 */

#ifndef _LINUX_USB_TILEGX_H
#define _LINUX_USB_TILEGX_H

#include <gxio/usb_host.h>

/*
 * Per-device platform data handed to the tilegx USB host controller
 * platform drivers; the GXIO context is owned by the driver between
 * probe and remove.
 */
struct tilegx_usb_platform_data {
	/* GXIO device index. */
	int dev_index;

	/* GXIO device context. */
	gxio_usb_host_context_t usb_ctx;

	/*
	 * Device IRQ.  NOTE(review): create_irq() returns a signed value,
	 * so callers must not test this unsigned field for "< 0" to detect
	 * allocation failure — capture the result in an int first.
	 */
	unsigned int irq;
};

#endif /* _LINUX_USB_TILEGX_H */
| @ -24,23 +24,25 @@ | ||||
| 
 | ||||
| static mempool_t *page_pool, *isa_page_pool; | ||||
| 
 | ||||
| #ifdef CONFIG_HIGHMEM | ||||
| #if defined(CONFIG_HIGHMEM) || defined(CONFIG_NEED_BOUNCE_POOL) | ||||
| static __init int init_emergency_pool(void) | ||||
| { | ||||
| #ifndef CONFIG_MEMORY_HOTPLUG | ||||
| #if defined(CONFIG_HIGHMEM) && !defined(CONFIG_MEMORY_HOTPLUG) | ||||
| 	if (max_pfn <= max_low_pfn) | ||||
| 		return 0; | ||||
| #endif | ||||
| 
 | ||||
| 	page_pool = mempool_create_page_pool(POOL_SIZE, 0); | ||||
| 	BUG_ON(!page_pool); | ||||
| 	printk("highmem bounce pool size: %d pages\n", POOL_SIZE); | ||||
| 	printk("bounce pool size: %d pages\n", POOL_SIZE); | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| __initcall(init_emergency_pool); | ||||
| #endif | ||||
| 
 | ||||
| #ifdef CONFIG_HIGHMEM | ||||
| /*
 | ||||
|  * highmem version, map in to vec | ||||
|  */ | ||||
|  | ||||
--- end of diff excerpt ---