firewire updates for v6.12
The batch of changes includes the following:

- Replacing tasklet with usual workqueue for isochronous context
- Replacing IDR with XArray
- Utilizing guard macro where possible
- Printing deprecation warning when enabling debug parameter of
  firewire-ohci module

Additionally, it includes a single patch for the sound subsystem which
the subsystem maintainer acked:

- Switching to nonatomic PCM operation

In the FireWire subsystem, the tasklet has long been used as the bottom
half of the 1394 OHCI hardIRQ. In recent kernel updates, BH workqueues
have become available, and some developers have proposed replacing the
tasklet with a BH workqueue. While it is fortunate that developers are
still considering the legacy subsystem, a simple replacement is not
necessarily suitable.

As a first step towards dropping the tasklet, I have investigated the
feasibility for the 1394 OHCI isochronous context, and concluded that a
usual workqueue is suitable. In this context, batches of packets are
processed in a dedicated queue, so the timing jitter caused by task
scheduling is not critical. Additionally, DMA transmission can be
scheduled on a per-packet basis, so the context can sleep between
transmission operations. Furthermore, the in-kernel protocol
implementations involve some CPU-bound tasks, which can sometimes
consume CPU time for long periods. These characteristics suggest that a
usual workqueue is suitable, though BH workqueues are not.

The replacement with a usual workqueue allows unit drivers to process
the content of packets in non-atomic context. It brings some relief to
drivers in the sound subsystem, since a spinlock is no longer mandatory
during isochronous packet processing.
-----BEGIN PGP SIGNATURE-----

iHUEABYKAB0WIQQE66IEYNDXNBPeGKSsLtaWM8LwEwUCZu41yQAKCRCsLtaWM8Lw
E4Y1AP43vZatH202NNMnbkLSW9axmHe6VHWEwDSsJT80vTbBNAD/WYV62EoQzlk1
1lzdts11SSqYPhj6tJDuRgqULlNAows=
=7VMx
-----END PGP SIGNATURE-----

Merge tag 'firewire-updates-6.12' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394

Pull firewire updates from Takashi Sakamoto:
 "In the FireWire subsystem, tasklets have long been used as the bottom
  half of the 1394 OHCI hardIRQ. In recent kernel updates, BH
  workqueues have become available, and some developers have proposed
  replacing the tasklet with a BH workqueue.

  As a first step towards dropping tasklet use, the 1394 OHCI
  isochronous context can use regular workqueues. In this context,
  batches of packets are processed in a dedicated queue, so the timing
  jitter caused by task scheduling is not critical. Additionally, DMA
  transmission can be scheduled on a per-packet basis, so the context
  can sleep between transmission operations. Furthermore, the in-kernel
  protocol implementations involve some CPU-bound tasks that can
  consume CPU time for long periods. These characteristics suggest that
  normal workqueues are suitable, though BH workqueues are not.

  The replacement with a workqueue allows unit drivers to process the
  content of packets in non-atomic context. It brings some relief to
  drivers in the sound subsystem, since a spinlock is no longer
  mandatory during isochronous packet processing.
  Summary:

   - Replace tasklet with workqueue for isochronous context

   - Replace IDR with XArray

   - Utilize guard macro where possible

   - Print deprecation warning when enabling debug parameter of
     firewire-ohci module

   - Switch to nonatomic PCM operation"

* tag 'firewire-updates-6.12' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394: (55 commits)
  firewire: core: rename cause flag of tracepoints event
  firewire: core: update documentation of kernel APIs for flushing completions
  firewire: core: add helper function to retire descriptors
  Revert "firewire: core: move workqueue handler from 1394 OHCI driver to core function"
  Revert "firewire: core: use mutex to coordinate concurrent calls to flush completions"
  firewire: core: use mutex to coordinate concurrent calls to flush completions
  firewire: core: move workqueue handler from 1394 OHCI driver to core function
  firewire: core: fulfill documentation of fw_iso_context_flush_completions()
  firewire: core: expose kernel API to schedule work item to process isochronous context
  firewire: core: use WARN_ON_ONCE() to avoid superfluous dumps
  ALSA: firewire: use nonatomic PCM operation
  firewire: core: non-atomic memory allocation for isochronous event to user client
  firewire: ohci: operate IT/IR events in sleepable work process instead of tasklet softIRQ
  firewire: core: add local API to queue work item to workqueue specific to isochronous contexts
  firewire: core: allocate workqueue to handle isochronous contexts in card
  firewire: ohci: obsolete direct usage of printk_ratelimit()
  firewire: ohci: deprecate debug parameter
  firewire: core: update fw_device outside of device_find_child()
  firewire: ohci: fix error path to detect initiated reset in TI TSB41BA3D phy
  firewire: core/ohci: minor refactoring for computation of configuration ROM size
  ...
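Editor's note: many commits in this series convert open-coded lock/unlock pairs to the scope-based guard macros from <linux/cleanup.h>, which is the dominant pattern in the diff below. A minimal sketch of the two forms used — guard() for function scope and scoped_guard() for a bounded block — with illustrative lock and list names that are not taken from the firewire code:

    #include <linux/cleanup.h>
    #include <linux/mutex.h>
    #include <linux/spinlock.h>
    #include <linux/list.h>

    static DEFINE_MUTEX(example_mutex);
    static DEFINE_SPINLOCK(example_lock);
    static LIST_HEAD(example_list);

    static int example_add(struct list_head *entry)
    {
            // The guard is dropped automatically on every return path, so
            // no explicit mutex_unlock() is needed, even for the error exit.
            guard(mutex)(&example_mutex);

            if (!list_empty(entry))
                    return -EBUSY;  // already linked somewhere

            list_add_tail(entry, &example_list);
            return 0;
    }

    static void example_del(struct list_head *entry)
    {
            // scoped_guard() bounds the critical section to the statement or
            // block that follows, mirroring spin_lock_irq()/spin_unlock_irq().
            scoped_guard(spinlock_irq, &example_lock)
                    list_del_init(entry);
    }

This is why several functions in the diff lose their early-unlock error paths: returning from inside the guarded scope releases the lock.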
This commit is contained in: commit d7dfb07d4d
--- a/Documentation/driver-api/firewire.rst
+++ b/Documentation/driver-api/firewire.rst
@@ -43,6 +43,8 @@ Firewire core transaction interfaces
 Firewire Isochronous I/O interfaces
 ===================================
 
+.. kernel-doc:: include/linux/firewire.h
+   :functions: fw_iso_context_schedule_flush_completions
 .. kernel-doc:: drivers/firewire/core-iso.c
    :export:
 
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -168,7 +168,6 @@ static size_t required_space(struct fw_descriptor *desc)
 int fw_core_add_descriptor(struct fw_descriptor *desc)
 {
         size_t i;
-        int ret;
 
         /*
          * Check descriptor is valid; the length of all blocks in the
@@ -182,29 +181,25 @@ int fw_core_add_descriptor(struct fw_descriptor *desc)
         if (i != desc->length)
                 return -EINVAL;
 
-        mutex_lock(&card_mutex);
+        guard(mutex)(&card_mutex);
 
-        if (config_rom_length + required_space(desc) > 256) {
-                ret = -EBUSY;
-        } else {
-                list_add_tail(&desc->link, &descriptor_list);
-                config_rom_length += required_space(desc);
-                descriptor_count++;
-                if (desc->immediate > 0)
-                        descriptor_count++;
-                update_config_roms();
-                ret = 0;
-        }
+        if (config_rom_length + required_space(desc) > 256)
+                return -EBUSY;
 
-        mutex_unlock(&card_mutex);
+        list_add_tail(&desc->link, &descriptor_list);
+        config_rom_length += required_space(desc);
+        descriptor_count++;
+        if (desc->immediate > 0)
+                descriptor_count++;
+        update_config_roms();
 
-        return ret;
+        return 0;
 }
 EXPORT_SYMBOL(fw_core_add_descriptor);
 
 void fw_core_remove_descriptor(struct fw_descriptor *desc)
 {
-        mutex_lock(&card_mutex);
+        guard(mutex)(&card_mutex);
 
         list_del(&desc->link);
         config_rom_length -= required_space(desc);
@@ -212,8 +207,6 @@ void fw_core_remove_descriptor(struct fw_descriptor *desc)
         if (desc->immediate > 0)
                 descriptor_count--;
         update_config_roms();
-
-        mutex_unlock(&card_mutex);
 }
 EXPORT_SYMBOL(fw_core_remove_descriptor);
 
@@ -381,11 +374,11 @@ static void bm_work(struct work_struct *work)
 
         bm_id = be32_to_cpu(transaction_data[0]);
 
-        spin_lock_irq(&card->lock);
-        if (rcode == RCODE_COMPLETE && generation == card->generation)
-                card->bm_node_id =
-                        bm_id == 0x3f ? local_id : 0xffc0 | bm_id;
-        spin_unlock_irq(&card->lock);
+        scoped_guard(spinlock_irq, &card->lock) {
+                if (rcode == RCODE_COMPLETE && generation == card->generation)
+                        card->bm_node_id =
+                                bm_id == 0x3f ? local_id : 0xffc0 | bm_id;
+        }
 
         if (rcode == RCODE_COMPLETE && bm_id != 0x3f) {
                 /* Somebody else is BM. Only act as IRM. */
@@ -578,25 +571,47 @@ void fw_card_initialize(struct fw_card *card,
 }
 EXPORT_SYMBOL(fw_card_initialize);
 
-int fw_card_add(struct fw_card *card,
-                u32 max_receive, u32 link_speed, u64 guid)
+int fw_card_add(struct fw_card *card, u32 max_receive, u32 link_speed, u64 guid,
+                unsigned int supported_isoc_contexts)
 {
+        struct workqueue_struct *isoc_wq;
         int ret;
 
+        // This workqueue should be:
+        //  * != WQ_BH                   Sleepable.
+        //  * == WQ_UNBOUND              Any core can process data for isoc context. The
+        //                               implementation of unit protocol could consumes the core
+        //                               longer somehow.
+        //  * != WQ_MEM_RECLAIM          Not used for any backend of block device.
+        //  * == WQ_FREEZABLE            Isochronous communication is at regular interval in real
+        //                               time, thus should be drained if possible at freeze phase.
+        //  * == WQ_HIGHPRI              High priority to process semi-realtime timestamped data.
+        //  * == WQ_SYSFS                Parameters are available via sysfs.
+        //  * max_active == n_it + n_ir  A hardIRQ could notify events for multiple isochronous
+        //                               contexts if they are scheduled to the same cycle.
+        isoc_wq = alloc_workqueue("firewire-isoc-card%u",
                                   WQ_UNBOUND | WQ_FREEZABLE | WQ_HIGHPRI | WQ_SYSFS,
                                   supported_isoc_contexts, card->index);
+        if (!isoc_wq)
+                return -ENOMEM;
+
         card->max_receive = max_receive;
         card->link_speed = link_speed;
         card->guid = guid;
 
-        mutex_lock(&card_mutex);
+        guard(mutex)(&card_mutex);
 
         generate_config_rom(card, tmp_config_rom);
         ret = card->driver->enable(card, tmp_config_rom, config_rom_length);
-        if (ret == 0)
-                list_add_tail(&card->link, &card_list);
+        if (ret < 0) {
+                destroy_workqueue(isoc_wq);
+                return ret;
+        }
 
-        mutex_unlock(&card_mutex);
+        card->isoc_wq = isoc_wq;
+        list_add_tail(&card->link, &card_list);
 
-        return ret;
+        return 0;
 }
 EXPORT_SYMBOL(fw_card_add);
 
@@ -714,29 +729,31 @@ EXPORT_SYMBOL_GPL(fw_card_release);
 void fw_core_remove_card(struct fw_card *card)
 {
         struct fw_card_driver dummy_driver = dummy_driver_template;
-        unsigned long flags;
 
         might_sleep();
 
         card->driver->update_phy_reg(card, 4,
                                      PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
         fw_schedule_bus_reset(card, false, true);
 
-        mutex_lock(&card_mutex);
-        list_del_init(&card->link);
-        mutex_unlock(&card_mutex);
+        scoped_guard(mutex, &card_mutex)
+                list_del_init(&card->link);
 
         /* Switch off most of the card driver interface. */
         dummy_driver.free_iso_context = card->driver->free_iso_context;
         dummy_driver.stop_iso = card->driver->stop_iso;
         card->driver = &dummy_driver;
+        drain_workqueue(card->isoc_wq);
 
-        spin_lock_irqsave(&card->lock, flags);
-        fw_destroy_nodes(card);
-        spin_unlock_irqrestore(&card->lock, flags);
+        scoped_guard(spinlock_irqsave, &card->lock)
+                fw_destroy_nodes(card);
 
         /* Wait for all users, especially device workqueue jobs, to finish. */
         fw_card_put(card);
         wait_for_completion(&card->done);
 
+        destroy_workqueue(card->isoc_wq);
+
         WARN_ON(!list_empty(&card->transaction_list));
 }
 EXPORT_SYMBOL(fw_core_remove_card);
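Editor's note: the comment block in fw_card_add() above spells out the workqueue flag choices. A sketch of the same pattern in isolation — an unbound, freezable, high-priority workqueue with one in-flight work item per isochronous context, queued from hardIRQ — with illustrative names that are not from the driver:

    #include <linux/workqueue.h>

    struct example_isoc_context {
            struct work_struct work;
            // per-context state ...
    };

    // Runs in process context; it may sleep, unlike a tasklet handler.
    static void example_isoc_work(struct work_struct *work)
    {
            struct example_isoc_context *ctx =
                    container_of(work, struct example_isoc_context, work);

            // process completed packets for this context ...
            (void)ctx;
    }

    static struct workqueue_struct *example_create_isoc_wq(unsigned int n_contexts,
                                                           unsigned int card_index)
    {
            // max_active = n_contexts: a single hardIRQ may raise work for
            // several contexts scheduled on the same isochronous cycle.
            return alloc_workqueue("example-isoc-card%u",
                                   WQ_UNBOUND | WQ_FREEZABLE | WQ_HIGHPRI | WQ_SYSFS,
                                   n_contexts, card_index);
    }

    // Called from the interrupt handler in this sketch.
    static void example_notify(struct workqueue_struct *wq,
                               struct example_isoc_context *ctx)
    {
            queue_work(wq, &ctx->work);
    }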
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -14,7 +14,6 @@
 #include <linux/errno.h>
 #include <linux/firewire.h>
 #include <linux/firewire-cdev.h>
-#include <linux/idr.h>
 #include <linux/irqflags.h>
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
@@ -37,6 +36,8 @@
 #include "core.h"
 #include <trace/events/firewire.h>
 
+#include "packet-header-definitions.h"
+
 /*
  * ABI version history is documented in linux/firewire-cdev.h.
  */
@@ -52,7 +53,7 @@ struct client {
 
         spinlock_t lock;
         bool in_shutdown;
-        struct idr resource_idr;
+        struct xarray resource_xa;
         struct list_head event_list;
         wait_queue_head_t wait;
         wait_queue_head_t tx_flush_wait;
@@ -137,8 +138,41 @@ struct iso_resource {
         struct iso_resource_event *e_alloc, *e_dealloc;
 };
 
+static struct address_handler_resource *to_address_handler_resource(struct client_resource *resource)
+{
+        return container_of(resource, struct address_handler_resource, resource);
+}
+
+static struct inbound_transaction_resource *to_inbound_transaction_resource(struct client_resource *resource)
+{
+        return container_of(resource, struct inbound_transaction_resource, resource);
+}
+
+static struct descriptor_resource *to_descriptor_resource(struct client_resource *resource)
+{
+        return container_of(resource, struct descriptor_resource, resource);
+}
+
+static struct iso_resource *to_iso_resource(struct client_resource *resource)
+{
+        return container_of(resource, struct iso_resource, resource);
+}
+
 static void release_iso_resource(struct client *, struct client_resource *);
 
+static int is_iso_resource(const struct client_resource *resource)
+{
+        return resource->release == release_iso_resource;
+}
+
+static void release_transaction(struct client *client,
+                                struct client_resource *resource);
+
+static int is_outbound_transaction_resource(const struct client_resource *resource)
+{
+        return resource->release == release_transaction;
+}
+
 static void schedule_iso_resource(struct iso_resource *r, unsigned long delay)
 {
         client_get(r->client);
@@ -146,13 +180,6 @@ static void schedule_iso_resource(struct iso_resource *r, unsigned long delay)
         client_put(r->client);
 }
 
-static void schedule_if_iso_resource(struct client_resource *resource)
-{
-        if (resource->release == release_iso_resource)
-                schedule_iso_resource(container_of(resource,
-                                        struct iso_resource, resource), 0);
-}
-
 /*
  * dequeue_event() just kfree()'s the event, so the event has to be
  * the first field in a struct XYZ_event.
@@ -269,7 +296,7 @@ static int fw_device_op_open(struct inode *inode, struct file *file)
 
         client->device = device;
         spin_lock_init(&client->lock);
-        idr_init(&client->resource_idr);
+        xa_init_flags(&client->resource_xa, XA_FLAGS_ALLOC1 | XA_FLAGS_LOCK_BH);
         INIT_LIST_HEAD(&client->event_list);
         init_waitqueue_head(&client->wait);
         init_waitqueue_head(&client->tx_flush_wait);
@@ -285,19 +312,17 @@ static int fw_device_op_open(struct inode *inode, struct file *file)
 static void queue_event(struct client *client, struct event *event,
                         void *data0, size_t size0, void *data1, size_t size1)
 {
-        unsigned long flags;
-
         event->v[0].data = data0;
         event->v[0].size = size0;
         event->v[1].data = data1;
         event->v[1].size = size1;
 
-        spin_lock_irqsave(&client->lock, flags);
-        if (client->in_shutdown)
-                kfree(event);
-        else
-                list_add_tail(&event->link, &client->event_list);
-        spin_unlock_irqrestore(&client->lock, flags);
+        scoped_guard(spinlock_irqsave, &client->lock) {
+                if (client->in_shutdown)
+                        kfree(event);
+                else
+                        list_add_tail(&event->link, &client->event_list);
+        }
 
         wake_up_interruptible(&client->wait);
 }
@@ -319,10 +344,10 @@ static int dequeue_event(struct client *client,
                     fw_device_is_shutdown(client->device))
                 return -ENODEV;
 
-        spin_lock_irq(&client->lock);
-        event = list_first_entry(&client->event_list, struct event, link);
-        list_del(&event->link);
-        spin_unlock_irq(&client->lock);
+        scoped_guard(spinlock_irq, &client->lock) {
+                event = list_first_entry(&client->event_list, struct event, link);
+                list_del(&event->link);
+        }
 
         total = 0;
         for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
@@ -354,7 +379,7 @@ static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
 {
         struct fw_card *card = client->device->card;
 
-        spin_lock_irq(&card->lock);
+        guard(spinlock_irq)(&card->lock);
 
         event->closure = client->bus_reset_closure;
         event->type = FW_CDEV_EVENT_BUS_RESET;
@@ -364,8 +389,6 @@ static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
         event->bm_node_id = card->bm_node_id;
         event->irm_node_id = card->irm_node->node_id;
         event->root_node_id = card->root_node->node_id;
-
-        spin_unlock_irq(&card->lock);
 }
 
 static void for_each_client(struct fw_device *device,
@@ -373,22 +396,17 @@
 {
         struct client *c;
 
-        mutex_lock(&device->client_list_mutex);
+        guard(mutex)(&device->client_list_mutex);
+
         list_for_each_entry(c, &device->client_list, link)
                 callback(c);
-        mutex_unlock(&device->client_list_mutex);
-}
-
-static int schedule_reallocations(int id, void *p, void *data)
-{
-        schedule_if_iso_resource(p);
-
-        return 0;
 }
 
 static void queue_bus_reset_event(struct client *client)
 {
         struct bus_reset_event *e;
+        struct client_resource *resource;
+        unsigned long index;
 
         e = kzalloc(sizeof(*e), GFP_KERNEL);
         if (e == NULL)
@@ -399,9 +417,12 @@ static void queue_bus_reset_event(struct client *client)
         queue_event(client, &e->event,
                     &e->reset, sizeof(e->reset), NULL, 0);
 
-        spin_lock_irq(&client->lock);
-        idr_for_each(&client->resource_idr, schedule_reallocations, client);
-        spin_unlock_irq(&client->lock);
+        guard(spinlock_irq)(&client->lock);
+
+        xa_for_each(&client->resource_xa, index, resource) {
+                if (is_iso_resource(resource))
+                        schedule_iso_resource(to_iso_resource(resource), 0);
+        }
 }
 
 void fw_device_cdev_update(struct fw_device *device)
@@ -452,23 +473,20 @@ static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
         a->version = FW_CDEV_KERNEL_VERSION;
         a->card = client->device->card->index;
 
-        down_read(&fw_device_rwsem);
+        scoped_guard(rwsem_read, &fw_device_rwsem) {
+                if (a->rom != 0) {
+                        size_t want = a->rom_length;
+                        size_t have = client->device->config_rom_length * 4;
 
-        if (a->rom != 0) {
-                size_t want = a->rom_length;
-                size_t have = client->device->config_rom_length * 4;
-
-                ret = copy_to_user(u64_to_uptr(a->rom),
-                                   client->device->config_rom, min(want, have));
+                        ret = copy_to_user(u64_to_uptr(a->rom), client->device->config_rom,
+                                           min(want, have));
+                        if (ret != 0)
+                                return -EFAULT;
+                }
+                a->rom_length = client->device->config_rom_length * 4;
         }
-        a->rom_length = client->device->config_rom_length * 4;
 
-        up_read(&fw_device_rwsem);
-
-        if (ret != 0)
-                return -EFAULT;
-
-        mutex_lock(&client->device->client_list_mutex);
+        guard(mutex)(&client->device->client_list_mutex);
 
         client->bus_reset_closure = a->bus_reset_closure;
         if (a->bus_reset != 0) {
@@ -479,37 +497,36 @@ static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
         if (ret == 0 && list_empty(&client->link))
                 list_add_tail(&client->link, &client->device->client_list);
 
-        mutex_unlock(&client->device->client_list_mutex);
-
         return ret ? -EFAULT : 0;
 }
 
-static int add_client_resource(struct client *client,
-                               struct client_resource *resource, gfp_t gfp_mask)
+static int add_client_resource(struct client *client, struct client_resource *resource,
+                               gfp_t gfp_mask)
 {
-        bool preload = gfpflags_allow_blocking(gfp_mask);
-        unsigned long flags;
         int ret;
 
-        if (preload)
-                idr_preload(gfp_mask);
-        spin_lock_irqsave(&client->lock, flags);
+        scoped_guard(spinlock_irqsave, &client->lock) {
+                u32 index;
 
-        if (client->in_shutdown)
-                ret = -ECANCELED;
-        else
-                ret = idr_alloc(&client->resource_idr, resource, 0, 0,
-                                GFP_NOWAIT);
-        if (ret >= 0) {
-                resource->handle = ret;
-                client_get(client);
-                schedule_if_iso_resource(resource);
+                if (client->in_shutdown) {
+                        ret = -ECANCELED;
+                } else {
+                        if (gfpflags_allow_blocking(gfp_mask)) {
+                                ret = xa_alloc(&client->resource_xa, &index, resource, xa_limit_32b,
+                                               GFP_NOWAIT);
+                        } else {
+                                ret = xa_alloc_bh(&client->resource_xa, &index, resource,
+                                                  xa_limit_32b, GFP_NOWAIT);
+                        }
+                }
+                if (ret >= 0) {
+                        resource->handle = index;
+                        client_get(client);
+                        if (is_iso_resource(resource))
+                                schedule_iso_resource(to_iso_resource(resource), 0);
+                }
         }
 
-        spin_unlock_irqrestore(&client->lock, flags);
-        if (preload)
-                idr_preload_end();
-
         return ret < 0 ? ret : 0;
 }
 
@@ -517,19 +534,19 @@ static int release_client_resource(struct client *client, u32 handle,
                                    client_resource_release_fn_t release,
                                    struct client_resource **return_resource)
 {
+        unsigned long index = handle;
         struct client_resource *resource;
 
-        spin_lock_irq(&client->lock);
-        if (client->in_shutdown)
-                resource = NULL;
-        else
-                resource = idr_find(&client->resource_idr, handle);
-        if (resource && resource->release == release)
-                idr_remove(&client->resource_idr, handle);
-        spin_unlock_irq(&client->lock);
+        scoped_guard(spinlock_irq, &client->lock) {
+                if (client->in_shutdown)
+                        return -EINVAL;
 
-        if (!(resource && resource->release == release))
-                return -EINVAL;
+                resource = xa_load(&client->resource_xa, index);
+                if (!resource || resource->release != release)
+                        return -EINVAL;
+
+                xa_erase(&client->resource_xa, handle);
+        }
 
         if (return_resource)
                 *return_resource = resource;
@@ -551,13 +568,13 @@ static void complete_transaction(struct fw_card *card, int rcode, u32 request_ts
 {
         struct outbound_transaction_event *e = data;
         struct client *client = e->client;
-        unsigned long flags;
+        unsigned long index = e->r.resource.handle;
 
-        spin_lock_irqsave(&client->lock, flags);
-        idr_remove(&client->resource_idr, e->r.resource.handle);
-        if (client->in_shutdown)
-                wake_up(&client->tx_flush_wait);
-        spin_unlock_irqrestore(&client->lock, flags);
+        scoped_guard(spinlock_irqsave, &client->lock) {
+                xa_erase(&client->resource_xa, index);
+                if (client->in_shutdown)
+                        wake_up(&client->tx_flush_wait);
+        }
 
         switch (e->rsp.without_tstamp.type) {
         case FW_CDEV_EVENT_RESPONSE:
@@ -599,13 +616,13 @@ static void complete_transaction(struct fw_card *card, int rcode, u32 request_ts
                 queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length, NULL, 0);
 
                 break;
         }
         default:
                 WARN_ON(1);
                 break;
         }
 
-        /* Drop the idr's reference */
+        // Drop the xarray's reference.
         client_put(client);
 }
 
@@ -693,8 +710,7 @@ static int ioctl_send_request(struct client *client, union ioctl_arg *arg)
 static void release_request(struct client *client,
                             struct client_resource *resource)
 {
-        struct inbound_transaction_resource *r = container_of(resource,
-                        struct inbound_transaction_resource, resource);
+        struct inbound_transaction_resource *r = to_inbound_transaction_resource(resource);
 
         if (r->is_fcp)
                 fw_request_put(r->request);
@@ -804,8 +820,7 @@ static void handle_request(struct fw_card *card, struct fw_request *request,
 static void release_address_handler(struct client *client,
                                     struct client_resource *resource)
 {
-        struct address_handler_resource *r =
-            container_of(resource, struct address_handler_resource, resource);
+        struct address_handler_resource *r = to_address_handler_resource(resource);
 
         fw_core_remove_address_handler(&r->handler);
         kfree(r);
@@ -869,8 +884,7 @@ static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
                                     release_request, &resource) < 0)
                 return -EINVAL;
 
-        r = container_of(resource, struct inbound_transaction_resource,
-                         resource);
+        r = to_inbound_transaction_resource(resource);
         if (r->is_fcp) {
                 fw_request_put(r->request);
                 goto out;
@@ -904,8 +918,7 @@ static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg)
 static void release_descriptor(struct client *client,
                                struct client_resource *resource)
 {
-        struct descriptor_resource *r =
-                container_of(resource, struct descriptor_resource, resource);
+        struct descriptor_resource *r = to_descriptor_resource(resource);
 
         fw_core_remove_descriptor(&r->descriptor);
         kfree(r);
@@ -969,7 +982,7 @@ static void iso_callback(struct fw_iso_context *context, u32 cycle,
         struct client *client = data;
         struct iso_interrupt_event *e;
 
-        e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC);
+        e = kmalloc(sizeof(*e) + header_length, GFP_KERNEL);
         if (e == NULL)
                 return;
 
@@ -988,7 +1001,7 @@ static void iso_mc_callback(struct fw_iso_context *context,
         struct client *client = data;
         struct iso_interrupt_mc_event *e;
 
-        e = kmalloc(sizeof(*e), GFP_ATOMIC);
+        e = kmalloc(sizeof(*e), GFP_KERNEL);
         if (e == NULL)
                 return;
 
@@ -1070,10 +1083,10 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
         if (client->version < FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW)
                 context->drop_overflow_headers = true;
 
-        /* We only support one context at this time. */
-        spin_lock_irq(&client->lock);
+        // We only support one context at this time.
+        guard(spinlock_irq)(&client->lock);
+
         if (client->iso_context != NULL) {
-                spin_unlock_irq(&client->lock);
                 fw_iso_context_destroy(context);
 
                 return -EBUSY;
@@ -1083,7 +1096,6 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
                                                client->device->card,
                                                iso_dma_direction(context));
         if (ret < 0) {
-                spin_unlock_irq(&client->lock);
                 fw_iso_context_destroy(context);
 
                 return ret;
@@ -1092,7 +1104,6 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
         }
         client->iso_closure = a->closure;
         client->iso_context = context;
-        spin_unlock_irq(&client->lock);
 
         a->handle = 0;
 
@@ -1266,29 +1277,27 @@ static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
         struct fw_card *card = client->device->card;
         struct timespec64 ts = {0, 0};
         u32 cycle_time = 0;
-        int ret = 0;
+        int ret;
 
-        local_irq_disable();
+        guard(irq)();
 
         ret = fw_card_read_cycle_time(card, &cycle_time);
         if (ret < 0)
-                goto end;
+                return ret;
 
         switch (a->clk_id) {
         case CLOCK_REALTIME:      ktime_get_real_ts64(&ts); break;
         case CLOCK_MONOTONIC:     ktime_get_ts64(&ts);      break;
         case CLOCK_MONOTONIC_RAW: ktime_get_raw_ts64(&ts);  break;
         default:
-                ret = -EINVAL;
+                return -EINVAL;
         }
-end:
-        local_irq_enable();
 
         a->tv_sec = ts.tv_sec;
         a->tv_nsec = ts.tv_nsec;
         a->cycle_timer = cycle_time;
 
-        return ret;
+        return 0;
 }
 
 static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg)
@@ -1311,28 +1320,28 @@ static void iso_resource_work(struct work_struct *work)
         struct iso_resource *r =
                         container_of(work, struct iso_resource, work.work);
         struct client *client = r->client;
+        unsigned long index = r->resource.handle;
         int generation, channel, bandwidth, todo;
         bool skip, free, success;
 
-        spin_lock_irq(&client->lock);
-        generation = client->device->generation;
-        todo = r->todo;
-        /* Allow 1000ms grace period for other reallocations. */
-        if (todo == ISO_RES_ALLOC &&
-            time_before64(get_jiffies_64(),
-                          client->device->card->reset_jiffies + HZ)) {
-                schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3));
-                skip = true;
-        } else {
-                /* We could be called twice within the same generation. */
-                skip = todo == ISO_RES_REALLOC &&
-                       r->generation == generation;
+        scoped_guard(spinlock_irq, &client->lock) {
+                generation = client->device->generation;
+                todo = r->todo;
+                // Allow 1000ms grace period for other reallocations.
+                if (todo == ISO_RES_ALLOC &&
+                    time_before64(get_jiffies_64(), client->device->card->reset_jiffies + HZ)) {
+                        schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3));
+                        skip = true;
+                } else {
+                        // We could be called twice within the same generation.
+                        skip = todo == ISO_RES_REALLOC &&
+                               r->generation == generation;
+                }
+                free = todo == ISO_RES_DEALLOC ||
+                       todo == ISO_RES_ALLOC_ONCE ||
+                       todo == ISO_RES_DEALLOC_ONCE;
+                r->generation = generation;
         }
-        free = todo == ISO_RES_DEALLOC ||
-               todo == ISO_RES_ALLOC_ONCE ||
-               todo == ISO_RES_DEALLOC_ONCE;
-        r->generation = generation;
-        spin_unlock_irq(&client->lock);
 
         if (skip)
                 goto out;
@@ -1346,7 +1355,7 @@ static void iso_resource_work(struct work_struct *work)
                         todo == ISO_RES_ALLOC_ONCE);
         /*
          * Is this generation outdated already? As long as this resource sticks
-         * in the idr, it will be scheduled again for a newer generation or at
+         * in the xarray, it will be scheduled again for a newer generation or at
          * shutdown.
          */
         if (channel == -EAGAIN &&
@@ -1355,24 +1364,20 @@ static void iso_resource_work(struct work_struct *work)
 
         success = channel >= 0 || bandwidth > 0;
 
-        spin_lock_irq(&client->lock);
-        /*
-         * Transit from allocation to reallocation, except if the client
-         * requested deallocation in the meantime.
-         */
-        if (r->todo == ISO_RES_ALLOC)
-                r->todo = ISO_RES_REALLOC;
-        /*
-         * Allocation or reallocation failure? Pull this resource out of the
-         * idr and prepare for deletion, unless the client is shutting down.
-         */
-        if (r->todo == ISO_RES_REALLOC && !success &&
-            !client->in_shutdown &&
-            idr_remove(&client->resource_idr, r->resource.handle)) {
-                client_put(client);
-                free = true;
+        scoped_guard(spinlock_irq, &client->lock) {
+                // Transit from allocation to reallocation, except if the client
+                // requested deallocation in the meantime.
+                if (r->todo == ISO_RES_ALLOC)
+                        r->todo = ISO_RES_REALLOC;
+                // Allocation or reallocation failure? Pull this resource out of the
+                // xarray and prepare for deletion, unless the client is shutting down.
+                if (r->todo == ISO_RES_REALLOC && !success &&
+                    !client->in_shutdown &&
+                    xa_erase(&client->resource_xa, index)) {
+                        client_put(client);
+                        free = true;
+                }
         }
-        spin_unlock_irq(&client->lock);
 
         if (todo == ISO_RES_ALLOC && channel >= 0)
                 r->channels = 1ULL << channel;
@@ -1407,13 +1412,12 @@ static void iso_resource_work(struct work_struct *work)
 static void release_iso_resource(struct client *client,
                                  struct client_resource *resource)
 {
-        struct iso_resource *r =
-                container_of(resource, struct iso_resource, resource);
+        struct iso_resource *r = to_iso_resource(resource);
+
+        guard(spinlock_irq)(&client->lock);
 
-        spin_lock_irq(&client->lock);
         r->todo = ISO_RES_DEALLOC;
         schedule_iso_resource(r, 0);
-        spin_unlock_irq(&client->lock);
 }
 
 static int init_iso_resource(struct client *client,
@@ -1635,7 +1639,7 @@ static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
         e->client = client;
         e->p.speed = SCODE_100;
         e->p.generation = a->generation;
-        e->p.header[0] = TCODE_LINK_INTERNAL << 4;
+        async_header_set_tcode(e->p.header, TCODE_LINK_INTERNAL);
         e->p.header[1] = a->data[0];
         e->p.header[2] = a->data[1];
         e->p.header_length = 12;
@@ -1676,26 +1680,22 @@ static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg
         if (!client->device->is_local)
                 return -ENOSYS;
 
-        spin_lock_irq(&card->lock);
+        guard(spinlock_irq)(&card->lock);
 
         list_move_tail(&client->phy_receiver_link, &card->phy_receiver_list);
         client->phy_receiver_closure = a->closure;
 
-        spin_unlock_irq(&card->lock);
-
         return 0;
 }
 
 void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
 {
         struct client *client;
-        struct inbound_phy_packet_event *e;
-        unsigned long flags;
 
-        spin_lock_irqsave(&card->lock, flags);
+        guard(spinlock_irqsave)(&card->lock);
 
         list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) {
-                e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
+                struct inbound_phy_packet_event *e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
                 if (e == NULL)
                         break;
 
@@ -1723,8 +1723,6 @@ void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
                 queue_event(client, &e->event, &e->phy_packet, sizeof(*pp) + 8, NULL, 0);
                 }
         }
-
-        spin_unlock_irqrestore(&card->lock, flags);
 }
 
 static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
@@ -1821,16 +1819,15 @@ static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
         if (ret < 0)
                 return ret;
 
-        spin_lock_irq(&client->lock);
-        if (client->iso_context) {
-                ret = fw_iso_buffer_map_dma(&client->buffer,
-                                client->device->card,
-                                iso_dma_direction(client->iso_context));
-                client->buffer_is_mapped = (ret == 0);
+        scoped_guard(spinlock_irq, &client->lock) {
+                if (client->iso_context) {
+                        ret = fw_iso_buffer_map_dma(&client->buffer, client->device->card,
+                                                    iso_dma_direction(client->iso_context));
+                        if (ret < 0)
+                                goto fail;
+                        client->buffer_is_mapped = true;
+                }
         }
-        spin_unlock_irq(&client->lock);
-        if (ret < 0)
-                goto fail;
 
         ret = vm_map_pages_zero(vma, client->buffer.pages,
                                 client->buffer.page_count);
@@ -1843,48 +1840,33 @@ static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
         return ret;
 }
 
-static int is_outbound_transaction_resource(int id, void *p, void *data)
+static bool has_outbound_transactions(struct client *client)
 {
-        struct client_resource *resource = p;
+        struct client_resource *resource;
+        unsigned long index;
 
-        return resource->release == release_transaction;
-}
+        guard(spinlock_irq)(&client->lock);
 
-static int has_outbound_transactions(struct client *client)
-{
-        int ret;
+        xa_for_each(&client->resource_xa, index, resource) {
+                if (is_outbound_transaction_resource(resource))
+                        return true;
+        }
 
-        spin_lock_irq(&client->lock);
-        ret = idr_for_each(&client->resource_idr,
-                           is_outbound_transaction_resource, NULL);
-        spin_unlock_irq(&client->lock);
-
-        return ret;
-}
-
-static int shutdown_resource(int id, void *p, void *data)
-{
-        struct client_resource *resource = p;
-        struct client *client = data;
-
-        resource->release(client, resource);
-        client_put(client);
-
-        return 0;
+        return false;
 }
 
 static int fw_device_op_release(struct inode *inode, struct file *file)
 {
         struct client *client = file->private_data;
         struct event *event, *next_event;
+        struct client_resource *resource;
+        unsigned long index;
 
-        spin_lock_irq(&client->device->card->lock);
-        list_del(&client->phy_receiver_link);
-        spin_unlock_irq(&client->device->card->lock);
+        scoped_guard(spinlock_irq, &client->device->card->lock)
+                list_del(&client->phy_receiver_link);
 
-        mutex_lock(&client->device->client_list_mutex);
-        list_del(&client->link);
-        mutex_unlock(&client->device->client_list_mutex);
+        scoped_guard(mutex, &client->device->client_list_mutex)
+                list_del(&client->link);
 
         if (client->iso_context)
                 fw_iso_context_destroy(client->iso_context);
@@ -1892,15 +1874,17 @@ static int fw_device_op_release(struct inode *inode, struct file *file)
         if (client->buffer.pages)
                 fw_iso_buffer_destroy(&client->buffer, client->device->card);
 
-        /* Freeze client->resource_idr and client->event_list */
-        spin_lock_irq(&client->lock);
-        client->in_shutdown = true;
-        spin_unlock_irq(&client->lock);
+        // Freeze client->resource_xa and client->event_list.
+        scoped_guard(spinlock_irq, &client->lock)
+                client->in_shutdown = true;
 
         wait_event(client->tx_flush_wait, !has_outbound_transactions(client));
 
-        idr_for_each(&client->resource_idr, shutdown_resource, client);
-        idr_destroy(&client->resource_idr);
+        xa_for_each(&client->resource_xa, index, resource) {
+                resource->release(client, resource);
+                client_put(client);
+        }
+        xa_destroy(&client->resource_xa);
 
         list_for_each_entry_safe(event, next_event, &client->event_list, link)
                 kfree(event);
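Editor's note: the core-cdev.c changes above replace the IDR with an allocating XArray as the per-client resource table. A reduced sketch of that pattern in isolation — structure and function names are placeholders, not the driver's:

    #include <linux/xarray.h>

    // XA_FLAGS_ALLOC1 makes xa_alloc() hand out indexes starting from 1.
    static DEFINE_XARRAY_FLAGS(example_resources, XA_FLAGS_ALLOC1);

    static int example_add(void *resource, u32 *handle)
    {
            // xa_alloc() stores the entry and returns a free index in *handle,
            // replacing the idr_preload()/idr_alloc() dance in one call.
            return xa_alloc(&example_resources, handle, resource,
                            xa_limit_32b, GFP_KERNEL);
    }

    static void *example_remove(u32 handle)
    {
            // xa_erase() returns the entry that was stored, or NULL.
            return xa_erase(&example_resources, handle);
    }

    static void example_for_each(void (*fn)(void *))
    {
            unsigned long index;
            void *entry;

            // Iteration replaces idr_for_each() and needs no callback trampoline.
            xa_for_each(&example_resources, index, entry)
                    fn(entry);
    }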
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -12,7 +12,6 @@
 #include <linux/errno.h>
 #include <linux/firewire.h>
 #include <linux/firewire-constants.h>
-#include <linux/idr.h>
 #include <linux/jiffies.h>
 #include <linux/kobject.h>
 #include <linux/list.h>
@@ -288,7 +287,7 @@ static ssize_t show_immediate(struct device *dev,
         const u32 *directories[] = {NULL, NULL};
         int i, value = -1;
 
-        down_read(&fw_device_rwsem);
+        guard(rwsem_read)(&fw_device_rwsem);
 
         if (is_fw_unit(dev)) {
                 directories[0] = fw_unit(dev)->directory;
@@ -317,8 +316,6 @@ static ssize_t show_immediate(struct device *dev,
                 }
         }
 
-        up_read(&fw_device_rwsem);
-
         if (value < 0)
                 return -ENOENT;
 
@@ -339,7 +336,7 @@ static ssize_t show_text_leaf(struct device *dev,
         char dummy_buf[2];
         int i, ret = -ENOENT;
 
-        down_read(&fw_device_rwsem);
+        guard(rwsem_read)(&fw_device_rwsem);
 
         if (is_fw_unit(dev)) {
                 directories[0] = fw_unit(dev)->directory;
@@ -382,15 +379,14 @@ static ssize_t show_text_leaf(struct device *dev,
                 }
         }
 
-        if (ret >= 0) {
-                /* Strip trailing whitespace and add newline. */
-                while (ret > 0 && isspace(buf[ret - 1]))
-                        ret--;
-                strcpy(buf + ret, "\n");
-                ret++;
-        }
+        if (ret < 0)
+                return ret;
 
-        up_read(&fw_device_rwsem);
+        // Strip trailing whitespace and add newline.
+        while (ret > 0 && isspace(buf[ret - 1]))
+                ret--;
+        strcpy(buf + ret, "\n");
+        ret++;
 
         return ret;
 }
@@ -466,10 +462,10 @@ static ssize_t config_rom_show(struct device *dev,
         struct fw_device *device = fw_device(dev);
         size_t length;
 
-        down_read(&fw_device_rwsem);
+        guard(rwsem_read)(&fw_device_rwsem);
+
         length = device->config_rom_length * 4;
         memcpy(buf, device->config_rom, length);
-        up_read(&fw_device_rwsem);
 
         return length;
 }
@@ -478,13 +474,10 @@ static ssize_t guid_show(struct device *dev,
                          struct device_attribute *attr, char *buf)
 {
         struct fw_device *device = fw_device(dev);
-        int ret;
 
-        down_read(&fw_device_rwsem);
-        ret = sysfs_emit(buf, "0x%08x%08x\n", device->config_rom[3], device->config_rom[4]);
-        up_read(&fw_device_rwsem);
+        guard(rwsem_read)(&fw_device_rwsem);
 
-        return ret;
+        return sysfs_emit(buf, "0x%08x%08x\n", device->config_rom[3], device->config_rom[4]);
 }
 
 static ssize_t is_local_show(struct device *dev,
@@ -524,7 +517,8 @@ static ssize_t units_show(struct device *dev,
         struct fw_csr_iterator ci;
         int key, value, i = 0;
 
-        down_read(&fw_device_rwsem);
+        guard(rwsem_read)(&fw_device_rwsem);
+
         fw_csr_iterator_init(&ci, &device->config_rom[ROOT_DIR_OFFSET]);
         while (fw_csr_iterator_next(&ci, &key, &value)) {
                 if (key != (CSR_UNIT | CSR_DIRECTORY))
@@ -533,7 +527,6 @@ static ssize_t units_show(struct device *dev,
                 if (i >= PAGE_SIZE - (8 + 1 + 8 + 1))
                         break;
         }
-        up_read(&fw_device_rwsem);
 
         if (i)
                 buf[i - 1] = '\n';
@@ -571,7 +564,8 @@ static int read_rom(struct fw_device *device,
         return rcode;
 }
 
-#define MAX_CONFIG_ROM_SIZE 256
+// By quadlet unit.
+#define MAX_CONFIG_ROM_SIZE ((CSR_CONFIG_ROM_END - CSR_CONFIG_ROM) / sizeof(u32))
 
 /*
  * Read the bus info block, perform a speed probe, and read all of the rest of
@@ -729,10 +723,10 @@ static int read_config_rom(struct fw_device *device, int generation)
                 goto out;
         }
 
-        down_write(&fw_device_rwsem);
-        device->config_rom = new_rom;
-        device->config_rom_length = length;
-        up_write(&fw_device_rwsem);
+        scoped_guard(rwsem_write, &fw_device_rwsem) {
+                device->config_rom = new_rom;
+                device->config_rom_length = length;
+        }
 
         kfree(old_rom);
         ret = RCODE_COMPLETE;
@@ -813,24 +807,21 @@ static int shutdown_unit(struct device *device, void *data)
 
 /*
  * fw_device_rwsem acts as dual purpose mutex:
- *  - serializes accesses to fw_device_idr,
  *  - serializes accesses to fw_device.config_rom/.config_rom_length and
  *    fw_unit.directory, unless those accesses happen at safe occasions
  */
 DECLARE_RWSEM(fw_device_rwsem);
 
-DEFINE_IDR(fw_device_idr);
+DEFINE_XARRAY_ALLOC(fw_device_xa);
 int fw_cdev_major;
 
 struct fw_device *fw_device_get_by_devt(dev_t devt)
 {
         struct fw_device *device;
 
-        down_read(&fw_device_rwsem);
-        device = idr_find(&fw_device_idr, MINOR(devt));
+        device = xa_load(&fw_device_xa, MINOR(devt));
         if (device)
                 fw_device_get(device);
-        up_read(&fw_device_rwsem);
 
         return device;
 }
@@ -864,7 +855,6 @@ static void fw_device_shutdown(struct work_struct *work)
 {
         struct fw_device *device =
                 container_of(work, struct fw_device, work.work);
-        int minor = MINOR(device->device.devt);
 
         if (time_before64(get_jiffies_64(),
                           device->card->reset_jiffies + SHUTDOWN_DELAY)
@@ -882,9 +872,7 @@ static void fw_device_shutdown(struct work_struct *work)
         device_for_each_child(&device->device, NULL, shutdown_unit);
         device_unregister(&device->device);
 
-        down_write(&fw_device_rwsem);
-        idr_remove(&fw_device_idr, minor);
-        up_write(&fw_device_rwsem);
+        xa_erase(&fw_device_xa, MINOR(device->device.devt));
 
         fw_device_put(device);
 }
@@ -893,16 +881,14 @@ static void fw_device_release(struct device *dev)
 {
         struct fw_device *device = fw_device(dev);
         struct fw_card *card = device->card;
-        unsigned long flags;
 
         /*
          * Take the card lock so we don't set this to NULL while a
         * FW_NODE_UPDATED callback is being handled or while the
         * bus manager work looks at this node.
         */
-        spin_lock_irqsave(&card->lock, flags);
-        device->node->data = NULL;
-        spin_unlock_irqrestore(&card->lock, flags);
+        scoped_guard(spinlock_irqsave, &card->lock)
+                device->node->data = NULL;
 
         fw_node_put(device->node);
         kfree(device->config_rom);
@@ -942,59 +928,6 @@ static void fw_device_update(struct work_struct *work)
         device_for_each_child(&device->device, NULL, update_unit);
 }
 
-/*
- * If a device was pending for deletion because its node went away but its
- * bus info block and root directory header matches that of a newly discovered
- * device, revive the existing fw_device.
- * The newly allocated fw_device becomes obsolete instead.
- */
-static int lookup_existing_device(struct device *dev, void *data)
-{
-        struct fw_device *old = fw_device(dev);
-        struct fw_device *new = data;
-        struct fw_card *card = new->card;
-        int match = 0;
-
-        if (!is_fw_device(dev))
-                return 0;
-
-        down_read(&fw_device_rwsem); /* serialize config_rom access */
-        spin_lock_irq(&card->lock);  /* serialize node access */
-
-        if (memcmp(old->config_rom, new->config_rom, 6 * 4) == 0 &&
-            atomic_cmpxchg(&old->state,
-                           FW_DEVICE_GONE,
-                           FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
-                struct fw_node *current_node = new->node;
-                struct fw_node *obsolete_node = old->node;
-
-                new->node = obsolete_node;
-                new->node->data = new;
-                old->node = current_node;
-                old->node->data = old;
-
-                old->max_speed = new->max_speed;
-                old->node_id = current_node->node_id;
-                smp_wmb();  /* update node_id before generation */
-                old->generation = card->generation;
-                old->config_rom_retries = 0;
-                fw_notice(card, "rediscovered device %s\n", dev_name(dev));
-
-                old->workfn = fw_device_update;
-                fw_schedule_device_work(old, 0);
-
-                if (current_node == card->root_node)
-                        fw_schedule_bm_work(card, 0);
-
-                match = 1;
-        }
-
-        spin_unlock_irq(&card->lock);
-        up_read(&fw_device_rwsem);
-
-        return match;
-}
-
 enum { BC_UNKNOWN = 0, BC_UNIMPLEMENTED, BC_IMPLEMENTED, };
 
 static void set_broadcast_channel(struct fw_device *device, int generation)
@@ -1055,13 +988,26 @@ int fw_device_set_broadcast_channel(struct device *dev, void *gen)
         return 0;
 }
 
+static int compare_configuration_rom(struct device *dev, void *data)
+{
+        const struct fw_device *old = fw_device(dev);
+        const u32 *config_rom = data;
+
+        if (!is_fw_device(dev))
+                return 0;
+
+        // Compare the bus information block and root_length/root_crc.
+        return !memcmp(old->config_rom, config_rom, 6 * 4);
+}
+
 static void fw_device_init(struct work_struct *work)
 {
         struct fw_device *device =
                 container_of(work, struct fw_device, work.work);
         struct fw_card *card = device->card;
-        struct device *revived_dev;
-        int minor, ret;
+        struct device *found;
+        u32 minor;
+        int ret;
 
         /*
          * All failure paths here set node->data to NULL, so that we
@@ -1087,24 +1033,62 @@ static void fw_device_init(struct work_struct *work)
                 return;
         }
 
-        revived_dev = device_find_child(card->device,
-                                        device, lookup_existing_device);
-        if (revived_dev) {
-                put_device(revived_dev);
-                fw_device_release(&device->device);
+        // If a device was pending for deletion because its node went away but its bus info block
+        // and root directory header matches that of a newly discovered device, revive the
+        // existing fw_device. The newly allocated fw_device becomes obsolete instead.
+        //
+        // serialize config_rom access.
+        scoped_guard(rwsem_read, &fw_device_rwsem) {
+                found = device_find_child(card->device, (void *)device->config_rom,
+                                          compare_configuration_rom);
+        }
+        if (found) {
+                struct fw_device *reused = fw_device(found);
 
-                return;
+                if (atomic_cmpxchg(&reused->state,
+                                   FW_DEVICE_GONE,
+                                   FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
+                        // serialize node access
+                        scoped_guard(spinlock_irq, &card->lock) {
+                                struct fw_node *current_node = device->node;
+                                struct fw_node *obsolete_node = reused->node;
+
+                                device->node = obsolete_node;
+                                device->node->data = device;
+                                reused->node = current_node;
+                                reused->node->data = reused;
+
+                                reused->max_speed = device->max_speed;
+                                reused->node_id = current_node->node_id;
+                                smp_wmb(); /* update node_id before generation */
+                                reused->generation = card->generation;
+                                reused->config_rom_retries = 0;
+                                fw_notice(card, "rediscovered device %s\n",
+                                          dev_name(found));
+
+                                reused->workfn = fw_device_update;
+                                fw_schedule_device_work(reused, 0);
+
+                                if (current_node == card->root_node)
+                                        fw_schedule_bm_work(card, 0);
+                        }
+
+                        put_device(found);
+                        fw_device_release(&device->device);
+
+                        return;
+                }
+
+                put_device(found);
         }
 
         device_initialize(&device->device);
 
         fw_device_get(device);
-        down_write(&fw_device_rwsem);
-        minor = idr_alloc(&fw_device_idr, device, 0, 1 << MINORBITS,
-                          GFP_KERNEL);
-        up_write(&fw_device_rwsem);
-
-        if (minor < 0)
+        // The index of allocated entry is used for minor identifier of device node.
+        ret = xa_alloc(&fw_device_xa, &minor, device, XA_LIMIT(0, MINORMASK), GFP_KERNEL);
+        if (ret < 0)
                 goto error;
 
         device->device.bus = &fw_bus_type;
@@ -1165,11 +1149,9 @@ static void fw_device_init(struct work_struct *work)
         return;
 
  error_with_cdev:
-        down_write(&fw_device_rwsem);
-        idr_remove(&fw_device_idr, minor);
-        up_write(&fw_device_rwsem);
+        xa_erase(&fw_device_xa, minor);
  error:
-        fw_device_put(device);          /* fw_device_idr's reference */
+        fw_device_put(device);          // fw_device_xa's reference.
 
         put_device(&device->device);    /* our reference */
 }
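Editor's note: fw_device_init() above now moves the state transition and node swap out of the device_find_child() match callback, leaving the callback side-effect free. The general shape of that driver-core pattern — a pure match function plus a mandatory put_device() on the returned reference — is sketched here with a hypothetical match criterion:

    #include <linux/device.h>

    static int example_match(struct device *dev, void *data)
    {
            // Return non-zero to select this child; keep the callback free of
            // side effects and do the real work on the returned device.
            return dev->platform_data == data;
    }

    static void example_lookup(struct device *parent, void *key)
    {
            struct device *found = device_find_child(parent, key, example_match);

            if (!found)
                    return;

            // ... operate on 'found' under whatever locking the state needs ...

            put_device(found);  // device_find_child() took a reference
    }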
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -209,23 +209,63 @@ void fw_iso_context_queue_flush(struct fw_iso_context *ctx)
 }
 EXPORT_SYMBOL(fw_iso_context_queue_flush);
 
+/**
+ * fw_iso_context_flush_completions() - process isochronous context in current process context.
+ * @ctx: the isochronous context
+ *
+ * Process the isochronous context in the current process context. The registered callback function
+ * is called when a queued packet buffer with the interrupt flag is completed, either after
+ * transmission in the IT context or after being filled in the IR context. Additionally, the
+ * callback function is also called for the packet buffer completed at last. Furthermore, the
+ * callback function is called as well when the header buffer in the context becomes full. If it is
+ * required to process the context asynchronously, fw_iso_context_schedule_flush_completions() is
+ * available instead.
+ *
+ * Context: Process context. May sleep due to disable_work_sync().
+ */
 int fw_iso_context_flush_completions(struct fw_iso_context *ctx)
 {
+        int err;
+
         trace_isoc_outbound_flush_completions(ctx);
         trace_isoc_inbound_single_flush_completions(ctx);
         trace_isoc_inbound_multiple_flush_completions(ctx);
 
-        return ctx->card->driver->flush_iso_completions(ctx);
+        might_sleep();
+
+        // Avoid dead lock due to programming mistake.
+        if (WARN_ON_ONCE(current_work() == &ctx->work))
+                return 0;
+
+        disable_work_sync(&ctx->work);
+
+        err = ctx->card->driver->flush_iso_completions(ctx);
+
+        enable_work(&ctx->work);
+
+        return err;
 }
 EXPORT_SYMBOL(fw_iso_context_flush_completions);
 
 int fw_iso_context_stop(struct fw_iso_context *ctx)
 {
+        int err;
+
         trace_isoc_outbound_stop(ctx);
         trace_isoc_inbound_single_stop(ctx);
         trace_isoc_inbound_multiple_stop(ctx);
 
-        return ctx->card->driver->stop_iso(ctx);
+        might_sleep();
+
+        // Avoid dead lock due to programming mistake.
+        if (WARN_ON_ONCE(current_work() == &ctx->work))
+                return 0;
+
+        err = ctx->card->driver->stop_iso(ctx);
+
+        cancel_work_sync(&ctx->work);
+
+        return err;
 }
 EXPORT_SYMBOL(fw_iso_context_stop);
 
@@ -375,9 +415,8 @@ void fw_iso_resource_manage(struct fw_card *card, int generation,
         u32 channels_lo = channels_mask >> 32; /* channels 63...32 */
         int irm_id, ret, c = -EINVAL;
 
-        spin_lock_irq(&card->lock);
-        irm_id = card->irm_node->node_id;
-        spin_unlock_irq(&card->lock);
+        scoped_guard(spinlock_irq, &card->lock)
+                irm_id = card->irm_node->node_id;
 
         if (channels_hi)
                 c = manage_channel(card, irm_id, generation, channels_hi,
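Editor's note: with the isochronous context driven by a sleepable work item, fw_iso_context_flush_completions() may sleep, which is what lets the ALSA firewire drivers switch to nonatomic PCM operation. A hedged sketch of the call-site shape in a unit driver — names and structure are illustrative, not taken from sound/firewire:

    #include <linux/firewire.h>

    // With .nonatomic = 1 set on the PCM ops, callbacks such as pointer/ack
    // run in process context, so a call that may sleep is permitted here.
    static unsigned long example_pcm_pointer(struct fw_iso_context *ctx,
                                             unsigned long hw_position)
    {
            // Process any packets completed so far, synchronously, so that
            // the position reported to user space is up to date.
            fw_iso_context_flush_completions(ctx);

            return hw_position;
    }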
--- a/drivers/firewire/core-topology.c
+++ b/drivers/firewire/core-topology.c
@@ -39,7 +39,7 @@ static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
         node->initiated_reset = phy_packet_self_id_zero_get_initiated_reset(sid);
         node->port_count = port_count;
 
-        refcount_set(&node->ref_count, 1);
+        kref_init(&node->kref);
         INIT_LIST_HEAD(&node->link);
 
         return node;
@@ -455,11 +455,10 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
                               int self_id_count, u32 *self_ids, bool bm_abdicate)
 {
         struct fw_node *local_node;
-        unsigned long flags;
 
         trace_bus_reset_handle(card->index, generation, node_id, bm_abdicate, self_ids, self_id_count);
 
-        spin_lock_irqsave(&card->lock, flags);
+        guard(spinlock_irqsave)(&card->lock);
 
         /*
          * If the selfID buffer is not the immediate successor of the
@@ -500,7 +499,5 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
         } else {
                 update_tree(card, local_node);
         }
-
-        spin_unlock_irqrestore(&card->lock, flags);
 }
 EXPORT_SYMBOL(fw_core_handle_bus_reset);
@ -13,7 +13,6 @@
|
||||
#include <linux/firewire-constants.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/idr.h>
|
||||
#include <linux/jiffies.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/list.h>
|
||||
@ -49,35 +48,31 @@ static int close_transaction(struct fw_transaction *transaction, struct fw_card
|
||||
u32 response_tstamp)
|
||||
{
|
||||
struct fw_transaction *t = NULL, *iter;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&card->lock, flags);
|
||||
list_for_each_entry(iter, &card->transaction_list, link) {
|
||||
if (iter == transaction) {
|
||||
if (!try_cancel_split_timeout(iter)) {
|
||||
spin_unlock_irqrestore(&card->lock, flags);
|
||||
goto timed_out;
|
||||
scoped_guard(spinlock_irqsave, &card->lock) {
|
||||
list_for_each_entry(iter, &card->transaction_list, link) {
|
||||
if (iter == transaction) {
|
||||
if (try_cancel_split_timeout(iter)) {
|
||||
list_del_init(&iter->link);
|
||||
card->tlabel_mask &= ~(1ULL << iter->tlabel);
|
||||
t = iter;
|
||||
}
|
||||
break;
|
||||
}
|
||||
list_del_init(&iter->link);
|
||||
card->tlabel_mask &= ~(1ULL << iter->tlabel);
|
||||
t = iter;
|
||||
break;
|
||||
}
|
||||
}
|
||||
spin_unlock_irqrestore(&card->lock, flags);
|
||||
|
||||
if (t) {
|
||||
if (!t->with_tstamp) {
|
||||
t->callback.without_tstamp(card, rcode, NULL, 0, t->callback_data);
|
||||
} else {
|
||||
t->callback.with_tstamp(card, rcode, t->packet.timestamp, response_tstamp,
|
||||
NULL, 0, t->callback_data);
|
||||
}
|
||||
return 0;
|
||||
if (!t)
|
||||
return -ENOENT;
|
||||
|
||||
if (!t->with_tstamp) {
|
||||
t->callback.without_tstamp(card, rcode, NULL, 0, t->callback_data);
|
||||
} else {
|
||||
t->callback.with_tstamp(card, rcode, t->packet.timestamp, response_tstamp, NULL, 0,
|
||||
t->callback_data);
|
||||
}
|
||||
|
||||
timed_out:
|
||||
return -ENOENT;
|
||||
return 0;
|
||||
}
|
||||
|
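The shape of the close_transaction() rework recurs throughout this series: mutate the transaction list inside a scoped guard, remember the match, and only invoke the (now possibly sleeping) callback after the critical section has ended. Schematically, as a sketch rather than the literal patch:

	struct fw_transaction *found = NULL, *iter;

	scoped_guard(spinlock_irqsave, &card->lock) {
		list_for_each_entry(iter, &card->transaction_list, link) {
			if (iter == transaction && try_cancel_split_timeout(iter)) {
				list_del_init(&iter->link);
				found = iter;
				break;
			}
		}
	}
	if (!found)
		return -ENOENT;
	// The lock is released here; the callback may run in sleepable context.
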
 /*
@@ -121,16 +116,13 @@ static void split_transaction_timeout_callback(struct timer_list *timer)
 {
 	struct fw_transaction *t = from_timer(t, timer, split_timeout_timer);
 	struct fw_card *card = t->card;
-	unsigned long flags;
 
-	spin_lock_irqsave(&card->lock, flags);
-	if (list_empty(&t->link)) {
-		spin_unlock_irqrestore(&card->lock, flags);
-		return;
+	scoped_guard(spinlock_irqsave, &card->lock) {
+		if (list_empty(&t->link))
+			return;
+		list_del(&t->link);
+		card->tlabel_mask &= ~(1ULL << t->tlabel);
 	}
-	list_del(&t->link);
-	card->tlabel_mask &= ~(1ULL << t->tlabel);
-	spin_unlock_irqrestore(&card->lock, flags);
 
 	if (!t->with_tstamp) {
 		t->callback.without_tstamp(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
@@ -143,20 +135,14 @@ static void split_transaction_timeout_callback(struct timer_list *timer)
 static void start_split_transaction_timeout(struct fw_transaction *t,
 					    struct fw_card *card)
 {
-	unsigned long flags;
+	guard(spinlock_irqsave)(&card->lock);
 
-	spin_lock_irqsave(&card->lock, flags);
-
-	if (list_empty(&t->link) || WARN_ON(t->is_split_transaction)) {
-		spin_unlock_irqrestore(&card->lock, flags);
+	if (list_empty(&t->link) || WARN_ON(t->is_split_transaction))
 		return;
-	}
 
 	t->is_split_transaction = true;
 	mod_timer(&t->split_timeout_timer,
 		  jiffies + card->split_timeout_jiffies);
-
-	spin_unlock_irqrestore(&card->lock, flags);
 }
 
 static u32 compute_split_timeout_timestamp(struct fw_card *card, u32 request_timestamp);
@@ -464,7 +450,6 @@ static void transmit_phy_packet_callback(struct fw_packet *packet,
 
 static struct fw_packet phy_config_packet = {
 	.header_length = 12,
-	.header[0] = TCODE_LINK_INTERNAL << 4,
 	.payload_length = 0,
 	.speed = SCODE_100,
 	.callback = transmit_phy_packet_callback,
@@ -495,8 +480,9 @@ void fw_send_phy_config(struct fw_card *card,
 	phy_packet_phy_config_set_gap_count(&data, gap_count);
 	phy_packet_phy_config_set_gap_count_optimization(&data, true);
 
-	mutex_lock(&phy_config_mutex);
+	guard(mutex)(&phy_config_mutex);
 
+	async_header_set_tcode(phy_config_packet.header, TCODE_LINK_INTERNAL);
 	phy_config_packet.header[1] = data;
 	phy_config_packet.header[2] = ~data;
 	phy_config_packet.generation = generation;
@@ -508,8 +494,6 @@ void fw_send_phy_config(struct fw_card *card,
 
 	card->driver->send_request(card, &phy_config_packet);
 	wait_for_completion_timeout(&phy_config_done, timeout);
-
-	mutex_unlock(&phy_config_mutex);
 }
 
 static struct fw_address_handler *lookup_overlapping_address_handler(
@@ -598,7 +582,7 @@ int fw_core_add_address_handler(struct fw_address_handler *handler,
 	    handler->length == 0)
 		return -EINVAL;
 
-	spin_lock(&address_handler_list_lock);
+	guard(spinlock)(&address_handler_list_lock);
 
 	handler->offset = region->start;
 	while (handler->offset + handler->length <= region->end) {
@@ -617,8 +601,6 @@ int fw_core_add_address_handler(struct fw_address_handler *handler,
 		}
 	}
 
-	spin_unlock(&address_handler_list_lock);
-
 	return ret;
 }
 EXPORT_SYMBOL(fw_core_add_address_handler);
@@ -634,9 +616,9 @@ EXPORT_SYMBOL(fw_core_add_address_handler);
  */
 void fw_core_remove_address_handler(struct fw_address_handler *handler)
 {
-	spin_lock(&address_handler_list_lock);
-	list_del_rcu(&handler->link);
-	spin_unlock(&address_handler_list_lock);
+	scoped_guard(spinlock, &address_handler_list_lock)
+		list_del_rcu(&handler->link);
+
 	synchronize_rcu();
 }
 EXPORT_SYMBOL(fw_core_remove_address_handler);
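Note why synchronize_rcu() stays outside the scoped guard in fw_core_remove_address_handler(): the spinlock only serializes writers, readers traverse the list under RCU, and synchronize_rcu() may sleep, so it must not run with a spinlock held. In outline:

	scoped_guard(spinlock, &address_handler_list_lock)
		list_del_rcu(&handler->link);	// unpublish for new readers

	synchronize_rcu();	// wait out readers that may still see the entry
	// Only now is it safe for the caller to reuse or free the handler.
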
@@ -927,16 +909,14 @@ static void handle_exclusive_region_request(struct fw_card *card,
 	if (tcode == TCODE_LOCK_REQUEST)
 		tcode = 0x10 + async_header_get_extended_tcode(p->header);
 
-	rcu_read_lock();
-	handler = lookup_enclosing_address_handler(&address_handler_list,
-						   offset, request->length);
-	if (handler)
-		handler->address_callback(card, request,
-					  tcode, destination, source,
-					  p->generation, offset,
-					  request->data, request->length,
-					  handler->callback_data);
-	rcu_read_unlock();
+	scoped_guard(rcu) {
+		handler = lookup_enclosing_address_handler(&address_handler_list, offset,
+							   request->length);
+		if (handler)
+			handler->address_callback(card, request, tcode, destination, source,
+						  p->generation, offset, request->data,
+						  request->length, handler->callback_data);
+	}
 
 	if (!handler)
 		fw_send_response(card, request, RCODE_ADDRESS_ERROR);
@@ -969,17 +949,14 @@ static void handle_fcp_region_request(struct fw_card *card,
 			return;
 		}
 
-	rcu_read_lock();
-	list_for_each_entry_rcu(handler, &address_handler_list, link) {
-		if (is_enclosing_handler(handler, offset, request->length))
-			handler->address_callback(card, request, tcode,
-						  destination, source,
-						  p->generation, offset,
-						  request->data,
-						  request->length,
-						  handler->callback_data);
+	scoped_guard(rcu) {
+		list_for_each_entry_rcu(handler, &address_handler_list, link) {
+			if (is_enclosing_handler(handler, offset, request->length))
+				handler->address_callback(card, request, tcode, destination, source,
+							  p->generation, offset, request->data,
+							  request->length, handler->callback_data);
+		}
 	}
-	rcu_read_unlock();
 
 	fw_send_response(card, request, RCODE_COMPLETE);
 }
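On the reader side, scoped_guard(rcu) is the cleanup.h replacement for a rcu_read_lock()/rcu_read_unlock() pair, as the two hunks above show. A stripped-down sketch of such a reader (local names abbreviated relative to the real functions):

	scoped_guard(rcu) {
		struct fw_address_handler *handler;

		list_for_each_entry_rcu(handler, &address_handler_list, link) {
			// Handlers observed here stay allocated until the
			// matching synchronize_rcu() in the remover completes.
			if (is_enclosing_handler(handler, offset, length))
				handler->address_callback(card, request, tcode, destination,
							  source, generation, offset, data,
							  length, handler->callback_data);
		}
	}
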
@@ -1024,7 +1001,6 @@ EXPORT_SYMBOL(fw_core_handle_request);
 void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
 {
 	struct fw_transaction *t = NULL, *iter;
-	unsigned long flags;
 	u32 *data;
 	size_t data_length;
 	int tcode, tlabel, source, rcode;
@@ -1063,26 +1039,23 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
 		break;
 	}
 
-	spin_lock_irqsave(&card->lock, flags);
-	list_for_each_entry(iter, &card->transaction_list, link) {
-		if (iter->node_id == source && iter->tlabel == tlabel) {
-			if (!try_cancel_split_timeout(iter)) {
-				spin_unlock_irqrestore(&card->lock, flags);
-				goto timed_out;
+	scoped_guard(spinlock_irqsave, &card->lock) {
+		list_for_each_entry(iter, &card->transaction_list, link) {
+			if (iter->node_id == source && iter->tlabel == tlabel) {
+				if (try_cancel_split_timeout(iter)) {
+					list_del_init(&iter->link);
+					card->tlabel_mask &= ~(1ULL << iter->tlabel);
+					t = iter;
+				}
+				break;
 			}
-			list_del_init(&iter->link);
-			card->tlabel_mask &= ~(1ULL << iter->tlabel);
-			t = iter;
-			break;
 		}
 	}
-	spin_unlock_irqrestore(&card->lock, flags);
 
 	trace_async_response_inbound((uintptr_t)t, card->index, p->generation, p->speed, p->ack,
 				     p->timestamp, p->header, data, data_length / 4);
 
 	if (!t) {
- timed_out:
 		fw_notice(card, "unsolicited response (source %x, tlabel %x)\n",
 			  source, tlabel);
 		return;
@@ -1186,7 +1159,6 @@ static void handle_registers(struct fw_card *card, struct fw_request *request,
 	int reg = offset & ~CSR_REGISTER_BASE;
 	__be32 *data = payload;
 	int rcode = RCODE_COMPLETE;
-	unsigned long flags;
 
 	switch (reg) {
 	case CSR_PRIORITY_BUDGET:
@@ -1228,10 +1200,10 @@ static void handle_registers(struct fw_card *card, struct fw_request *request,
 		if (tcode == TCODE_READ_QUADLET_REQUEST) {
 			*data = cpu_to_be32(card->split_timeout_hi);
 		} else if (tcode == TCODE_WRITE_QUADLET_REQUEST) {
-			spin_lock_irqsave(&card->lock, flags);
+			guard(spinlock_irqsave)(&card->lock);
+
 			card->split_timeout_hi = be32_to_cpu(*data) & 7;
 			update_split_timeout(card);
-			spin_unlock_irqrestore(&card->lock, flags);
 		} else {
 			rcode = RCODE_TYPE_ERROR;
 		}
@@ -1241,11 +1213,10 @@ static void handle_registers(struct fw_card *card, struct fw_request *request,
 		if (tcode == TCODE_READ_QUADLET_REQUEST) {
 			*data = cpu_to_be32(card->split_timeout_lo);
 		} else if (tcode == TCODE_WRITE_QUADLET_REQUEST) {
-			spin_lock_irqsave(&card->lock, flags);
-			card->split_timeout_lo =
-				be32_to_cpu(*data) & 0xfff80000;
+			guard(spinlock_irqsave)(&card->lock);
+
+			card->split_timeout_lo = be32_to_cpu(*data) & 0xfff80000;
 			update_split_timeout(card);
-			spin_unlock_irqrestore(&card->lock, flags);
 		} else {
 			rcode = RCODE_TYPE_ERROR;
 		}
@@ -1387,7 +1358,7 @@ static void __exit fw_core_cleanup(void)
 	unregister_chrdev(fw_cdev_major, "firewire");
 	bus_unregister(&fw_bus_type);
 	destroy_workqueue(fw_workqueue);
-	idr_destroy(&fw_device_idr);
+	xa_destroy(&fw_device_xa);
 }
 
 module_init(fw_core_init);

@@ -7,7 +7,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/fs.h>
 #include <linux/list.h>
-#include <linux/idr.h>
+#include <linux/xarray.h>
 #include <linux/mm_types.h>
 #include <linux/rwsem.h>
 #include <linux/slab.h>
@@ -115,8 +115,8 @@ struct fw_card_driver {
 
 void fw_card_initialize(struct fw_card *card,
 		const struct fw_card_driver *driver, struct device *device);
-int fw_card_add(struct fw_card *card,
-		u32 max_receive, u32 link_speed, u64 guid);
+int fw_card_add(struct fw_card *card, u32 max_receive, u32 link_speed, u64 guid,
+		unsigned int supported_isoc_contexts);
 void fw_core_remove_card(struct fw_card *card);
 int fw_compute_block_crc(__be32 *block);
 void fw_schedule_bm_work(struct fw_card *card, unsigned long delay);
@@ -133,7 +133,7 @@ void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p);
 /* -device */
 
 extern struct rw_semaphore fw_device_rwsem;
-extern struct idr fw_device_idr;
+extern struct xarray fw_device_xa;
 extern int fw_cdev_major;
 
 static inline struct fw_device *fw_device_get(struct fw_device *device)
@@ -159,6 +159,11 @@ int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count);
 int fw_iso_buffer_map_dma(struct fw_iso_buffer *buffer, struct fw_card *card,
 			  enum dma_data_direction direction);
 
+static inline void fw_iso_context_init_work(struct fw_iso_context *ctx, work_func_t func)
+{
+	INIT_WORK(&ctx->work, func);
+}
+
 
 /* -topology */
 
@@ -183,7 +188,8 @@ struct fw_node {
 	 * local node to this node. */
 	u8 max_depth:4; /* Maximum depth to any leaf node */
 	u8 max_hops:4; /* Max hops in this sub tree */
-	refcount_t ref_count;
+
+	struct kref kref;
 
 	/* For serializing node topology into a list. */
 	struct list_head link;
@@ -196,15 +202,21 @@ struct fw_node {
 
 static inline struct fw_node *fw_node_get(struct fw_node *node)
 {
-	refcount_inc(&node->ref_count);
+	kref_get(&node->kref);
 
 	return node;
 }
 
+static void release_node(struct kref *kref)
+{
+	struct fw_node *node = container_of(kref, struct fw_node, kref);
+
+	kfree(node);
+}
+
 static inline void fw_node_put(struct fw_node *node)
 {
-	if (refcount_dec_and_test(&node->ref_count))
-		kfree(node);
+	kref_put(&node->kref, release_node);
 }
 
 void fw_core_handle_bus_reset(struct fw_card *card, int node_id,
 
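The switch from a bare refcount_t to struct kref ties the release function to the final put instead of open-coding the kfree(). A minimal sketch of the resulting lifetime rules for struct fw_node (the allocation call is illustrative; the real constructor is fw_node_create()):

	struct fw_node *node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!node)
		return NULL;
	kref_init(&node->kref);		// count starts at 1 for the creator

	fw_node_get(node);		// kref_get(): count is now 2
	fw_node_put(node);		// kref_put(): count back to 1
	fw_node_put(node);		// count hits 0 -> release_node() -> kfree(node)
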
@@ -40,9 +40,75 @@ static void test_self_id_receive_buffer_deserialization(struct kunit *test)
 	KUNIT_EXPECT_EQ(test, 0xf38b, timestamp);
 }
 
+static void test_at_data_serdes(struct kunit *test)
+{
+	static const __le32 expected[] = {
+		cpu_to_le32(0x00020e80),
+		cpu_to_le32(0xffc2ffff),
+		cpu_to_le32(0xe0000000),
+	};
+	__le32 quadlets[] = {0, 0, 0};
+	bool has_src_bus_id = ohci1394_at_data_get_src_bus_id(expected);
+	unsigned int speed = ohci1394_at_data_get_speed(expected);
+	unsigned int tlabel = ohci1394_at_data_get_tlabel(expected);
+	unsigned int retry = ohci1394_at_data_get_retry(expected);
+	unsigned int tcode = ohci1394_at_data_get_tcode(expected);
+	unsigned int destination_id = ohci1394_at_data_get_destination_id(expected);
+	u64 destination_offset = ohci1394_at_data_get_destination_offset(expected);
+
+	KUNIT_EXPECT_FALSE(test, has_src_bus_id);
+	KUNIT_EXPECT_EQ(test, 0x02, speed);
+	KUNIT_EXPECT_EQ(test, 0x03, tlabel);
+	KUNIT_EXPECT_EQ(test, 0x02, retry);
+	KUNIT_EXPECT_EQ(test, 0x08, tcode);
+
+	ohci1394_at_data_set_src_bus_id(quadlets, has_src_bus_id);
+	ohci1394_at_data_set_speed(quadlets, speed);
+	ohci1394_at_data_set_tlabel(quadlets, tlabel);
+	ohci1394_at_data_set_retry(quadlets, retry);
+	ohci1394_at_data_set_tcode(quadlets, tcode);
+	ohci1394_at_data_set_destination_id(quadlets, destination_id);
+	ohci1394_at_data_set_destination_offset(quadlets, destination_offset);
+
+	KUNIT_EXPECT_MEMEQ(test, quadlets, expected, sizeof(expected));
+}
+
+static void test_it_data_serdes(struct kunit *test)
+{
+	static const __le32 expected[] = {
+		cpu_to_le32(0x000349a7),
+		cpu_to_le32(0x02300000),
+	};
+	__le32 quadlets[] = {0, 0};
+	unsigned int scode = ohci1394_it_data_get_speed(expected);
+	unsigned int tag = ohci1394_it_data_get_tag(expected);
+	unsigned int channel = ohci1394_it_data_get_channel(expected);
+	unsigned int tcode = ohci1394_it_data_get_tcode(expected);
+	unsigned int sync = ohci1394_it_data_get_sync(expected);
+	unsigned int data_length = ohci1394_it_data_get_data_length(expected);
+
+	KUNIT_EXPECT_EQ(test, 0x03, scode);
+	KUNIT_EXPECT_EQ(test, 0x01, tag);
+	KUNIT_EXPECT_EQ(test, 0x09, channel);
+	KUNIT_EXPECT_EQ(test, 0x0a, tcode);
+	KUNIT_EXPECT_EQ(test, 0x7, sync);
+	KUNIT_EXPECT_EQ(test, 0x0230, data_length);
+
+	ohci1394_it_data_set_speed(quadlets, scode);
+	ohci1394_it_data_set_tag(quadlets, tag);
+	ohci1394_it_data_set_channel(quadlets, channel);
+	ohci1394_it_data_set_tcode(quadlets, tcode);
+	ohci1394_it_data_set_sync(quadlets, sync);
+	ohci1394_it_data_set_data_length(quadlets, data_length);
+
+	KUNIT_EXPECT_MEMEQ(test, quadlets, expected, sizeof(expected));
+}
+
 static struct kunit_case ohci_serdes_test_cases[] = {
 	KUNIT_CASE(test_self_id_count_register_deserialization),
 	KUNIT_CASE(test_self_id_receive_buffer_deserialization),
+	KUNIT_CASE(test_at_data_serdes),
+	KUNIT_CASE(test_it_data_serdes),
 	{}
 };
 
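The expected values in test_it_data_serdes() can be checked by hand against the IT DMA bit layout that the new accessors in ohci.h encode. Decoding the first quadlet:

	0x000349a7 = 0000 0000 0000 0011 0100 1001 1010 0111 (binary)
	             spd   (bits 18..16) = 0x3
	             tag   (bits 15..14) = 0x1
	             chan  (bits 13..8)  = 0x09
	             tcode (bits  7..4)  = 0xa  (TCODE_STREAM_DATA)
	             sy    (bits  3..0)  = 0x7

and in the second quadlet, dataLength (bits 31..16) of 0x02300000 is 0x0230, matching every KUNIT_EXPECT_EQ() above.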

@@ -50,7 +50,6 @@ static u32 cond_le32_to_cpu(__le32 value, bool has_be_header_quirk);
 #define CREATE_TRACE_POINTS
 #include <trace/events/firewire_ohci.h>
 
-#define ohci_info(ohci, f, args...) dev_info(ohci->card.device, f, ##args)
 #define ohci_notice(ohci, f, args...) dev_notice(ohci->card.device, f, ##args)
 #define ohci_err(ohci, f, args...) dev_err(ohci->card.device, f, ##args)
 
@@ -77,7 +76,7 @@ struct descriptor {
 	__le32 branch_address;
 	__le16 res_count;
 	__le16 transfer_status;
-} __attribute__((aligned(16)));
+} __aligned(16);
 
 #define CONTROL_SET(regs)	(regs)
 #define CONTROL_CLEAR(regs)	((regs) + 4)
@@ -162,13 +161,6 @@ struct context {
 	struct tasklet_struct tasklet;
 };
 
-#define IT_HEADER_SY(v)          ((v) << 0)
-#define IT_HEADER_TCODE(v)       ((v) << 4)
-#define IT_HEADER_CHANNEL(v)     ((v) << 8)
-#define IT_HEADER_TAG(v)         ((v) << 14)
-#define IT_HEADER_SPEED(v)       ((v) << 16)
-#define IT_HEADER_DATA_LENGTH(v) ((v) << 16)
-
 struct iso_context {
 	struct fw_iso_context base;
 	struct context context;
@@ -182,7 +174,7 @@ struct iso_context {
 	u8 tags;
 };
 
-#define CONFIG_ROM_SIZE 1024
+#define CONFIG_ROM_SIZE	(CSR_CONFIG_ROM_END - CSR_CONFIG_ROM)
 
 struct fw_ohci {
 	struct fw_card card;
@@ -264,7 +256,6 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card)
 #define OHCI1394_REGISTER_SIZE		0x800
 #define OHCI1394_PCI_HCI_Control	0x40
 #define SELF_ID_BUF_SIZE		0x800
-#define OHCI_TCODE_PHY_PACKET		0x0e
 #define OHCI_VERSION_1_1		0x010010
 
 static char ohci_driver_name[] = KBUILD_MODNAME;
@@ -405,7 +396,7 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
 
 static int param_debug;
 module_param_named(debug, param_debug, int, 0644);
-MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
+MODULE_PARM_DESC(debug, "Verbose logging, deprecated in v6.11 kernel or later. (default = 0"
 	", AT/AR events = "	__stringify(OHCI_PARAM_DEBUG_AT_AR)
 	", self-IDs = "		__stringify(OHCI_PARAM_DEBUG_SELFIDS)
 	", IRQs = "		__stringify(OHCI_PARAM_DEBUG_IRQS)
@@ -532,20 +523,28 @@ static const char *evts[] = {
 	[0x1e] = "ack_type_error",	[0x1f] = "-reserved-",
 	[0x20] = "pending/cancelled",
 };
-static const char *tcodes[] = {
-	[0x0] = "QW req",		[0x1] = "BW req",
-	[0x2] = "W resp",		[0x3] = "-reserved-",
-	[0x4] = "QR req",		[0x5] = "BR req",
-	[0x6] = "QR resp",		[0x7] = "BR resp",
-	[0x8] = "cycle start",		[0x9] = "Lk req",
-	[0xa] = "async stream packet",	[0xb] = "Lk resp",
-	[0xc] = "-reserved-",		[0xd] = "-reserved-",
-	[0xe] = "link internal",	[0xf] = "-reserved-",
-};
 
 static void log_ar_at_event(struct fw_ohci *ohci,
 			    char dir, int speed, u32 *header, int evt)
 {
+	static const char *const tcodes[] = {
+		[TCODE_WRITE_QUADLET_REQUEST]	= "QW req",
+		[TCODE_WRITE_BLOCK_REQUEST]	= "BW req",
+		[TCODE_WRITE_RESPONSE]		= "W resp",
+		[0x3]				= "-reserved-",
+		[TCODE_READ_QUADLET_REQUEST]	= "QR req",
+		[TCODE_READ_BLOCK_REQUEST]	= "BR req",
+		[TCODE_READ_QUADLET_RESPONSE]	= "QR resp",
+		[TCODE_READ_BLOCK_RESPONSE]	= "BR resp",
+		[TCODE_CYCLE_START]		= "cycle start",
+		[TCODE_LOCK_REQUEST]		= "Lk req",
+		[TCODE_STREAM_DATA]		= "async stream packet",
+		[TCODE_LOCK_RESPONSE]		= "Lk resp",
+		[0xc]				= "-reserved-",
+		[0xd]				= "-reserved-",
+		[TCODE_LINK_INTERNAL]		= "link internal",
+		[0xf]				= "-reserved-",
+	};
 	int tcode = async_header_get_tcode(header);
 	char specific[12];
 
@@ -586,7 +585,7 @@ static void log_ar_at_event(struct fw_ohci *ohci,
 		ohci_notice(ohci, "A%c %s, %s\n",
 			    dir, evts[evt], tcodes[tcode]);
 		break;
-	case 0xe:
+	case TCODE_LINK_INTERNAL:
 		ohci_notice(ohci, "A%c %s, PHY %08x %08x\n",
 			    dir, evts[evt], header[1], header[2]);
 		break;
@@ -713,26 +712,20 @@ static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr)
 static int ohci_read_phy_reg(struct fw_card *card, int addr)
 {
 	struct fw_ohci *ohci = fw_ohci(card);
-	int ret;
 
-	mutex_lock(&ohci->phy_reg_mutex);
-	ret = read_phy_reg(ohci, addr);
-	mutex_unlock(&ohci->phy_reg_mutex);
+	guard(mutex)(&ohci->phy_reg_mutex);
 
-	return ret;
+	return read_phy_reg(ohci, addr);
 }
 
 static int ohci_update_phy_reg(struct fw_card *card, int addr,
 			       int clear_bits, int set_bits)
 {
 	struct fw_ohci *ohci = fw_ohci(card);
-	int ret;
 
-	mutex_lock(&ohci->phy_reg_mutex);
-	ret = update_phy_reg(ohci, addr, clear_bits, set_bits);
-	mutex_unlock(&ohci->phy_reg_mutex);
+	guard(mutex)(&ohci->phy_reg_mutex);
 
-	return ret;
+	return update_phy_reg(ohci, addr, clear_bits, set_bits);
 }
 
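What makes these two conversions safe is that guard(mutex)(...) holds the mutex until the function actually returns, including through the return expression itself, so the result no longer needs to be staged in a local. The before/after shape, with read_phy_reg() standing in for any locked operation:

	// Before: stage the result so the unlock can precede the return.
	mutex_lock(&ohci->phy_reg_mutex);
	ret = read_phy_reg(ohci, addr);
	mutex_unlock(&ohci->phy_reg_mutex);
	return ret;

	// After: the cleanup handler releases the mutex after the return
	// value has been computed, on every exit path.
	guard(mutex)(&ohci->phy_reg_mutex);
	return read_phy_reg(ohci, addr);
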
 static inline dma_addr_t ar_buffer_bus(struct ar_context *ctx, unsigned int i)
@@ -939,7 +932,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
 
 	case TCODE_WRITE_RESPONSE:
 	case TCODE_READ_QUADLET_REQUEST:
-	case OHCI_TCODE_PHY_PACKET:
+	case TCODE_LINK_INTERNAL:
 		p.header_length = 12;
 		p.payload_length = 0;
 		break;
@@ -967,7 +960,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
 	 * Several controllers, notably from NEC and VIA, forget to
 	 * write ack_complete status at PHY packet reception.
 	 */
-	if (evt == OHCI1394_evt_no_status && tcode == OHCI1394_phy_tcode)
+	if (evt == OHCI1394_evt_no_status && tcode == TCODE_LINK_INTERNAL)
 		p.ack = ACK_COMPLETE;
 
 	/*
@@ -1148,9 +1141,8 @@ static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
 	return d + z - 1;
 }
 
-static void context_tasklet(unsigned long data)
+static void context_retire_descriptors(struct context *ctx)
 {
-	struct context *ctx = (struct context *) data;
 	struct descriptor *d, *last;
 	u32 address;
 	int z;
@@ -1179,18 +1171,31 @@ static void context_tasklet(unsigned long data)
 			break;
 
 		if (old_desc != desc) {
-			/* If we've advanced to the next buffer, move the
-			 * previous buffer to the free list. */
-			unsigned long flags;
+			// If we've advanced to the next buffer, move the previous buffer to the
+			// free list.
 			old_desc->used = 0;
-			spin_lock_irqsave(&ctx->ohci->lock, flags);
+			guard(spinlock_irqsave)(&ctx->ohci->lock);
 			list_move_tail(&old_desc->list, &ctx->buffer_list);
-			spin_unlock_irqrestore(&ctx->ohci->lock, flags);
 		}
 		ctx->last = last;
 	}
 }
 
+static void context_tasklet(unsigned long data)
+{
+	struct context *ctx = (struct context *) data;
+
+	context_retire_descriptors(ctx);
+}
+
+static void ohci_isoc_context_work(struct work_struct *work)
+{
+	struct fw_iso_context *base = container_of(work, struct fw_iso_context, work);
+	struct iso_context *isoc_ctx = container_of(base, struct iso_context, base);
+
+	context_retire_descriptors(&isoc_ctx->context);
+}
+
 /*
  * Allocate a new buffer and add it to the list of free buffers for this
  * context. Must be called with ohci->lock held.
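This is the pivotal hunk of the tasklet removal: the descriptor retirement loop is factored into context_retire_descriptors() so the same code can run either from the legacy tasklet or from ohci_isoc_context_work() in sleepable process context. A hedged sketch of the handoff; the workqueue field name is an assumption for illustration, not quoted from the patches:

	// Once, at context creation (see fw_iso_context_init_work() in core.h):
	INIT_WORK(&ctx->base.work, ohci_isoc_context_work);

	// From the 1394 OHCI hardIRQ handler, instead of tasklet_schedule():
	queue_work(card->isoc_wq, &ctx->base.work);
	// ohci_isoc_context_work() then runs later, outside interrupt context,
	// so unit drivers may sleep while processing completed packets.
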
@@ -1402,12 +1407,6 @@ static int at_context_queue_packet(struct context *ctx,
 	d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
 	d[0].res_count = cpu_to_le16(packet->timestamp);
 
-	/*
-	 * The DMA format for asynchronous link packets is different
-	 * from the IEEE1394 layout, so shift the fields around
-	 * accordingly.
-	 */
-
 	tcode = async_header_get_tcode(packet->header);
 	header = (__le32 *) &d[1];
 	switch (tcode) {
@@ -1420,11 +1419,21 @@ static int at_context_queue_packet(struct context *ctx,
 	case TCODE_READ_BLOCK_RESPONSE:
 	case TCODE_LOCK_REQUEST:
 	case TCODE_LOCK_RESPONSE:
-		header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
-					(packet->speed << 16));
-		header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
-					(packet->header[0] & 0xffff0000));
-		header[2] = cpu_to_le32(packet->header[2]);
+		ohci1394_at_data_set_src_bus_id(header, false);
+		ohci1394_at_data_set_speed(header, packet->speed);
+		ohci1394_at_data_set_tlabel(header, async_header_get_tlabel(packet->header));
+		ohci1394_at_data_set_retry(header, async_header_get_retry(packet->header));
+		ohci1394_at_data_set_tcode(header, tcode);
+
+		ohci1394_at_data_set_destination_id(header,
+						    async_header_get_destination(packet->header));
+
+		if (ctx == &ctx->ohci->at_response_ctx) {
+			ohci1394_at_data_set_rcode(header, async_header_get_rcode(packet->header));
+		} else {
+			ohci1394_at_data_set_destination_offset(header,
+								async_header_get_offset(packet->header));
+		}
 
 		if (tcode_is_block_packet(tcode))
 			header[3] = cpu_to_le32(packet->header[3]);
@@ -1433,10 +1442,10 @@ static int at_context_queue_packet(struct context *ctx,
 
 		d[0].req_count = cpu_to_le16(packet->header_length);
 		break;
 
 	case TCODE_LINK_INTERNAL:
-		header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
-					(packet->speed << 16));
+		ohci1394_at_data_set_speed(header, packet->speed);
+		ohci1394_at_data_set_tcode(header, TCODE_LINK_INTERNAL);
+
 		header[1] = cpu_to_le32(packet->header[1]);
 		header[2] = cpu_to_le32(packet->header[2]);
 		d[0].req_count = cpu_to_le16(12);
@@ -1446,9 +1455,14 @@ static int at_context_queue_packet(struct context *ctx,
 		break;
 
 	case TCODE_STREAM_DATA:
-		header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
-					(packet->speed << 16));
-		header[1] = cpu_to_le32(packet->header[0] & 0xffff0000);
+		ohci1394_it_data_set_speed(header, packet->speed);
+		ohci1394_it_data_set_tag(header, isoc_header_get_tag(packet->header[0]));
+		ohci1394_it_data_set_channel(header, isoc_header_get_channel(packet->header[0]));
+		ohci1394_it_data_set_tcode(header, TCODE_STREAM_DATA);
+		ohci1394_it_data_set_sync(header, isoc_header_get_sy(packet->header[0]));
+
+		ohci1394_it_data_set_data_length(header, isoc_header_get_data_length(packet->header[0]));
+
 		d[0].req_count = cpu_to_le16(8);
 		break;
 
@@ -1873,13 +1887,15 @@ static int get_status_for_port(struct fw_ohci *ohci, int port_index,
 {
 	int reg;
 
-	mutex_lock(&ohci->phy_reg_mutex);
-	reg = write_phy_reg(ohci, 7, port_index);
-	if (reg >= 0)
+	scoped_guard(mutex, &ohci->phy_reg_mutex) {
+		reg = write_phy_reg(ohci, 7, port_index);
+		if (reg < 0)
+			return reg;
+
 		reg = read_phy_reg(ohci, 8);
-	mutex_unlock(&ohci->phy_reg_mutex);
-	if (reg < 0)
-		return reg;
+		if (reg < 0)
+			return reg;
+	}
 
 	switch (reg & 0x0f) {
 	case 0x06:
@@ -1917,29 +1933,36 @@ static int get_self_id_pos(struct fw_ohci *ohci, u32 self_id,
 	return i;
 }
 
-static bool initiated_reset(struct fw_ohci *ohci)
+static int detect_initiated_reset(struct fw_ohci *ohci, bool *is_initiated_reset)
 {
 	int reg;
-	int ret = false;
 
-	mutex_lock(&ohci->phy_reg_mutex);
-	reg = write_phy_reg(ohci, 7, 0xe0); /* Select page 7 */
-	if (reg >= 0) {
-		reg = read_phy_reg(ohci, 8);
-		reg |= 0x40;
-		reg = write_phy_reg(ohci, 8, reg); /* set PMODE bit */
-		if (reg >= 0) {
-			reg = read_phy_reg(ohci, 12); /* read register 12 */
-			if (reg >= 0) {
-				if ((reg & 0x08) == 0x08) {
-					/* bit 3 indicates "initiated reset" */
-					ret = true;
-				}
-			}
-		}
-	}
-	mutex_unlock(&ohci->phy_reg_mutex);
-	return ret;
+	guard(mutex)(&ohci->phy_reg_mutex);
+
+	// Select page 7
+	reg = write_phy_reg(ohci, 7, 0xe0);
+	if (reg < 0)
+		return reg;
+
+	reg = read_phy_reg(ohci, 8);
+	if (reg < 0)
+		return reg;
+
+	// set PMODE bit
+	reg |= 0x40;
+	reg = write_phy_reg(ohci, 8, reg);
+	if (reg < 0)
+		return reg;
+
+	// read register 12
+	reg = read_phy_reg(ohci, 12);
+	if (reg < 0)
+		return reg;
+
+	// bit 3 indicates "initiated reset"
+	*is_initiated_reset = !!((reg & 0x08) == 0x08);
+
+	return 0;
 }
 
 /*
@@ -1949,7 +1972,8 @@ static bool initiated_reset(struct fw_ohci *ohci)
  */
 static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count)
 {
-	int reg, i, pos;
+	int reg, i, pos, err;
+	bool is_initiated_reset;
 	u32 self_id = 0;
 
 	// link active 1, speed 3, bridge 0, contender 1, more packets 0.
@@ -1978,7 +2002,6 @@ static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count)
 
 	for (i = 0; i < 3; i++) {
 		enum phy_packet_self_id_port_status status;
-		int err;
 
 		err = get_status_for_port(ohci, i, &status);
 		if (err < 0)
@@ -1987,7 +2010,10 @@ static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count)
 		self_id_sequence_set_port_status(&self_id, 1, i, status);
 	}
 
-	phy_packet_self_id_zero_set_initiated_reset(&self_id, initiated_reset(ohci));
+	err = detect_initiated_reset(ohci, &is_initiated_reset);
+	if (err < 0)
+		return err;
+	phy_packet_self_id_zero_set_initiated_reset(&self_id, is_initiated_reset);
 
 	pos = get_self_id_pos(ohci, self_id, self_id_count);
 	if (pos >= 0) {
@@ -2112,14 +2138,12 @@ static void bus_reset_work(struct work_struct *work)
 		return;
 	}
 
-	/* FIXME: Document how the locking works. */
-	spin_lock_irq(&ohci->lock);
-
-	ohci->generation = -1; /* prevent AT packet queueing */
-	context_stop(&ohci->at_request_ctx);
-	context_stop(&ohci->at_response_ctx);
-
-	spin_unlock_irq(&ohci->lock);
+	// FIXME: Document how the locking works.
+	scoped_guard(spinlock_irq, &ohci->lock) {
+		ohci->generation = -1;	// prevent AT packet queueing
+		context_stop(&ohci->at_request_ctx);
+		context_stop(&ohci->at_response_ctx);
+	}
 
 	/*
 	 * Per OHCI 1.2 draft, clause 7.2.3.3, hardware may leave unsent
@@ -2129,53 +2153,42 @@ static void bus_reset_work(struct work_struct *work)
 	at_context_flush(&ohci->at_request_ctx);
 	at_context_flush(&ohci->at_response_ctx);
 
-	spin_lock_irq(&ohci->lock);
+	scoped_guard(spinlock_irq, &ohci->lock) {
+		ohci->generation = generation;
+		reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
+		reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
 
-	ohci->generation = generation;
-	reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
-	reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
+		if (ohci->quirks & QUIRK_RESET_PACKET)
+			ohci->request_generation = generation;
 
-	if (ohci->quirks & QUIRK_RESET_PACKET)
-		ohci->request_generation = generation;
+		// This next bit is unrelated to the AT context stuff but we have to do it under the
+		// spinlock also. If a new config rom was set up before this reset, the old one is
+		// now no longer in use and we can free it. Update the config rom pointers to point
+		// to the current config rom and clear the next_config_rom pointer so a new update
+		// can take place.
+		if (ohci->next_config_rom != NULL) {
+			if (ohci->next_config_rom != ohci->config_rom) {
+				free_rom = ohci->config_rom;
+				free_rom_bus = ohci->config_rom_bus;
+			}
+			ohci->config_rom = ohci->next_config_rom;
+			ohci->config_rom_bus = ohci->next_config_rom_bus;
+			ohci->next_config_rom = NULL;
 
-	/*
-	 * This next bit is unrelated to the AT context stuff but we
-	 * have to do it under the spinlock also. If a new config rom
-	 * was set up before this reset, the old one is now no longer
-	 * in use and we can free it. Update the config rom pointers
-	 * to point to the current config rom and clear the
-	 * next_config_rom pointer so a new update can take place.
-	 */
-
-	if (ohci->next_config_rom != NULL) {
-		if (ohci->next_config_rom != ohci->config_rom) {
-			free_rom = ohci->config_rom;
-			free_rom_bus = ohci->config_rom_bus;
+			// Restore config_rom image and manually update config_rom registers.
+			// Writing the header quadlet will indicate that the config rom is ready,
+			// so we do that last.
+			reg_write(ohci, OHCI1394_BusOptions, be32_to_cpu(ohci->config_rom[2]));
+			ohci->config_rom[0] = ohci->next_header;
+			reg_write(ohci, OHCI1394_ConfigROMhdr, be32_to_cpu(ohci->next_header));
 		}
-		ohci->config_rom = ohci->next_config_rom;
-		ohci->config_rom_bus = ohci->next_config_rom_bus;
-		ohci->next_config_rom = NULL;
 
-		/*
-		 * Restore config_rom image and manually update
-		 * config_rom registers. Writing the header quadlet
-		 * will indicate that the config rom is ready, so we
-		 * do that last.
-		 */
-		reg_write(ohci, OHCI1394_BusOptions,
-			  be32_to_cpu(ohci->config_rom[2]));
-		ohci->config_rom[0] = ohci->next_header;
-		reg_write(ohci, OHCI1394_ConfigROMhdr,
-			  be32_to_cpu(ohci->next_header));
+		if (param_remote_dma) {
+			reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
+			reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
+		}
 	}
 
-	if (param_remote_dma) {
-		reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
-		reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
-	}
-
-	spin_unlock_irq(&ohci->lock);
-
 	if (free_rom)
 		dmam_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, free_rom, free_rom_bus);
 
@@ -2198,6 +2211,11 @@ static irqreturn_t irq_handler(int irq, void *data)
 	if (!event || !~event)
 		return IRQ_NONE;
 
+	if (unlikely(param_debug > 0)) {
+		dev_notice_ratelimited(ohci->card.device,
+				       "The debug parameter is superceded by tracepoints events, and deprecated.");
+	}
+
 	/*
 	 * busReset and postedWriteErr events must not be cleared yet
 	 * (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1)
@@ -2238,8 +2256,7 @@ static irqreturn_t irq_handler(int irq, void *data)
 
 		while (iso_event) {
 			i = ffs(iso_event) - 1;
-			tasklet_schedule(
-				&ohci->ir_context_list[i].context.tasklet);
+			fw_iso_context_schedule_flush_completions(&ohci->ir_context_list[i].base);
 			iso_event &= ~(1 << i);
 		}
 	}
@@ -2250,8 +2267,7 @@ static irqreturn_t irq_handler(int irq, void *data)
 
 		while (iso_event) {
 			i = ffs(iso_event) - 1;
-			tasklet_schedule(
-				&ohci->it_context_list[i].context.tasklet);
+			fw_iso_context_schedule_flush_completions(&ohci->it_context_list[i].base);
 			iso_event &= ~(1 << i);
 		}
 	}
@@ -2264,13 +2280,11 @@ static irqreturn_t irq_handler(int irq, void *data)
 			reg_read(ohci, OHCI1394_PostedWriteAddressLo);
 		reg_write(ohci, OHCI1394_IntEventClear,
 			  OHCI1394_postedWriteErr);
-		if (printk_ratelimit())
-			ohci_err(ohci, "PCI posted write error\n");
+		dev_err_ratelimited(ohci->card.device, "PCI posted write error\n");
 	}
 
 	if (unlikely(event & OHCI1394_cycleTooLong)) {
-		if (printk_ratelimit())
-			ohci_notice(ohci, "isochronous cycle too long\n");
+		dev_notice_ratelimited(ohci->card.device, "isochronous cycle too long\n");
 		reg_write(ohci, OHCI1394_LinkControlSet,
 			  OHCI1394_LinkControl_cycleMaster);
 	}
@@ -2282,17 +2296,15 @@ static irqreturn_t irq_handler(int irq, void *data)
 		 * stop active cycleMatch iso contexts now and restart
 		 * them at least two cycles later. (FIXME?)
 		 */
-		if (printk_ratelimit())
-			ohci_notice(ohci, "isochronous cycle inconsistent\n");
+		dev_notice_ratelimited(ohci->card.device, "isochronous cycle inconsistent\n");
 	}
 
 	if (unlikely(event & OHCI1394_unrecoverableError))
 		handle_dead_contexts(ohci);
 
 	if (event & OHCI1394_cycle64Seconds) {
-		spin_lock(&ohci->lock);
+		guard(spinlock)(&ohci->lock);
 		update_bus_time(ohci);
-		spin_unlock(&ohci->lock);
 	} else
 		flush_writes(ohci);
 
@@ -2617,34 +2629,27 @@ static int ohci_set_config_rom(struct fw_card *card,
 	if (next_config_rom == NULL)
 		return -ENOMEM;
 
-	spin_lock_irq(&ohci->lock);
+	scoped_guard(spinlock_irq, &ohci->lock) {
+		// If there is not an already pending config_rom update, push our new allocation
+		// into the ohci->next_config_rom and then mark the local variable as null so that
+		// we won't deallocate the new buffer.
+		//
+		// OTOH, if there is a pending config_rom update, just use that buffer with the new
+		// config_rom data, and let this routine free the unused DMA allocation.
+		if (ohci->next_config_rom == NULL) {
+			ohci->next_config_rom = next_config_rom;
+			ohci->next_config_rom_bus = next_config_rom_bus;
+			next_config_rom = NULL;
+		}
 
-	/*
-	 * If there is not an already pending config_rom update,
-	 * push our new allocation into the ohci->next_config_rom
-	 * and then mark the local variable as null so that we
-	 * won't deallocate the new buffer.
-	 *
-	 * OTOH, if there is a pending config_rom update, just
-	 * use that buffer with the new config_rom data, and
-	 * let this routine free the unused DMA allocation.
-	 */
+		copy_config_rom(ohci->next_config_rom, config_rom, length);
 
-	if (ohci->next_config_rom == NULL) {
-		ohci->next_config_rom = next_config_rom;
-		ohci->next_config_rom_bus = next_config_rom_bus;
-		next_config_rom = NULL;
+		ohci->next_header = config_rom[0];
+		ohci->next_config_rom[0] = 0;
+
+		reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
 	}
 
-	copy_config_rom(ohci->next_config_rom, config_rom, length);
-
-	ohci->next_header = config_rom[0];
-	ohci->next_config_rom[0] = 0;
-
-	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
-
-	spin_unlock_irq(&ohci->lock);
-
 	/* If we didn't use the DMA allocation, delete it. */
 	if (next_config_rom != NULL) {
 		dmam_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, next_config_rom,
@@ -2713,7 +2718,6 @@ static int ohci_enable_phys_dma(struct fw_card *card,
 			       int node_id, int generation)
 {
 	struct fw_ohci *ohci = fw_ohci(card);
-	unsigned long flags;
 	int n, ret = 0;
 
 	if (param_remote_dma)
@@ -2724,12 +2728,10 @@ static int ohci_enable_phys_dma(struct fw_card *card,
 	 * interrupt bit. Clear physReqResourceAllBuses on bus reset.
 	 */
 
-	spin_lock_irqsave(&ohci->lock, flags);
+	guard(spinlock_irqsave)(&ohci->lock);
 
-	if (ohci->generation != generation) {
-		ret = -ESTALE;
-		goto out;
-	}
+	if (ohci->generation != generation)
+		return -ESTALE;
 
 	/*
 	 * Note, if the node ID contains a non-local bus ID, physical DMA is
@@ -2743,8 +2745,6 @@ static int ohci_enable_phys_dma(struct fw_card *card,
 		reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));
 
 	flush_writes(ohci);
- out:
-	spin_unlock_irqrestore(&ohci->lock, flags);
 
 	return ret;
 }
@@ -2752,7 +2752,6 @@ static int ohci_enable_phys_dma(struct fw_card *card,
 static u32 ohci_read_csr(struct fw_card *card, int csr_offset)
 {
 	struct fw_ohci *ohci = fw_ohci(card);
-	unsigned long flags;
 	u32 value;
 
 	switch (csr_offset) {
@@ -2776,16 +2775,14 @@ static u32 ohci_read_csr(struct fw_card *card, int csr_offset)
 		return get_cycle_time(ohci);
 
 	case CSR_BUS_TIME:
-		/*
-		 * We might be called just after the cycle timer has wrapped
-		 * around but just before the cycle64Seconds handler, so we
-		 * better check here, too, if the bus time needs to be updated.
-		 */
-		spin_lock_irqsave(&ohci->lock, flags);
-		value = update_bus_time(ohci);
-		spin_unlock_irqrestore(&ohci->lock, flags);
-		return value;
+	{
+		// We might be called just after the cycle timer has wrapped around but just before
+		// the cycle64Seconds handler, so we better check here, too, if the bus time needs
+		// to be updated.
+
+		guard(spinlock_irqsave)(&ohci->lock);
+		return update_bus_time(ohci);
+	}
 	case CSR_BUSY_TIMEOUT:
 		value = reg_read(ohci, OHCI1394_ATRetries);
 		return (value >> 4) & 0x0ffff00f;
@@ -2803,7 +2800,6 @@ static u32 ohci_read_csr(struct fw_card *card, int csr_offset)
 static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value)
 {
 	struct fw_ohci *ohci = fw_ohci(card);
-	unsigned long flags;
 
 	switch (csr_offset) {
 	case CSR_STATE_CLEAR:
@@ -2839,12 +2835,11 @@ static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value)
 		break;
 
 	case CSR_BUS_TIME:
-		spin_lock_irqsave(&ohci->lock, flags);
-		ohci->bus_time = (update_bus_time(ohci) & 0x40) |
-				 (value & ~0x7f);
-		spin_unlock_irqrestore(&ohci->lock, flags);
+	{
+		guard(spinlock_irqsave)(&ohci->lock);
+		ohci->bus_time = (update_bus_time(ohci) & 0x40) | (value & ~0x7f);
 		break;
-
+	}
 	case CSR_BUSY_TIMEOUT:
 		value = (value & 0xf) | ((value & 0xf) << 4) |
 			((value & 0xf) << 8) | ((value & 0x0ffff000) << 4);
@@ -2932,7 +2927,7 @@ static int handle_ir_packet_per_buffer(struct context *context,
 	copy_iso_headers(ctx, (u32 *) (last + 1));
 
 	if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS))
-		flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_IRQ);
+		flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_INTERRUPT);
 
 	return 1;
 }
@@ -2968,7 +2963,7 @@ static int handle_ir_buffer_fill(struct context *context,
 
 	if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) {
 		trace_isoc_inbound_multiple_completions(&ctx->base, completed,
-							FW_ISO_CONTEXT_COMPLETIONS_CAUSE_IRQ);
+							FW_ISO_CONTEXT_COMPLETIONS_CAUSE_INTERRUPT);
 
 		ctx->base.callback.mc(&ctx->base,
 				      buffer_dma + completed,
@@ -3064,7 +3059,7 @@ static int handle_it_packet(struct context *context,
 		ctx->header_length += 4;
 
 	if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS))
-		flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_IRQ);
+		flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_INTERRUPT);
 
 	return 1;
 }
@@ -3090,55 +3085,53 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
 	u32 *mask, regs;
 	int index, ret = -EBUSY;
 
-	spin_lock_irq(&ohci->lock);
+	scoped_guard(spinlock_irq, &ohci->lock) {
+		switch (type) {
+		case FW_ISO_CONTEXT_TRANSMIT:
+			mask = &ohci->it_context_mask;
+			callback = handle_it_packet;
+			index = ffs(*mask) - 1;
+			if (index >= 0) {
+				*mask &= ~(1 << index);
+				regs = OHCI1394_IsoXmitContextBase(index);
+				ctx = &ohci->it_context_list[index];
+			}
+			break;
 
-	switch (type) {
-	case FW_ISO_CONTEXT_TRANSMIT:
-		mask = &ohci->it_context_mask;
-		callback = handle_it_packet;
-		index = ffs(*mask) - 1;
-		if (index >= 0) {
-			*mask &= ~(1 << index);
-			regs = OHCI1394_IsoXmitContextBase(index);
-			ctx = &ohci->it_context_list[index];
+		case FW_ISO_CONTEXT_RECEIVE:
+			channels = &ohci->ir_context_channels;
+			mask = &ohci->ir_context_mask;
+			callback = handle_ir_packet_per_buffer;
+			index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
+			if (index >= 0) {
+				*channels &= ~(1ULL << channel);
+				*mask &= ~(1 << index);
+				regs = OHCI1394_IsoRcvContextBase(index);
+				ctx = &ohci->ir_context_list[index];
+			}
+			break;
+
+		case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+			mask = &ohci->ir_context_mask;
+			callback = handle_ir_buffer_fill;
+			index = !ohci->mc_allocated ? ffs(*mask) - 1 : -1;
+			if (index >= 0) {
+				ohci->mc_allocated = true;
+				*mask &= ~(1 << index);
+				regs = OHCI1394_IsoRcvContextBase(index);
+				ctx = &ohci->ir_context_list[index];
+			}
+			break;
+
+		default:
+			index = -1;
+			ret = -ENOSYS;
 		}
-		break;
 
-	case FW_ISO_CONTEXT_RECEIVE:
-		channels = &ohci->ir_context_channels;
-		mask = &ohci->ir_context_mask;
-		callback = handle_ir_packet_per_buffer;
-		index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
-		if (index >= 0) {
-			*channels &= ~(1ULL << channel);
-			*mask &= ~(1 << index);
-			regs = OHCI1394_IsoRcvContextBase(index);
-			ctx = &ohci->ir_context_list[index];
-		}
-		break;
-
-	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
-		mask = &ohci->ir_context_mask;
-		callback = handle_ir_buffer_fill;
-		index = !ohci->mc_allocated ? ffs(*mask) - 1 : -1;
-		if (index >= 0) {
-			ohci->mc_allocated = true;
-			*mask &= ~(1 << index);
-			regs = OHCI1394_IsoRcvContextBase(index);
-			ctx = &ohci->ir_context_list[index];
-		}
-		break;
-
-	default:
-		index = -1;
-		ret = -ENOSYS;
+		if (index < 0)
+			return ERR_PTR(ret);
 	}
 
-	spin_unlock_irq(&ohci->lock);
-
-	if (index < 0)
-		return ERR_PTR(ret);
-
 	memset(ctx, 0, sizeof(*ctx));
 	ctx->header_length = 0;
 	ctx->header = (void *) __get_free_page(GFP_KERNEL);
@@ -3149,6 +3142,7 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
 	ret = context_init(&ctx->context, ohci, regs, callback);
 	if (ret < 0)
 		goto out_with_header;
+	fw_iso_context_init_work(&ctx->base, ohci_isoc_context_work);
 
 	if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL) {
 		set_multichannel_mask(ohci, 0);
@@ -3160,20 +3154,18 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
 out_with_header:
 	free_page((unsigned long)ctx->header);
 out:
-	spin_lock_irq(&ohci->lock);
+	scoped_guard(spinlock_irq, &ohci->lock) {
+		switch (type) {
+		case FW_ISO_CONTEXT_RECEIVE:
+			*channels |= 1ULL << channel;
+			break;
 
-	switch (type) {
-	case FW_ISO_CONTEXT_RECEIVE:
-		*channels |= 1ULL << channel;
-		break;
+		case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+			ohci->mc_allocated = false;
+			break;
+		}
+		*mask |= 1 << index;
+	}
 
-	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
-		ohci->mc_allocated = false;
-		break;
-	}
-	*mask |= 1 << index;
-
-	spin_unlock_irq(&ohci->lock);
-
 	return ERR_PTR(ret);
 }
@@ -3248,7 +3240,6 @@ static int ohci_stop_iso(struct fw_iso_context *base)
 	}
 	flush_writes(ohci);
 	context_stop(&ctx->context);
-	tasklet_kill(&ctx->context.tasklet);
 
 	return 0;
 }
@@ -3257,14 +3248,13 @@ static void ohci_free_iso_context(struct fw_iso_context *base)
 {
 	struct fw_ohci *ohci = fw_ohci(base->card);
 	struct iso_context *ctx = container_of(base, struct iso_context, base);
-	unsigned long flags;
 	int index;
 
 	ohci_stop_iso(base);
 	context_release(&ctx->context);
 	free_page((unsigned long)ctx->header);
 
-	spin_lock_irqsave(&ohci->lock, flags);
+	guard(spinlock_irqsave)(&ohci->lock);
 
 	switch (base->type) {
 	case FW_ISO_CONTEXT_TRANSMIT:
@@ -3286,38 +3276,29 @@ static void ohci_free_iso_context(struct fw_iso_context *base)
 		ohci->mc_allocated = false;
 		break;
 	}
-
-	spin_unlock_irqrestore(&ohci->lock, flags);
 }
 
 static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels)
 {
 	struct fw_ohci *ohci = fw_ohci(base->card);
-	unsigned long flags;
-	int ret;
 
 	switch (base->type) {
 	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+	{
+		guard(spinlock_irqsave)(&ohci->lock);
 
-		spin_lock_irqsave(&ohci->lock, flags);
-
-		/* Don't allow multichannel to grab other contexts' channels. */
+		// Don't allow multichannel to grab other contexts' channels.
 		if (~ohci->ir_context_channels & ~ohci->mc_channels & *channels) {
 			*channels = ohci->ir_context_channels;
-			ret = -EBUSY;
+			return -EBUSY;
 		} else {
 			set_multichannel_mask(ohci, *channels);
-			ret = 0;
+			return 0;
 		}
-
-		spin_unlock_irqrestore(&ohci->lock, flags);
-
-		break;
-	default:
-		ret = -EINVAL;
 	}
-
-	return ret;
+	default:
+		return -EINVAL;
+	}
 }
 
 #ifdef CONFIG_PM
@@ -3392,14 +3373,14 @@ static int queue_iso_transmit(struct iso_context *ctx,
 		d[0].branch_address = cpu_to_le32(d_bus | z);
 
 		header = (__le32 *) &d[1];
-		header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
-					IT_HEADER_TAG(p->tag) |
-					IT_HEADER_TCODE(TCODE_STREAM_DATA) |
-					IT_HEADER_CHANNEL(ctx->base.channel) |
-					IT_HEADER_SPEED(ctx->base.speed));
-		header[1] =
-			cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
-							  p->payload_length));
+
+		ohci1394_it_data_set_speed(header, ctx->base.speed);
+		ohci1394_it_data_set_tag(header, p->tag);
+		ohci1394_it_data_set_channel(header, ctx->base.channel);
+		ohci1394_it_data_set_tcode(header, TCODE_STREAM_DATA);
+		ohci1394_it_data_set_sync(header, p->sy);
+
+		ohci1394_it_data_set_data_length(header, p->header_length + p->payload_length);
 	}
 
 	if (p->header_length > 0) {
@@ -3587,24 +3568,19 @@ static int ohci_queue_iso(struct fw_iso_context *base,
 			  unsigned long payload)
 {
 	struct iso_context *ctx = container_of(base, struct iso_context, base);
-	unsigned long flags;
-	int ret = -ENOSYS;
 
-	spin_lock_irqsave(&ctx->context.ohci->lock, flags);
+	guard(spinlock_irqsave)(&ctx->context.ohci->lock);
 
 	switch (base->type) {
 	case FW_ISO_CONTEXT_TRANSMIT:
-		ret = queue_iso_transmit(ctx, packet, buffer, payload);
-		break;
+		return queue_iso_transmit(ctx, packet, buffer, payload);
 	case FW_ISO_CONTEXT_RECEIVE:
-		ret = queue_iso_packet_per_buffer(ctx, packet, buffer, payload);
-		break;
+		return queue_iso_packet_per_buffer(ctx, packet, buffer, payload);
 	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
-		ret = queue_iso_buffer_fill(ctx, packet, buffer, payload);
-		break;
+		return queue_iso_buffer_fill(ctx, packet, buffer, payload);
+	default:
+		return -ENOSYS;
 	}
-	spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);
-
-	return ret;
 }
 
 static void ohci_flush_queue_iso(struct fw_iso_context *base)
@@ -3620,10 +3596,8 @@ static int ohci_flush_iso_completions(struct fw_iso_context *base)
 	struct iso_context *ctx = container_of(base, struct iso_context, base);
 	int ret = 0;
 
-	tasklet_disable_in_atomic(&ctx->context.tasklet);
-
 	if (!test_and_set_bit_lock(0, &ctx->flushing_completions)) {
-		context_tasklet((unsigned long)&ctx->context);
+		ohci_isoc_context_work(&base->work);
 
 		switch (base->type) {
 		case FW_ISO_CONTEXT_TRANSMIT:
@@ -3643,8 +3617,6 @@ static int ohci_flush_iso_completions(struct fw_iso_context *base)
 		smp_mb__after_atomic();
 	}
 
-	tasklet_enable(&ctx->context.tasklet);
-
 	return ret;
 }
 
@@ -3863,7 +3835,7 @@ static int pci_probe(struct pci_dev *dev,
 		goto fail_msi;
 	}
 
-	err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
+	err = fw_card_add(&ohci->card, max_receive, link_speed, guid, ohci->n_it + ohci->n_ir);
 	if (err)
 		goto fail_irq;
 
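The extra fw_card_add() argument (ohci->n_it + ohci->n_ir, the number of IT plus IR contexts the controller implements) lets the core size the per-card isochronous workqueue. A sketch of what the core plausibly does with it; the field name, flags, and name format below are assumptions for illustration, not quoted from the series:

	int fw_card_add(struct fw_card *card, u32 max_receive, u32 link_speed, u64 guid,
			unsigned int supported_isoc_contexts)
	{
		// At most one work item per isochronous context can be pending,
		// so cap workqueue concurrency at the context count.
		card->isoc_wq = alloc_workqueue("firewire-isoc-card%u", WQ_UNBOUND,
						supported_isoc_contexts, card->index);
		if (!card->isoc_wq)
			return -ENOMEM;
		/* remainder of card registration unchanged */
		return 0;
	}
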
@ -153,7 +153,205 @@
|
||||
#define OHCI1394_evt_unknown 0xe
|
||||
#define OHCI1394_evt_flushed 0xf
|
||||
|
||||
#define OHCI1394_phy_tcode 0xe
|
||||
|
||||
// Asynchronous Transmit DMA.
|
||||
//
|
||||
// The content of first two quadlets of data for AT DMA is different from the header for IEEE 1394
|
||||
// asynchronous packet.
|
||||
|
||||
#define OHCI1394_AT_DATA_Q0_srcBusID_MASK 0x00800000
|
||||
#define OHCI1394_AT_DATA_Q0_srcBusID_SHIFT 23
|
||||
#define OHCI1394_AT_DATA_Q0_spd_MASK 0x00070000
|
||||
#define OHCI1394_AT_DATA_Q0_spd_SHIFT 16
|
||||
#define OHCI1394_AT_DATA_Q0_tLabel_MASK 0x0000fc00
|
||||
#define OHCI1394_AT_DATA_Q0_tLabel_SHIFT 10
|
||||
#define OHCI1394_AT_DATA_Q0_rt_MASK 0x00000300
|
||||
#define OHCI1394_AT_DATA_Q0_rt_SHIFT 8
|
||||
#define OHCI1394_AT_DATA_Q0_tCode_MASK 0x000000f0
|
||||
#define OHCI1394_AT_DATA_Q0_tCode_SHIFT 4
|
||||
#define OHCI1394_AT_DATA_Q1_destinationId_MASK 0xffff0000
|
||||
#define OHCI1394_AT_DATA_Q1_destinationId_SHIFT 16
|
||||
#define OHCI1394_AT_DATA_Q1_destinationOffsetHigh_MASK 0x0000ffff
|
||||
#define OHCI1394_AT_DATA_Q1_destinationOffsetHigh_SHIFT 0
|
||||
#define OHCI1394_AT_DATA_Q1_rCode_MASK 0x0000f000
|
||||
#define OHCI1394_AT_DATA_Q1_rCode_SHIFT 12
|
||||
|
||||
static inline bool ohci1394_at_data_get_src_bus_id(const __le32 *data)
|
||||
{
|
||||
return !!((data[0] & OHCI1394_AT_DATA_Q0_srcBusID_MASK) >> OHCI1394_AT_DATA_Q0_srcBusID_SHIFT);
|
||||
}
|
||||
|
||||
static inline void ohci1394_at_data_set_src_bus_id(__le32 *data, bool src_bus_id)
|
||||
{
|
||||
data[0] &= cpu_to_le32(~OHCI1394_AT_DATA_Q0_srcBusID_MASK);
|
||||
data[0] |= cpu_to_le32((src_bus_id << OHCI1394_AT_DATA_Q0_srcBusID_SHIFT) & OHCI1394_AT_DATA_Q0_srcBusID_MASK);
|
||||
}
|
||||
|
||||
static inline unsigned int ohci1394_at_data_get_speed(const __le32 *data)
{
	return (le32_to_cpu(data[0]) & OHCI1394_AT_DATA_Q0_spd_MASK) >> OHCI1394_AT_DATA_Q0_spd_SHIFT;
}

static inline void ohci1394_at_data_set_speed(__le32 *data, unsigned int scode)
{
	data[0] &= cpu_to_le32(~OHCI1394_AT_DATA_Q0_spd_MASK);
	data[0] |= cpu_to_le32((scode << OHCI1394_AT_DATA_Q0_spd_SHIFT) & OHCI1394_AT_DATA_Q0_spd_MASK);
}

static inline unsigned int ohci1394_at_data_get_tlabel(const __le32 *data)
{
	return (le32_to_cpu(data[0]) & OHCI1394_AT_DATA_Q0_tLabel_MASK) >> OHCI1394_AT_DATA_Q0_tLabel_SHIFT;
}

static inline void ohci1394_at_data_set_tlabel(__le32 *data, unsigned int tlabel)
{
	data[0] &= cpu_to_le32(~OHCI1394_AT_DATA_Q0_tLabel_MASK);
	data[0] |= cpu_to_le32((tlabel << OHCI1394_AT_DATA_Q0_tLabel_SHIFT) & OHCI1394_AT_DATA_Q0_tLabel_MASK);
}

static inline unsigned int ohci1394_at_data_get_retry(const __le32 *data)
{
	return (le32_to_cpu(data[0]) & OHCI1394_AT_DATA_Q0_rt_MASK) >> OHCI1394_AT_DATA_Q0_rt_SHIFT;
}

static inline void ohci1394_at_data_set_retry(__le32 *data, unsigned int retry)
{
	data[0] &= cpu_to_le32(~OHCI1394_AT_DATA_Q0_rt_MASK);
	data[0] |= cpu_to_le32((retry << OHCI1394_AT_DATA_Q0_rt_SHIFT) & OHCI1394_AT_DATA_Q0_rt_MASK);
}

static inline unsigned int ohci1394_at_data_get_tcode(const __le32 *data)
{
	return (le32_to_cpu(data[0]) & OHCI1394_AT_DATA_Q0_tCode_MASK) >> OHCI1394_AT_DATA_Q0_tCode_SHIFT;
}

static inline void ohci1394_at_data_set_tcode(__le32 *data, unsigned int tcode)
{
	data[0] &= cpu_to_le32(~OHCI1394_AT_DATA_Q0_tCode_MASK);
	data[0] |= cpu_to_le32((tcode << OHCI1394_AT_DATA_Q0_tCode_SHIFT) & OHCI1394_AT_DATA_Q0_tCode_MASK);
}

static inline unsigned int ohci1394_at_data_get_destination_id(const __le32 *data)
{
	return (le32_to_cpu(data[1]) & OHCI1394_AT_DATA_Q1_destinationId_MASK) >> OHCI1394_AT_DATA_Q1_destinationId_SHIFT;
}

static inline void ohci1394_at_data_set_destination_id(__le32 *data, unsigned int destination_id)
{
	data[1] &= cpu_to_le32(~OHCI1394_AT_DATA_Q1_destinationId_MASK);
	data[1] |= cpu_to_le32((destination_id << OHCI1394_AT_DATA_Q1_destinationId_SHIFT) & OHCI1394_AT_DATA_Q1_destinationId_MASK);
}

static inline u64 ohci1394_at_data_get_destination_offset(const __le32 *data)
{
	u64 hi = (u64)((le32_to_cpu(data[1]) & OHCI1394_AT_DATA_Q1_destinationOffsetHigh_MASK) >> OHCI1394_AT_DATA_Q1_destinationOffsetHigh_SHIFT);
	u64 lo = (u64)le32_to_cpu(data[2]);
	return (hi << 32) | lo;
}

static inline void ohci1394_at_data_set_destination_offset(__le32 *data, u64 offset)
{
	u32 hi = (u32)(offset >> 32);
	u32 lo = (u32)(offset & 0x00000000ffffffff);
	data[1] &= cpu_to_le32(~OHCI1394_AT_DATA_Q1_destinationOffsetHigh_MASK);
	data[1] |= cpu_to_le32((hi << OHCI1394_AT_DATA_Q1_destinationOffsetHigh_SHIFT) & OHCI1394_AT_DATA_Q1_destinationOffsetHigh_MASK);
	data[2] = cpu_to_le32(lo);
}

static inline unsigned int ohci1394_at_data_get_rcode(const __le32 *data)
{
	return (le32_to_cpu(data[1]) & OHCI1394_AT_DATA_Q1_rCode_MASK) >> OHCI1394_AT_DATA_Q1_rCode_SHIFT;
}

static inline void ohci1394_at_data_set_rcode(__le32 *data, unsigned int rcode)
{
	data[1] &= cpu_to_le32(~OHCI1394_AT_DATA_Q1_rCode_MASK);
	data[1] |= cpu_to_le32((rcode << OHCI1394_AT_DATA_Q1_rCode_SHIFT) & OHCI1394_AT_DATA_Q1_rCode_MASK);
}
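
For illustration only (this is not part of the diff), the accessors above compose like this when serializing the first quadlets of an asynchronous request. The constants are the standard ones from <linux/firewire-constants.h> and <linux/firewire.h>; the destination values are arbitrary examples:

	// Sketch: fill the first quadlets of an AT request for a quadlet read.
	__le32 header[3] = {0};

	ohci1394_at_data_set_speed(header, SCODE_400);
	ohci1394_at_data_set_tlabel(header, 0x01);
	ohci1394_at_data_set_retry(header, 0x01);	// retry_X
	ohci1394_at_data_set_tcode(header, TCODE_READ_QUADLET_REQUEST);
	ohci1394_at_data_set_destination_id(header, 0xffc0);	// local bus, node 0
	ohci1394_at_data_set_destination_offset(header, CSR_REGISTER_BASE + CSR_CYCLE_TIME);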
// Isochronous Transmit DMA.
//
// The content of the first two quadlets of data for IT DMA differs from the header of an
// IEEE 1394 isochronous packet.

#define OHCI1394_IT_DATA_Q0_spd_MASK 0x00070000
#define OHCI1394_IT_DATA_Q0_spd_SHIFT 16
#define OHCI1394_IT_DATA_Q0_tag_MASK 0x0000c000
#define OHCI1394_IT_DATA_Q0_tag_SHIFT 14
#define OHCI1394_IT_DATA_Q0_chanNum_MASK 0x00003f00
#define OHCI1394_IT_DATA_Q0_chanNum_SHIFT 8
#define OHCI1394_IT_DATA_Q0_tcode_MASK 0x000000f0
#define OHCI1394_IT_DATA_Q0_tcode_SHIFT 4
#define OHCI1394_IT_DATA_Q0_sy_MASK 0x0000000f
#define OHCI1394_IT_DATA_Q0_sy_SHIFT 0
#define OHCI1394_IT_DATA_Q1_dataLength_MASK 0xffff0000
#define OHCI1394_IT_DATA_Q1_dataLength_SHIFT 16

static inline unsigned int ohci1394_it_data_get_speed(const __le32 *data)
{
	return (le32_to_cpu(data[0]) & OHCI1394_IT_DATA_Q0_spd_MASK) >> OHCI1394_IT_DATA_Q0_spd_SHIFT;
}

static inline void ohci1394_it_data_set_speed(__le32 *data, unsigned int scode)
{
	data[0] &= cpu_to_le32(~OHCI1394_IT_DATA_Q0_spd_MASK);
	data[0] |= cpu_to_le32((scode << OHCI1394_IT_DATA_Q0_spd_SHIFT) & OHCI1394_IT_DATA_Q0_spd_MASK);
}

static inline unsigned int ohci1394_it_data_get_tag(const __le32 *data)
{
	return (le32_to_cpu(data[0]) & OHCI1394_IT_DATA_Q0_tag_MASK) >> OHCI1394_IT_DATA_Q0_tag_SHIFT;
}

static inline void ohci1394_it_data_set_tag(__le32 *data, unsigned int tag)
{
	data[0] &= cpu_to_le32(~OHCI1394_IT_DATA_Q0_tag_MASK);
	data[0] |= cpu_to_le32((tag << OHCI1394_IT_DATA_Q0_tag_SHIFT) & OHCI1394_IT_DATA_Q0_tag_MASK);
}

static inline unsigned int ohci1394_it_data_get_channel(const __le32 *data)
{
	return (le32_to_cpu(data[0]) & OHCI1394_IT_DATA_Q0_chanNum_MASK) >> OHCI1394_IT_DATA_Q0_chanNum_SHIFT;
}

static inline void ohci1394_it_data_set_channel(__le32 *data, unsigned int channel)
{
	data[0] &= cpu_to_le32(~OHCI1394_IT_DATA_Q0_chanNum_MASK);
	data[0] |= cpu_to_le32((channel << OHCI1394_IT_DATA_Q0_chanNum_SHIFT) & OHCI1394_IT_DATA_Q0_chanNum_MASK);
}

static inline unsigned int ohci1394_it_data_get_tcode(const __le32 *data)
{
	return (le32_to_cpu(data[0]) & OHCI1394_IT_DATA_Q0_tcode_MASK) >> OHCI1394_IT_DATA_Q0_tcode_SHIFT;
}

static inline void ohci1394_it_data_set_tcode(__le32 *data, unsigned int tcode)
{
	data[0] &= cpu_to_le32(~OHCI1394_IT_DATA_Q0_tcode_MASK);
	data[0] |= cpu_to_le32((tcode << OHCI1394_IT_DATA_Q0_tcode_SHIFT) & OHCI1394_IT_DATA_Q0_tcode_MASK);
}

static inline unsigned int ohci1394_it_data_get_sync(const __le32 *data)
{
	return (le32_to_cpu(data[0]) & OHCI1394_IT_DATA_Q0_sy_MASK) >> OHCI1394_IT_DATA_Q0_sy_SHIFT;
}

static inline void ohci1394_it_data_set_sync(__le32 *data, unsigned int sync)
{
	data[0] &= cpu_to_le32(~OHCI1394_IT_DATA_Q0_sy_MASK);
	data[0] |= cpu_to_le32((sync << OHCI1394_IT_DATA_Q0_sy_SHIFT) & OHCI1394_IT_DATA_Q0_sy_MASK);
}

static inline unsigned int ohci1394_it_data_get_data_length(const __le32 *data)
{
	return (le32_to_cpu(data[1]) & OHCI1394_IT_DATA_Q1_dataLength_MASK) >> OHCI1394_IT_DATA_Q1_dataLength_SHIFT;
}

static inline void ohci1394_it_data_set_data_length(__le32 *data, unsigned int data_length)
{
	data[1] &= cpu_to_le32(~OHCI1394_IT_DATA_Q1_dataLength_MASK);
	data[1] |= cpu_to_le32((data_length << OHCI1394_IT_DATA_Q1_dataLength_SHIFT) & OHCI1394_IT_DATA_Q1_dataLength_MASK);
}
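
As a sketch (again not from the diff), the IT accessors fill the two IT DMA quadlets for an isochronous packet; tag = 1 and sy = 0 follow the common IEC 61883-1 convention, and the channel and length values are arbitrary:

	// Sketch: serialize the IT DMA quadlets for a 64-byte packet on channel 1.
	__le32 header[2] = {0};

	ohci1394_it_data_set_speed(header, SCODE_200);
	ohci1394_it_data_set_tag(header, 1);	// tag = 1: CIP header present
	ohci1394_it_data_set_channel(header, 1);
	ohci1394_it_data_set_tcode(header, TCODE_STREAM_DATA);
	ohci1394_it_data_set_sync(header, 0);
	ohci1394_it_data_set_data_length(header, 64);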

// Self-ID DMA.

@@ -134,6 +134,8 @@ struct fw_card {
 	__be32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];

 	__be32 maint_utility_register;
+
+	struct workqueue_struct *isoc_wq;
 };
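
The per-card workqueue added here is allocated when the card is added. A minimal sketch of such an allocation follows; the exact flags are an assumption based on the series description, so check fw_card_add() for what the driver actually chose:

	// Sketch: allocate the per-card workqueue for isochronous contexts.
	card->isoc_wq = alloc_workqueue("firewire-isoc-card%u",
					WQ_UNBOUND | WQ_FREEZABLE | WQ_HIGHPRI | WQ_SYSFS,
					0, card->index);
	if (!card->isoc_wq)
		return -ENOMEM;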

 static inline struct fw_card *fw_card_get(struct fw_card *card)
@@ -509,6 +511,7 @@ union fw_iso_callback {

 struct fw_iso_context {
 	struct fw_card *card;
+	struct work_struct work;
 	int type;
 	int channel;
 	int speed;
@@ -528,6 +531,25 @@ int fw_iso_context_queue(struct fw_iso_context *ctx,
 			 unsigned long payload);
 void fw_iso_context_queue_flush(struct fw_iso_context *ctx);
 int fw_iso_context_flush_completions(struct fw_iso_context *ctx);
+
+/**
+ * fw_iso_context_schedule_flush_completions() - schedule work item to process isochronous context.
+ * @ctx: the isochronous context
+ *
+ * Schedule a work item on workqueue to process the isochronous context. The registered callback
+ * function is called by the worker when a queued packet buffer with the interrupt flag is
+ * completed, either after transmission in the IT context or after being filled in the IR context.
+ * The callback function is also called when the header buffer in the context becomes full. If it
+ * is required to process the context in the current context, fw_iso_context_flush_completions() is
+ * available instead.
+ *
+ * Context: Any context.
+ */
+static inline void fw_iso_context_schedule_flush_completions(struct fw_iso_context *ctx)
+{
+	queue_work(ctx->card->isoc_wq, &ctx->work);
+}
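
As a usage illustration only (the helper name is hypothetical, not from the diff), a unit driver can defer packet processing from an atomic path and flush synchronously from process context:

	// Hypothetical helper: process completions now when sleeping is allowed,
	// otherwise let the per-card workqueue do it.
	static void handle_completions(struct fw_iso_context *ctx)
	{
		if (in_task())
			fw_iso_context_flush_completions(ctx);
		else
			fw_iso_context_schedule_flush_completions(ctx);
	}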

 int fw_iso_context_start(struct fw_iso_context *ctx,
 			 int cycle, int sync, int tags);
 int fw_iso_context_stop(struct fw_iso_context *ctx);
@@ -830,13 +830,13 @@ TRACE_EVENT_CONDITION(isoc_inbound_multiple_queue,
 #ifndef show_cause
 enum fw_iso_context_completions_cause {
 	FW_ISO_CONTEXT_COMPLETIONS_CAUSE_FLUSH = 0,
-	FW_ISO_CONTEXT_COMPLETIONS_CAUSE_IRQ,
+	FW_ISO_CONTEXT_COMPLETIONS_CAUSE_INTERRUPT,
 	FW_ISO_CONTEXT_COMPLETIONS_CAUSE_HEADER_OVERFLOW,
 };
 #define show_cause(cause) \
 	__print_symbolic(cause, \
 		{ FW_ISO_CONTEXT_COMPLETIONS_CAUSE_FLUSH, "FLUSH" }, \
-		{ FW_ISO_CONTEXT_COMPLETIONS_CAUSE_IRQ, "IRQ" }, \
+		{ FW_ISO_CONTEXT_COMPLETIONS_CAUSE_INTERRUPT, "INTERRUPT" }, \
 		{ FW_ISO_CONTEXT_COMPLETIONS_CAUSE_HEADER_OVERFLOW, "HEADER_OVERFLOW" } \
 	)
 #endif
@@ -615,6 +615,22 @@ static void update_pcm_pointers(struct amdtp_stream *s,
 	// The program in user process should periodically check the status of intermediate
 	// buffer associated to PCM substream to process PCM frames in the buffer, instead
 	// of receiving notification of period elapsed by poll wait.
+	//
+	// Use another work item for period elapsed event to prevent the following AB/BA
+	// deadlock:
+	//
+	//        thread 1                             thread 2
+	//  =================================    =================================
+	//  A. work item (process)               pcm ioctl (process)
+	//          v                                    v
+	//  process_rx_packets()                 B. PCM stream lock
+	//  process_tx_packets()                         v
+	//          v                            callbacks in snd_pcm_ops
+	//  update_pcm_pointers()                        v
+	//  snd_pcm_period_elapsed()             fw_iso_context_flush_completions()
+	//  snd_pcm_stream_lock_irqsave()        disable_work_sync()
+	//          v                                    v
+	//  wait until release of B              wait until A exits
 	if (!pcm->runtime->no_period_wakeup)
 		queue_work(system_highpri_wq, &s->period_work);
 }
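
The work item on the left-hand side of this diagram ends up as a trivial handler. A sketch, under the assumption that the member names match those in amdtp-stream.c:

	// Sketch of the period work handler: snd_pcm_period_elapsed() runs
	// outside the isochronous work item, so the AB/BA ordering above
	// cannot form.
	static void pcm_period_work(struct work_struct *work)
	{
		struct amdtp_stream *s = container_of(work, struct amdtp_stream, period_work);
		struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);

		if (pcm)
			snd_pcm_period_elapsed(pcm);
	}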

@@ -1055,8 +1071,15 @@ static void generate_rx_packet_descs(struct amdtp_stream *s, struct pkt_desc *de

 static inline void cancel_stream(struct amdtp_stream *s)
 {
+	struct work_struct *work = current_work();
+
 	s->packet_index = -1;
-	if (in_softirq())
+
+	// Detect work items for any isochronous context. The work item for pcm_period_work()
+	// should be avoided since snd_pcm_period_elapsed() can be reached via
+	// snd_pcm_ops.pointer() while the PCM stream (group) lock is held, causing deadlock
+	// at snd_pcm_stop_xrun().
+	if (work && work != &s->period_work)
 		amdtp_stream_pcm_abort(s);
 	WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
 }

@@ -1856,12 +1879,9 @@ unsigned long amdtp_domain_stream_pcm_pointer(struct amdtp_domain *d,
 	struct amdtp_stream *irq_target = d->irq_target;

 	if (irq_target && amdtp_stream_running(irq_target)) {
-		// use wq to prevent AB/BA deadlock competition for
-		// substream lock:
-		// fw_iso_context_flush_completions() acquires
-		// lock by ohci_flush_iso_completions(),
-		// amdtp-stream process_rx_packets() attempts to
-		// acquire same lock by snd_pcm_elapsed()
+		// The work item to call snd_pcm_period_elapsed() can reach here by the call of
+		// snd_pcm_ops.pointer(), however fewer packets would be available then. Therefore
+		// the following call is just for user process contexts.
 		if (current_work() != &s->period_work)
 			fw_iso_context_flush_completions(irq_target->context);
 	}

@@ -367,6 +367,7 @@ int snd_bebob_create_pcm_devices(struct snd_bebob *bebob)
 		goto end;

 	pcm->private_data = bebob;
+	pcm->nonatomic = true;
 	snprintf(pcm->name, sizeof(pcm->name),
 		 "%s PCM", bebob->card->shortname);
 	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &playback_ops);

@@ -441,6 +441,7 @@ int snd_dice_create_pcm(struct snd_dice *dice)
 	if (err < 0)
 		return err;
 	pcm->private_data = dice;
+	pcm->nonatomic = true;
 	strcpy(pcm->name, dice->card->shortname);

 	if (capture > 0)

@@ -350,6 +350,7 @@ int snd_dg00x_create_pcm_devices(struct snd_dg00x *dg00x)
 		return err;

 	pcm->private_data = dg00x;
+	pcm->nonatomic = true;
 	snprintf(pcm->name, sizeof(pcm->name),
 		 "%s PCM", dg00x->card->shortname);
 	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &playback_ops);

@@ -390,6 +390,7 @@ int snd_ff_create_pcm_devices(struct snd_ff *ff)
 		return err;

 	pcm->private_data = ff;
+	pcm->nonatomic = true;
 	snprintf(pcm->name, sizeof(pcm->name),
 		 "%s PCM", ff->card->shortname);
 	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &pcm_playback_ops);

@@ -397,6 +397,7 @@ int snd_efw_create_pcm_devices(struct snd_efw *efw)
 		goto end;

 	pcm->private_data = efw;
+	pcm->nonatomic = true;
 	snprintf(pcm->name, sizeof(pcm->name), "%s PCM", efw->card->shortname);
 	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &playback_ops);
 	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &capture_ops);

@@ -454,6 +454,7 @@ static int isight_create_pcm(struct isight *isight)
 	if (err < 0)
 		return err;
 	pcm->private_data = isight;
+	pcm->nonatomic = true;
 	strcpy(pcm->name, "iSight");
 	isight->pcm = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream;
 	isight->pcm->ops = &ops;

@@ -360,6 +360,7 @@ int snd_motu_create_pcm_devices(struct snd_motu *motu)
 	if (err < 0)
 		return err;
 	pcm->private_data = motu;
+	pcm->nonatomic = true;
 	strcpy(pcm->name, motu->card->shortname);

 	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &capture_ops);

@@ -440,6 +440,7 @@ int snd_oxfw_create_pcm(struct snd_oxfw *oxfw)
 		return err;

 	pcm->private_data = oxfw;
+	pcm->nonatomic = true;
 	strcpy(pcm->name, oxfw->card->shortname);
 	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &playback_ops);
 	if (cap > 0)

@@ -279,6 +279,7 @@ int snd_tscm_create_pcm_devices(struct snd_tscm *tscm)
 		return err;

 	pcm->private_data = tscm;
+	pcm->nonatomic = true;
 	snprintf(pcm->name, sizeof(pcm->name),
 		 "%s PCM", tscm->card->shortname);
 	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &playback_ops);
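
Every PCM device in the subsystem gets the same one-line change. What the flag buys is that the PCM trigger and pointer callbacks run in process context, so they may sleep; spin-locking is no longer mandatory during isochronous packet processing. A generic sketch of the pattern, with placeholder names rather than any particular driver's:

	// Sketch: create a PCM device whose ops run in nonatomic (process)
	// context, permitting sleepable calls such as
	// fw_iso_context_flush_completions() from the callbacks.
	struct snd_pcm *pcm;
	int err;

	err = snd_pcm_new(card, "Unit", 0, 1, 1, &pcm);
	if (err < 0)
		return err;
	pcm->nonatomic = true;
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &playback_ops);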