diff --git a/drivers/Makefile b/drivers/Makefile
index 26ca9031ea49..adad2f3d438a 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_FC4)		+= fc4/
 obj-$(CONFIG_SCSI)		+= scsi/
 obj-$(CONFIG_ATA)		+= ata/
 obj-$(CONFIG_FUSION)		+= message/
+obj-$(CONFIG_FIREWIRE)		+= firewire/
 obj-$(CONFIG_IEEE1394)		+= ieee1394/
 obj-y				+= cdrom/
 obj-y				+= auxdisplay/
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
new file mode 100644
index 000000000000..5932c72f9e42
--- /dev/null
+++ b/drivers/firewire/Kconfig
@@ -0,0 +1,61 @@
+# -*- shell-script -*-
+
+comment "An alternative FireWire stack is available with EXPERIMENTAL=y"
+	depends on EXPERIMENTAL=n
+
+config FIREWIRE
+	tristate "IEEE 1394 (FireWire) support (JUJU alternative stack, experimental)"
+	depends on EXPERIMENTAL
+	select CRC_ITU_T
+	help
+	  IEEE 1394 describes a high performance serial bus, which is also
+	  known as FireWire(tm) or i.Link(tm) and is used for connecting all
+	  sorts of devices (most notably digital video cameras) to your
+	  computer.
+
+	  If you have FireWire hardware and want to use it, say Y here.  This
+	  is the core support only, you will also need to select a driver for
+	  your IEEE 1394 adapter.
+
+	  To compile this driver as a module, say M here: the module will be
+	  called fw-core.
+
+	  This is the "JUJU" FireWire stack, an alternative implementation
+	  designed for robustness and simplicity.  You can build either this
+	  stack, or the classic stack (the ieee1394 driver, ohci1394 etc.)
+	  or both.
+
+config FIREWIRE_OHCI
+	tristate "Support for OHCI FireWire host controllers"
+	depends on PCI && FIREWIRE
+	help
+	  Enable this driver if you have a FireWire controller based
+	  on the OHCI specification.  For all practical purposes, this
+	  is the only chipset in use, so say Y here.
+
+	  To compile this driver as a module, say M here: the module will be
+	  called fw-ohci.
+
+	  If you also build ohci1394 from the classic IEEE 1394 driver stack,
+	  blacklist either ohci1394 or fw-ohci to let hotplug load the desired
+	  driver.
+
+config FIREWIRE_SBP2
+	tristate "Support for storage devices (SBP-2 protocol driver)"
+	depends on FIREWIRE && SCSI
+	help
+	  This option enables you to use SBP-2 devices connected to a
+	  FireWire bus.  SBP-2 devices include storage devices like
+	  harddisks and DVD drives, and also some other FireWire devices
+	  like scanners.
+
+	  To compile this driver as a module, say M here: the module will be
+	  called fw-sbp2.
+
+	  You should also enable support for disks, CD-ROMs, etc. in the SCSI
+	  configuration section.
+
+	  If you also build sbp2 from the classic IEEE 1394 driver stack,
+	  blacklist either sbp2 or fw-sbp2 to let hotplug load the desired
+	  driver.
+
diff --git a/drivers/firewire/Makefile b/drivers/firewire/Makefile
new file mode 100644
index 000000000000..fc7d59d4bce0
--- /dev/null
+++ b/drivers/firewire/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the Linux IEEE 1394 implementation
+#
+
+fw-core-y += fw-card.o fw-topology.o fw-transaction.o fw-iso.o \
+	fw-device.o fw-cdev.o
+
+obj-$(CONFIG_FIREWIRE) += fw-core.o
+obj-$(CONFIG_FIREWIRE_OHCI) += fw-ohci.o
+obj-$(CONFIG_FIREWIRE_SBP2) += fw-sbp2.o
diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/fw-card.c
new file mode 100644
index 000000000000..636151a64add
--- /dev/null
+++ b/drivers/firewire/fw-card.c
@@ -0,0 +1,560 @@
+/*
+ * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/crc-itu-t.h>
+#include "fw-transaction.h"
+#include "fw-topology.h"
+#include "fw-device.h"
+
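+/*
+ * Compute the CRC for a config ROM block and store it in the low 16
+ * bits of the block's header quadlet.  Bits 16-23 of the header hold
+ * the number of quadlets covered by the CRC, which is the CRC-16
+ * specified by IEEE 1212 (computed here with crc_itu_t over the
+ * big-endian representation of the block).
+ */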
+int fw_compute_block_crc(u32 *block)
+{
+	__be32 be32_block[256];
+	int i, length;
+
+	length = (*block >> 16) & 0xff;
+	for (i = 0; i < length; i++)
+		be32_block[i] = cpu_to_be32(block[i + 1]);
+	*block |= crc_itu_t(0, (u8 *) be32_block, length * 4);
+
+	return length;
+}
+
+static DEFINE_MUTEX(card_mutex);
+static LIST_HEAD(card_list);
+
+static LIST_HEAD(descriptor_list);
+static int descriptor_count;
+
+#define BIB_CRC(v)		((v) <<  0)
+#define BIB_CRC_LENGTH(v)	((v) << 16)
+#define BIB_INFO_LENGTH(v)	((v) << 24)
+
+#define BIB_LINK_SPEED(v)	((v) <<  0)
+#define BIB_GENERATION(v)	((v) <<  4)
+#define BIB_MAX_ROM(v)		((v) <<  8)
+#define BIB_MAX_RECEIVE(v)	((v) << 12)
+#define BIB_CYC_CLK_ACC(v)	((v) << 16)
+#define BIB_PMC			((1) << 27)
+#define BIB_BMC			((1) << 28)
+#define BIB_ISC			((1) << 29)
+#define BIB_CMC			((1) << 30)
+#define BIB_IMC			((1) << 31)
+
+static u32 *
+generate_config_rom(struct fw_card *card, size_t *config_rom_length)
+{
+	struct fw_descriptor *desc;
+	static u32 config_rom[256];
+	int i, j, length;
+
+	/*
+	 * Initialize contents of config rom buffer.  On the OHCI
+	 * controller, block reads to the config rom access the host
+	 * memory, but quadlet reads access the hardware bus info block
+	 * registers.  That's just crack, but it means we should make
+	 * sure the contents of the bus info block in host memory match
+	 * the version stored in the OHCI registers.
+	 */
+
+	memset(config_rom, 0, sizeof(config_rom));
+	config_rom[0] = BIB_CRC_LENGTH(4) | BIB_INFO_LENGTH(4) | BIB_CRC(0);
+	config_rom[1] = 0x31333934;	/* Bus name "1394" in ASCII. */
+
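+	/*
+	 * Per IEEE 1212, generation values 2..15 mark a config ROM
+	 * whose contents may change from one bus generation to the
+	 * next; 0 and 1 have special meanings, hence the "% 14 + 2".
+	 */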
+	config_rom[2] =
+		BIB_LINK_SPEED(card->link_speed) |
+		BIB_GENERATION(card->config_rom_generation++ % 14 + 2) |
+		BIB_MAX_ROM(2) |
+		BIB_MAX_RECEIVE(card->max_receive) |
+		BIB_BMC | BIB_ISC | BIB_CMC | BIB_IMC;
+	config_rom[3] = card->guid >> 32;
+	config_rom[4] = card->guid;
+
+	/* Generate root directory. */
+	i = 5;
+	config_rom[i++] = 0;
+	config_rom[i++] = 0x0c0083c0; /* node capabilities */
+	j = i + descriptor_count;
+
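+	/*
+	 * Each directory entry is one quadlet: the key in the top 8
+	 * bits and, for entries that reference a block, the offset to
+	 * that block (in quadlets) in the low 24 bits.  j tracks where
+	 * the next descriptor block will be placed, so the entry is
+	 * encoded as "desc->key | (j - i)".
+	 */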
+	/* Generate root directory entries for descriptors. */
+	list_for_each_entry(desc, &descriptor_list, link) {
+		if (desc->immediate > 0)
+			config_rom[i++] = desc->immediate;
+		config_rom[i] = desc->key | (j - i);
+		i++;
+		j += desc->length;
+	}
+
+	/* Update root directory length. */
+	config_rom[5] = (i - 5 - 1) << 16;
+
+	/* End of root directory, now copy in descriptors. */
+	list_for_each_entry(desc, &descriptor_list, link) {
+		memcpy(&config_rom[i], desc->data, desc->length * 4);
+		i += desc->length;
+	}
+
+	/* Calculate CRCs for all blocks in the config rom.  This
+	 * assumes that CRC length and info length are identical for
+	 * the bus info block, which is always the case for this
+	 * implementation. */
+	for (i = 0; i < j; i += length + 1)
+		length = fw_compute_block_crc(config_rom + i);
+
+	*config_rom_length = j;
+
+	return config_rom;
+}
+
+static void
+update_config_roms(void)
+{
+	struct fw_card *card;
+	u32 *config_rom;
+	size_t length;
+
+	list_for_each_entry(card, &card_list, link) {
+		config_rom = generate_config_rom(card, &length);
+		card->driver->set_config_rom(card, config_rom, length);
+	}
+}
+
+int
+fw_core_add_descriptor(struct fw_descriptor *desc)
+{
+	size_t i;
+
+	/*
+	 * Check that the descriptor is valid: the lengths of all
+	 * blocks in the descriptor have to add up to exactly the
+	 * length of the descriptor itself.
+	 */
+	i = 0;
+	while (i < desc->length)
+		i += (desc->data[i] >> 16) + 1;
+
+	if (i != desc->length)
+		return -EINVAL;
+
+	mutex_lock(&card_mutex);
+
+	list_add_tail(&desc->link, &descriptor_list);
+	descriptor_count++;
+	if (desc->immediate > 0)
+		descriptor_count++;
+	update_config_roms();
+
+	mutex_unlock(&card_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL(fw_core_add_descriptor);
+
+void
+fw_core_remove_descriptor(struct fw_descriptor *desc)
+{
+	mutex_lock(&card_mutex);
+
+	list_del(&desc->link);
+	descriptor_count--;
+	if (desc->immediate > 0)
+		descriptor_count--;
+	update_config_roms();
+
+	mutex_unlock(&card_mutex);
+}
+EXPORT_SYMBOL(fw_core_remove_descriptor);
+
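+/*
+ * Gap count optimization table, indexed by the maximum number of hops
+ * from the root node to any other node, taken from the IEEE 1394a gap
+ * count optimization tables.  The power-on default of 63 is used when
+ * the table does not apply.
+ */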
+static const char gap_count_table[] = {
+	63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
+};
+
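+/*
+ * Bus manager election is done with a compare-swap lock on the IRM's
+ * BUS_MANAGER_ID register: the expected old value 0x3f means "no bus
+ * manager yet", and the new value is our own node id.
+ */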
+struct bm_data {
+	struct fw_transaction t;
+	struct {
+		__be32 arg;
+		__be32 data;
+	} lock;
+	u32 old;
+	int rcode;
+	struct completion done;
+};
+
+static void
+complete_bm_lock(struct fw_card *card, int rcode,
+		 void *payload, size_t length, void *data)
+{
+	struct bm_data *bmd = data;
+
+	if (rcode == RCODE_COMPLETE)
+		bmd->old = be32_to_cpu(*(__be32 *) payload);
+	bmd->rcode = rcode;
+	complete(&bmd->done);
+}
+
+static void
+fw_card_bm_work(struct work_struct *work)
+{
+	struct fw_card *card = container_of(work, struct fw_card, work.work);
+	struct fw_device *root;
+	struct bm_data bmd;
+	unsigned long flags;
+	int root_id, new_root_id, irm_id, gap_count, generation, grace;
+	int do_reset = 0;
+
+	spin_lock_irqsave(&card->lock, flags);
+
+	generation = card->generation;
+	root = card->root_node->data;
+	root_id = card->root_node->node_id;
+	grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 10));
+
+	if (card->bm_generation + 1 == generation ||
+	    (card->bm_generation != generation && grace)) {
+		/*
+		 * The first step is to figure out who is IRM and
+		 * then try to become bus manager.  If the IRM is not
+		 * well defined (e.g. does not have an active link
+		 * layer or does not respond to our lock request), we
+		 * will have to do a little vigilante bus management.
+		 * In that case, we do a goto into the gap count logic
+		 * so that when we do the reset, we still optimize the
+		 * gap count.  That could well save a reset in the
+		 * next generation.
+		 */
+
+		irm_id = card->irm_node->node_id;
+		if (!card->irm_node->link_on) {
+			new_root_id = card->local_node->node_id;
+			fw_notify("IRM has link off, making local node (%02x) root.\n",
+				  new_root_id);
+			goto pick_me;
+		}
+
+		bmd.lock.arg = cpu_to_be32(0x3f);
+		bmd.lock.data = cpu_to_be32(card->local_node->node_id);
+
+		spin_unlock_irqrestore(&card->lock, flags);
+
+		init_completion(&bmd.done);
+		fw_send_request(card, &bmd.t, TCODE_LOCK_COMPARE_SWAP,
+				irm_id, generation,
+				SCODE_100, CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID,
+				&bmd.lock, sizeof(bmd.lock),
+				complete_bm_lock, &bmd);
+		wait_for_completion(&bmd.done);
+
+		if (bmd.rcode == RCODE_GENERATION) {
+			/*
+			 * Another bus reset happened. Just return,
+			 * the BM work has been rescheduled.
+			 */
+			return;
+		}
+
+		if (bmd.rcode == RCODE_COMPLETE && bmd.old != 0x3f)
+			/* Somebody else is BM, let them do the work. */
+			return;
+
+		spin_lock_irqsave(&card->lock, flags);
+		if (bmd.rcode != RCODE_COMPLETE) {
+			/*
+			 * The lock request failed, maybe the IRM
+			 * isn't really IRM capable after all. Let's
+			 * do a bus reset and pick the local node as
+			 * root, and thus, IRM.
+			 */
+			new_root_id = card->local_node->node_id;
+			fw_notify("BM lock failed, making local node (%02x) root.\n",
+				  new_root_id);
+			goto pick_me;
+		}
+	} else if (card->bm_generation != generation) {
+		/*
+		 * OK, we weren't BM in the last generation, and it's
+		 * less than 100ms since the last bus reset.  Reschedule
+		 * this task 100ms from now.
+		 */
+		spin_unlock_irqrestore(&card->lock, flags);
+		schedule_delayed_work(&card->work, DIV_ROUND_UP(HZ, 10));
+		return;
+	}
+
+	/*
+	 * We're bus manager for this generation, so next step is to
+	 * make sure we have an active cycle master and do gap count
+	 * optimization.
+	 */
+	card->bm_generation = generation;
+
+	if (root == NULL) {
+		/*
+		 * Either link_on is false, or we failed to read the
+		 * config rom.  In either case, pick another root.
+		 */
+		new_root_id = card->local_node->node_id;
+	} else if (atomic_read(&root->state) != FW_DEVICE_RUNNING) {
+		/*
+		 * If we haven't probed this device yet, bail out now
+		 * and let's try again once that's done.
+		 */
+		spin_unlock_irqrestore(&card->lock, flags);
+		return;
+	} else if (root->config_rom[2] & BIB_CMC) {
+		/*
+		 * FIXME: I suppose we should set the cmstr bit in the
+		 * STATE_CLEAR register of this node, as described in
+		 * 1394-1995, 8.4.2.6.  Also, send out a force root
+		 * packet for this node.
+		 */
+		new_root_id = root_id;
+	} else {
+		/*
+		 * Current root has an active link layer and we
+		 * successfully read the config rom, but it's not
+		 * cycle master capable.
+		 */
+		new_root_id = card->local_node->node_id;
+	}
+
+ pick_me:
+	/* Now figure out what gap count to set. */
+	if (card->topology_type == FW_TOPOLOGY_A &&
+	    card->root_node->max_hops < ARRAY_SIZE(gap_count_table))
+		gap_count = gap_count_table[card->root_node->max_hops];
+	else
+		gap_count = 63;
+
+	/*
+	 * Finally, figure out if we should do a reset or not.  If we've
+	 * done fewer than 5 resets with the same physical topology and we
+	 * have either a new root or a new gap count setting, let's do it.
+	 */
+
+	if (card->bm_retries++ < 5 &&
+	    (card->gap_count != gap_count || new_root_id != root_id))
+		do_reset = 1;
+
+	spin_unlock_irqrestore(&card->lock, flags);
+
+	if (do_reset) {
+		fw_notify("phy config: card %d, new root=%x, gap_count=%d\n",
+			  card->index, new_root_id, gap_count);
+		fw_send_phy_config(card, new_root_id, generation, gap_count);
+		fw_core_initiate_bus_reset(card, 1);
+	}
+}
+
+static void
+flush_timer_callback(unsigned long data)
+{
+	struct fw_card *card = (struct fw_card *)data;
+
+	fw_flush_transactions(card);
+}
+
+void
+fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver,
+		   struct device *device)
+{
+	static atomic_t index = ATOMIC_INIT(-1);
+
+	kref_init(&card->kref);
+	card->index = atomic_inc_return(&index);
+	card->driver = driver;
+	card->device = device;
+	card->current_tlabel = 0;
+	card->tlabel_mask = 0;
+	card->color = 0;
+
+	INIT_LIST_HEAD(&card->transaction_list);
+	spin_lock_init(&card->lock);
+	setup_timer(&card->flush_timer,
+		    flush_timer_callback, (unsigned long)card);
+
+	card->local_node = NULL;
+
+	INIT_DELAYED_WORK(&card->work, fw_card_bm_work);
+}
+EXPORT_SYMBOL(fw_card_initialize);
+
+int
+fw_card_add(struct fw_card *card,
+	    u32 max_receive, u32 link_speed, u64 guid)
+{
+	u32 *config_rom;
+	size_t length;
+
+	card->max_receive = max_receive;
+	card->link_speed = link_speed;
+	card->guid = guid;
+
+	/* Activate link_on bit and contender bit in our self ID packets. */
+	if (card->driver->update_phy_reg(card, 4, 0,
+					 PHY_LINK_ACTIVE | PHY_CONTENDER) < 0)
+		return -EIO;
+
+	/*
+	 * The subsystem grabs a reference when the card is added and
+	 * drops it when the driver calls fw_core_remove_card.
+	 */
+	fw_card_get(card);
+
+	mutex_lock(&card_mutex);
+	config_rom = generate_config_rom(card, &length);
+	list_add_tail(&card->link, &card_list);
+	mutex_unlock(&card_mutex);
+
+	return card->driver->enable(card, config_rom, length);
+}
+EXPORT_SYMBOL(fw_card_add);
+
+
+/*
+ * The next few functions implement a dummy driver that is used once a
+ * card driver shuts down an fw_card.  This allows the driver to
+ * cleanly unload, as all IO to the card will be handled by the dummy
+ * driver instead of calling into the (possibly) unloaded module.  The
+ * dummy driver just fails all IO.
+ */
+
+static int
+dummy_enable(struct fw_card *card, u32 *config_rom, size_t length)
+{
+	BUG();
+	return -1;
+}
+
+static int
+dummy_update_phy_reg(struct fw_card *card, int address,
+		     int clear_bits, int set_bits)
+{
+	return -ENODEV;
+}
+
+static int
+dummy_set_config_rom(struct fw_card *card,
+		     u32 *config_rom, size_t length)
+{
+	/*
+	 * We take the card out of card_list before setting the dummy
+	 * driver, so this should never get called.
+	 */
+	BUG();
+	return -1;
+}
+
+static void
+dummy_send_request(struct fw_card *card, struct fw_packet *packet)
+{
+	packet->callback(packet, card, -ENODEV);
+}
+
+static void
+dummy_send_response(struct fw_card *card, struct fw_packet *packet)
+{
+	packet->callback(packet, card, -ENODEV);
+}
+
+static int
+dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet)
+{
+	return -ENOENT;
+}
+
+static int
+dummy_enable_phys_dma(struct fw_card *card,
+		      int node_id, int generation)
+{
+	return -ENODEV;
+}
+
+static struct fw_card_driver dummy_driver = {
+	.name            = "dummy",
+	.enable          = dummy_enable,
+	.update_phy_reg  = dummy_update_phy_reg,
+	.set_config_rom  = dummy_set_config_rom,
+	.send_request    = dummy_send_request,
+	.cancel_packet   = dummy_cancel_packet,
+	.send_response   = dummy_send_response,
+	.enable_phys_dma = dummy_enable_phys_dma,
+};
+
+void
+fw_core_remove_card(struct fw_card *card)
+{
+	card->driver->update_phy_reg(card, 4,
+				     PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
+	fw_core_initiate_bus_reset(card, 1);
+
+	mutex_lock(&card_mutex);
+	list_del(&card->link);
+	mutex_unlock(&card_mutex);
+
+	/* Set up the dummy driver. */
+	card->driver = &dummy_driver;
+
+	fw_flush_transactions(card);
+
+	fw_destroy_nodes(card);
+
+	fw_card_put(card);
+}
+EXPORT_SYMBOL(fw_core_remove_card);
+
+struct fw_card *
+fw_card_get(struct fw_card *card)
+{
+	kref_get(&card->kref);
+
+	return card;
+}
+EXPORT_SYMBOL(fw_card_get);
+
+static void
+release_card(struct kref *kref)
+{
+	struct fw_card *card = container_of(kref, struct fw_card, kref);
+
+	kfree(card);
+}
+
+/*
+ * An assumption for fw_card_put() is that the card driver allocates
+ * the fw_card struct with kmalloc and that it has been shut down
+ * before the last ref is dropped.
+ */
+void
+fw_card_put(struct fw_card *card)
+{
+	kref_put(&card->kref, release_card);
+}
+EXPORT_SYMBOL(fw_card_put);
+
+int
+fw_core_initiate_bus_reset(struct fw_card *card, int short_reset)
+{
+	int reg = short_reset ? 5 : 1;
+	int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;
+
+	return card->driver->update_phy_reg(card, reg, 0, bit);
+}
+EXPORT_SYMBOL(fw_core_initiate_bus_reset);
diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c
new file mode 100644
index 000000000000..0fa5bd54c6a1
--- /dev/null
+++ b/drivers/firewire/fw-cdev.c
@@ -0,0 +1,961 @@
+/*
+ * Char device for device raw access
+ *
+ * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/wait.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/vmalloc.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/idr.h>
+#include <linux/compat.h>
+#include <linux/firewire-cdev.h>
+#include <asm/uaccess.h>
+#include "fw-transaction.h"
+#include "fw-topology.h"
+#include "fw-device.h"
+
+struct client;
+struct client_resource {
+	struct list_head link;
+	void (*release)(struct client *client, struct client_resource *r);
+	u32 handle;
+};
+
+/*
+ * dequeue_event() just kfree()'s the event, so the event has to be
+ * the first field in the struct.
+ */
+
+struct event {
+	struct { void *data; size_t size; } v[2];
+	struct list_head link;
+};
+
+struct bus_reset {
+	struct event event;
+	struct fw_cdev_event_bus_reset reset;
+};
+
+struct response {
+	struct event event;
+	struct fw_transaction transaction;
+	struct client *client;
+	struct client_resource resource;
+	struct fw_cdev_event_response response;
+};
+
+struct iso_interrupt {
+	struct event event;
+	struct fw_cdev_event_iso_interrupt interrupt;
+};
+
+struct client {
+	u32 version;
+	struct fw_device *device;
+	spinlock_t lock;
+	u32 resource_handle;
+	struct list_head resource_list;
+	struct list_head event_list;
+	wait_queue_head_t wait;
+	u64 bus_reset_closure;
+
+	struct fw_iso_context *iso_context;
+	u64 iso_closure;
+	struct fw_iso_buffer buffer;
+	unsigned long vm_start;
+
+	struct list_head link;
+};
+
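+/*
+ * User space passes pointers as 64-bit integers so that the character
+ * device ABI is the same for 32-bit and 64-bit processes; the cast
+ * through unsigned long keeps 32-bit builds free of conversion
+ * warnings.
+ */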
+static inline void __user *
+u64_to_uptr(__u64 value)
+{
+	return (void __user *)(unsigned long)value;
+}
+
+static inline __u64
+uptr_to_u64(void __user *ptr)
+{
+	return (__u64)(unsigned long)ptr;
+}
+
+static int fw_device_op_open(struct inode *inode, struct file *file)
+{
+	struct fw_device *device;
+	struct client *client;
+	unsigned long flags;
+
+	device = fw_device_from_devt(inode->i_rdev);
+	if (device == NULL)
+		return -ENODEV;
+
+	client = kzalloc(sizeof(*client), GFP_KERNEL);
+	if (client == NULL)
+		return -ENOMEM;
+
+	client->device = fw_device_get(device);
+	INIT_LIST_HEAD(&client->event_list);
+	INIT_LIST_HEAD(&client->resource_list);
+	spin_lock_init(&client->lock);
+	init_waitqueue_head(&client->wait);
+
+	file->private_data = client;
+
+	spin_lock_irqsave(&device->card->lock, flags);
+	list_add_tail(&client->link, &device->client_list);
+	spin_unlock_irqrestore(&device->card->lock, flags);
+
+	return 0;
+}
+
+static void queue_event(struct client *client, struct event *event,
+			void *data0, size_t size0, void *data1, size_t size1)
+{
+	unsigned long flags;
+
+	event->v[0].data = data0;
+	event->v[0].size = size0;
+	event->v[1].data = data1;
+	event->v[1].size = size1;
+
+	spin_lock_irqsave(&client->lock, flags);
+
+	list_add_tail(&event->link, &client->event_list);
+	wake_up_interruptible(&client->wait);
+
+	spin_unlock_irqrestore(&client->lock, flags);
+}
+
+static int
+dequeue_event(struct client *client, char __user *buffer, size_t count)
+{
+	unsigned long flags;
+	struct event *event;
+	size_t size, total;
+	int i, retval;
+
+	retval = wait_event_interruptible(client->wait,
+					  !list_empty(&client->event_list) ||
+					  fw_device_is_shutdown(client->device));
+	if (retval < 0)
+		return retval;
+
+	if (list_empty(&client->event_list) &&
+		       fw_device_is_shutdown(client->device))
+		return -ENODEV;
+
+	spin_lock_irqsave(&client->lock, flags);
+	event = container_of(client->event_list.next, struct event, link);
+	list_del(&event->link);
+	spin_unlock_irqrestore(&client->lock, flags);
+
+	total = 0;
+	for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
+		size = min(event->v[i].size, count - total);
+		if (copy_to_user(buffer + total, event->v[i].data, size)) {
+			retval = -EFAULT;
+			goto out;
+		}
+		total += size;
+	}
+	retval = total;
+
+ out:
+	kfree(event);
+
+	return retval;
+}
+
+static ssize_t
+fw_device_op_read(struct file *file,
+		  char __user *buffer, size_t count, loff_t *offset)
+{
+	struct client *client = file->private_data;
+
+	return dequeue_event(client, buffer, count);
+}
+
+static void
+fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
+		     struct client *client)
+{
+	struct fw_card *card = client->device->card;
+
+	event->closure	     = client->bus_reset_closure;
+	event->type          = FW_CDEV_EVENT_BUS_RESET;
+	event->node_id       = client->device->node_id;
+	event->local_node_id = card->local_node->node_id;
+	event->bm_node_id    = 0; /* FIXME: We don't track the BM. */
+	event->irm_node_id   = card->irm_node->node_id;
+	event->root_node_id  = card->root_node->node_id;
+	event->generation    = card->generation;
+}
+
+static void
+for_each_client(struct fw_device *device,
+		void (*callback)(struct client *client))
+{
+	struct fw_card *card = device->card;
+	struct client *c;
+	unsigned long flags;
+
+	spin_lock_irqsave(&card->lock, flags);
+
+	list_for_each_entry(c, &device->client_list, link)
+		callback(c);
+
+	spin_unlock_irqrestore(&card->lock, flags);
+}
+
+static void
+queue_bus_reset_event(struct client *client)
+{
+	struct bus_reset *bus_reset;
+
+	bus_reset = kzalloc(sizeof(*bus_reset), GFP_ATOMIC);
+	if (bus_reset == NULL) {
+		fw_notify("Out of memory when allocating bus reset event\n");
+		return;
+	}
+
+	fill_bus_reset_event(&bus_reset->reset, client);
+
+	queue_event(client, &bus_reset->event,
+		    &bus_reset->reset, sizeof(bus_reset->reset), NULL, 0);
+}
+
+void fw_device_cdev_update(struct fw_device *device)
+{
+	for_each_client(device, queue_bus_reset_event);
+}
+
+static void wake_up_client(struct client *client)
+{
+	wake_up_interruptible(&client->wait);
+}
+
+void fw_device_cdev_remove(struct fw_device *device)
+{
+	for_each_client(device, wake_up_client);
+}
+
+static int ioctl_get_info(struct client *client, void *buffer)
+{
+	struct fw_cdev_get_info *get_info = buffer;
+	struct fw_cdev_event_bus_reset bus_reset;
+
+	client->version = get_info->version;
+	get_info->version = FW_CDEV_VERSION;
+
+	if (get_info->rom != 0) {
+		void __user *uptr = u64_to_uptr(get_info->rom);
+		size_t want = get_info->rom_length;
+		size_t have = client->device->config_rom_length * 4;
+
+		if (copy_to_user(uptr, client->device->config_rom,
+				 min(want, have)))
+			return -EFAULT;
+	}
+	get_info->rom_length = client->device->config_rom_length * 4;
+
+	client->bus_reset_closure = get_info->bus_reset_closure;
+	if (get_info->bus_reset != 0) {
+		void __user *uptr = u64_to_uptr(get_info->bus_reset);
+
+		fill_bus_reset_event(&bus_reset, client);
+		if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset)))
+			return -EFAULT;
+	}
+
+	get_info->card = client->device->card->index;
+
+	return 0;
+}
+
+static void
+add_client_resource(struct client *client, struct client_resource *resource)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&client->lock, flags);
+	list_add_tail(&resource->link, &client->resource_list);
+	resource->handle = client->resource_handle++;
+	spin_unlock_irqrestore(&client->lock, flags);
+}
+
+static int
+release_client_resource(struct client *client, u32 handle,
+			struct client_resource **resource)
+{
+	struct client_resource *r;
+	unsigned long flags;
+
+	spin_lock_irqsave(&client->lock, flags);
+	list_for_each_entry(r, &client->resource_list, link) {
+		if (r->handle == handle) {
+			list_del(&r->link);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&client->lock, flags);
+
+	/* The loop ran to completion: no resource with that handle. */
+	if (&r->link == &client->resource_list)
+		return -EINVAL;
+
+	if (resource)
+		*resource = r;
+	else
+		r->release(client, r);
+
+	return 0;
+}
+
+static void
+release_transaction(struct client *client, struct client_resource *resource)
+{
+	struct response *response =
+		container_of(resource, struct response, resource);
+
+	fw_cancel_transaction(client->device->card, &response->transaction);
+}
+
+static void
+complete_transaction(struct fw_card *card, int rcode,
+		     void *payload, size_t length, void *data)
+{
+	struct response *response = data;
+	struct client *client = response->client;
+	unsigned long flags;
+
+	if (length < response->response.length)
+		response->response.length = length;
+	if (rcode == RCODE_COMPLETE)
+		memcpy(response->response.data, payload,
+		       response->response.length);
+
+	spin_lock_irqsave(&client->lock, flags);
+	list_del(&response->resource.link);
+	spin_unlock_irqrestore(&client->lock, flags);
+
+	response->response.type   = FW_CDEV_EVENT_RESPONSE;
+	response->response.rcode  = rcode;
+	queue_event(client, &response->event,
+		    &response->response, sizeof(response->response),
+		    response->response.data, response->response.length);
+}
+
+static int ioctl_send_request(struct client *client, void *buffer)
+{
+	struct fw_device *device = client->device;
+	struct fw_cdev_send_request *request = buffer;
+	struct response *response;
+
+	/* What is the biggest size we'll accept, really? */
+	if (request->length > 4096)
+		return -EINVAL;
+
+	response = kmalloc(sizeof(*response) + request->length, GFP_KERNEL);
+	if (response == NULL)
+		return -ENOMEM;
+
+	response->client = client;
+	response->response.length = request->length;
+	response->response.closure = request->closure;
+
+	if (request->data &&
+	    copy_from_user(response->response.data,
+			   u64_to_uptr(request->data), request->length)) {
+		kfree(response);
+		return -EFAULT;
+	}
+
+	response->resource.release = release_transaction;
+	add_client_resource(client, &response->resource);
+
+	fw_send_request(device->card, &response->transaction,
+			request->tcode & 0x1f,
+			device->node->node_id,
+			request->generation,
+			device->node->max_speed,
+			request->offset,
+			response->response.data, request->length,
+			complete_transaction, response);
+
+	if (request->data)
+		return sizeof(*request) + request->length;
+	else
+		return sizeof(*request);
+}
+
+struct address_handler {
+	struct fw_address_handler handler;
+	__u64 closure;
+	struct client *client;
+	struct client_resource resource;
+};
+
+struct request {
+	struct fw_request *request;
+	void *data;
+	size_t length;
+	struct client_resource resource;
+};
+
+struct request_event {
+	struct event event;
+	struct fw_cdev_event_request request;
+};
+
+static void
+release_request(struct client *client, struct client_resource *resource)
+{
+	struct request *request =
+		container_of(resource, struct request, resource);
+
+	fw_send_response(client->device->card, request->request,
+			 RCODE_CONFLICT_ERROR);
+	kfree(request);
+}
+
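+/*
+ * An inbound request to an address range allocated by user space is
+ * queued as an event; the fw_request is kept alive until user space
+ * answers it with the send_response ioctl, or until the client goes
+ * away, in which case release_request() answers with
+ * RCODE_CONFLICT_ERROR.
+ */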
+static void
+handle_request(struct fw_card *card, struct fw_request *r,
+	       int tcode, int destination, int source,
+	       int generation, int speed,
+	       unsigned long long offset,
+	       void *payload, size_t length, void *callback_data)
+{
+	struct address_handler *handler = callback_data;
+	struct request *request;
+	struct request_event *e;
+	struct client *client = handler->client;
+
+	request = kmalloc(sizeof(*request), GFP_ATOMIC);
+	e = kmalloc(sizeof(*e), GFP_ATOMIC);
+	if (request == NULL || e == NULL) {
+		kfree(request);
+		kfree(e);
+		fw_send_response(card, r, RCODE_CONFLICT_ERROR);
+		return;
+	}
+
+	request->request = r;
+	request->data    = payload;
+	request->length  = length;
+
+	request->resource.release = release_request;
+	add_client_resource(client, &request->resource);
+
+	e->request.type    = FW_CDEV_EVENT_REQUEST;
+	e->request.tcode   = tcode;
+	e->request.offset  = offset;
+	e->request.length  = length;
+	e->request.handle  = request->resource.handle;
+	e->request.closure = handler->closure;
+
+	queue_event(client, &e->event,
+		    &e->request, sizeof(e->request), payload, length);
+}
+
+static void
+release_address_handler(struct client *client,
+			struct client_resource *resource)
+{
+	struct address_handler *handler =
+		container_of(resource, struct address_handler, resource);
+
+	fw_core_remove_address_handler(&handler->handler);
+	kfree(handler);
+}
+
+static int ioctl_allocate(struct client *client, void *buffer)
+{
+	struct fw_cdev_allocate *request = buffer;
+	struct address_handler *handler;
+	struct fw_address_region region;
+
+	handler = kmalloc(sizeof(*handler), GFP_KERNEL);
+	if (handler == NULL)
+		return -ENOMEM;
+
+	region.start = request->offset;
+	region.end = request->offset + request->length;
+	handler->handler.length = request->length;
+	handler->handler.address_callback = handle_request;
+	handler->handler.callback_data = handler;
+	handler->closure = request->closure;
+	handler->client = client;
+
+	if (fw_core_add_address_handler(&handler->handler, &region) < 0) {
+		kfree(handler);
+		return -EBUSY;
+	}
+
+	handler->resource.release = release_address_handler;
+	add_client_resource(client, &handler->resource);
+	request->handle = handler->resource.handle;
+
+	return 0;
+}
+
+static int ioctl_deallocate(struct client *client, void *buffer)
+{
+	struct fw_cdev_deallocate *request = buffer;
+
+	return release_client_resource(client, request->handle, NULL);
+}
+
+static int ioctl_send_response(struct client *client, void *buffer)
+{
+	struct fw_cdev_send_response *request = buffer;
+	struct client_resource *resource;
+	struct request *r;
+
+	if (release_client_resource(client, request->handle, &resource) < 0)
+		return -EINVAL;
+	r = container_of(resource, struct request, resource);
+	if (request->length < r->length)
+		r->length = request->length;
+	if (copy_from_user(r->data, u64_to_uptr(request->data), r->length))
+		return -EFAULT;
+
+	fw_send_response(client->device->card, r->request, request->rcode);
+	kfree(r);
+
+	return 0;
+}
+
+static int ioctl_initiate_bus_reset(struct client *client, void *buffer)
+{
+	struct fw_cdev_initiate_bus_reset *request = buffer;
+	int short_reset;
+
+	short_reset = (request->type == FW_CDEV_SHORT_RESET);
+
+	return fw_core_initiate_bus_reset(client->device->card, short_reset);
+}
+
+struct descriptor {
+	struct fw_descriptor d;
+	struct client_resource resource;
+	u32 data[0];
+};
+
+static void release_descriptor(struct client *client,
+			       struct client_resource *resource)
+{
+	struct descriptor *descriptor =
+		container_of(resource, struct descriptor, resource);
+
+	fw_core_remove_descriptor(&descriptor->d);
+	kfree(descriptor);
+}
+
+static int ioctl_add_descriptor(struct client *client, void *buffer)
+{
+	struct fw_cdev_add_descriptor *request = buffer;
+	struct descriptor *descriptor;
+	int retval;
+
+	if (request->length > 256)
+		return -EINVAL;
+
+	descriptor =
+		kmalloc(sizeof(*descriptor) + request->length * 4, GFP_KERNEL);
+	if (descriptor == NULL)
+		return -ENOMEM;
+
+	if (copy_from_user(descriptor->data,
+			   u64_to_uptr(request->data), request->length * 4)) {
+		kfree(descriptor);
+		return -EFAULT;
+	}
+
+	descriptor->d.length = request->length;
+	descriptor->d.immediate = request->immediate;
+	descriptor->d.key = request->key;
+	descriptor->d.data = descriptor->data;
+
+	retval = fw_core_add_descriptor(&descriptor->d);
+	if (retval < 0) {
+		kfree(descriptor);
+		return retval;
+	}
+
+	descriptor->resource.release = release_descriptor;
+	add_client_resource(client, &descriptor->resource);
+	request->handle = descriptor->resource.handle;
+
+	return 0;
+}
+
+static int ioctl_remove_descriptor(struct client *client, void *buffer)
+{
+	struct fw_cdev_remove_descriptor *request = buffer;
+
+	return release_client_resource(client, request->handle, NULL);
+}
+
+static void
+iso_callback(struct fw_iso_context *context, u32 cycle,
+	     size_t header_length, void *header, void *data)
+{
+	struct client *client = data;
+	struct iso_interrupt *interrupt;
+
+	interrupt = kzalloc(sizeof(*interrupt) + header_length, GFP_ATOMIC);
+	if (interrupt == NULL)
+		return;
+
+	interrupt->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT;
+	interrupt->interrupt.closure   = client->iso_closure;
+	interrupt->interrupt.cycle     = cycle;
+	interrupt->interrupt.header_length = header_length;
+	memcpy(interrupt->interrupt.header, header, header_length);
+	queue_event(client, &interrupt->event,
+		    &interrupt->interrupt,
+		    sizeof(interrupt->interrupt) + header_length, NULL, 0);
+}
+
+static int ioctl_create_iso_context(struct client *client, void *buffer)
+{
+	struct fw_cdev_create_iso_context *request = buffer;
+
+	if (request->channel > 63)
+		return -EINVAL;
+
+	switch (request->type) {
+	case FW_ISO_CONTEXT_RECEIVE:
+		if (request->header_size < 4 || (request->header_size & 3))
+			return -EINVAL;
+
+		break;
+
+	case FW_ISO_CONTEXT_TRANSMIT:
+		if (request->speed > SCODE_3200)
+			return -EINVAL;
+
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	client->iso_closure = request->closure;
+	client->iso_context = fw_iso_context_create(client->device->card,
+						    request->type,
+						    request->channel,
+						    request->speed,
+						    request->header_size,
+						    iso_callback, client);
+	if (IS_ERR(client->iso_context))
+		return PTR_ERR(client->iso_context);
+
+	/* We only support one context at this time. */
+	request->handle = 0;
+
+	return 0;
+}
+
+static int ioctl_queue_iso(struct client *client, void *buffer)
+{
+	struct fw_cdev_queue_iso *request = buffer;
+	struct fw_cdev_iso_packet __user *p, *end, *next;
+	struct fw_iso_context *ctx = client->iso_context;
+	unsigned long payload, buffer_end, header_length;
+	int count;
+	struct {
+		struct fw_iso_packet packet;
+		u8 header[256];
+	} u;
+
+	if (ctx == NULL || request->handle != 0)
+		return -EINVAL;
+
+	/*
+	 * If the user passes a non-NULL data pointer, has mmap()'ed
+	 * the iso buffer, and the pointer points inside the buffer,
+	 * we set up the payload pointers accordingly.  Otherwise we
+	 * set them both to 0, which will still let packets with
+	 * payload_length == 0 through.  In other words, if no packets
+	 * use the indirect payload, the iso buffer need not be mapped
+	 * and the request->data pointer is ignored.
+	 */
+
+	payload = (unsigned long)request->data - client->vm_start;
+	buffer_end = client->buffer.page_count << PAGE_SHIFT;
+	if (request->data == 0 || client->buffer.pages == NULL ||
+	    payload >= buffer_end) {
+		payload = 0;
+		buffer_end = 0;
+	}
+
+	if (!access_ok(VERIFY_READ, request->packets, request->size))
+		return -EFAULT;
+
+	p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request->packets);
+	end = (void __user *)p + request->size;
+	count = 0;
+	while (p < end) {
+		if (__copy_from_user(&u.packet, p, sizeof(*p)))
+			return -EFAULT;
+
+		if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
+			header_length = u.packet.header_length;
+		} else {
+			/*
+			 * We require that header_length is a multiple of
+			 * the fixed header size, ctx->header_size.
+			 */
+			if (ctx->header_size == 0) {
+				if (u.packet.header_length > 0)
+					return -EINVAL;
+			} else if (u.packet.header_length % ctx->header_size != 0) {
+				return -EINVAL;
+			}
+			header_length = 0;
+		}
+
+		next = (struct fw_cdev_iso_packet __user *)
+			&p->header[header_length / 4];
+		if (next > end)
+			return -EINVAL;
+		if (__copy_from_user
+		    (u.packet.header, p->header, header_length))
+			return -EFAULT;
+		if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
+		    u.packet.header_length + u.packet.payload_length > 0)
+			return -EINVAL;
+		if (payload + u.packet.payload_length > buffer_end)
+			return -EINVAL;
+
+		if (fw_iso_context_queue(ctx, &u.packet,
+					 &client->buffer, payload))
+			break;
+
+		p = next;
+		payload += u.packet.payload_length;
+		count++;
+	}
+
+	request->size    -= uptr_to_u64(p) - request->packets;
+	request->packets  = uptr_to_u64(p);
+	request->data     = client->vm_start + payload;
+
+	return count;
+}
+
+static int ioctl_start_iso(struct client *client, void *buffer)
+{
+	struct fw_cdev_start_iso *request = buffer;
+
+	if (request->handle != 0)
+		return -EINVAL;
+	if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) {
+		if (request->tags == 0 || request->tags > 15)
+			return -EINVAL;
+
+		if (request->sync > 15)
+			return -EINVAL;
+	}
+
+	return fw_iso_context_start(client->iso_context, request->cycle,
+				    request->sync, request->tags);
+}
+
+static int ioctl_stop_iso(struct client *client, void *buffer)
+{
+	struct fw_cdev_stop_iso *request = buffer;
+
+	if (request->handle != 0)
+		return -EINVAL;
+
+	return fw_iso_context_stop(client->iso_context);
+}
+
+static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
+	ioctl_get_info,
+	ioctl_send_request,
+	ioctl_allocate,
+	ioctl_deallocate,
+	ioctl_send_response,
+	ioctl_initiate_bus_reset,
+	ioctl_add_descriptor,
+	ioctl_remove_descriptor,
+	ioctl_create_iso_context,
+	ioctl_queue_iso,
+	ioctl_start_iso,
+	ioctl_stop_iso,
+};
+
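+/*
+ * Generic ioctl marshalling: _IOC_NR(cmd) indexes straight into
+ * ioctl_handlers[], and the argument struct is copied in and/or out
+ * through a 256 byte stack buffer according to the direction bits
+ * encoded in the command number.
+ */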
+static int
+dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg)
+{
+	char buffer[256];
+	int retval;
+
+	if (_IOC_TYPE(cmd) != '#' ||
+	    _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
+		return -EINVAL;
+
+	if (_IOC_DIR(cmd) & _IOC_WRITE) {
+		if (_IOC_SIZE(cmd) > sizeof(buffer) ||
+		    copy_from_user(buffer, arg, _IOC_SIZE(cmd)))
+			return -EFAULT;
+	}
+
+	retval = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
+	if (retval < 0)
+		return retval;
+
+	if (_IOC_DIR(cmd) & _IOC_READ) {
+		if (_IOC_SIZE(cmd) > sizeof(buffer) ||
+		    copy_to_user(arg, buffer, _IOC_SIZE(cmd)))
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
+static long
+fw_device_op_ioctl(struct file *file,
+		   unsigned int cmd, unsigned long arg)
+{
+	struct client *client = file->private_data;
+
+	return dispatch_ioctl(client, cmd, (void __user *) arg);
+}
+
+#ifdef CONFIG_COMPAT
+static long
+fw_device_op_compat_ioctl(struct file *file,
+			  unsigned int cmd, unsigned long arg)
+{
+	struct client *client = file->private_data;
+
+	return dispatch_ioctl(client, cmd, compat_ptr(arg));
+}
+#endif
+
+static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct client *client = file->private_data;
+	enum dma_data_direction direction;
+	unsigned long size;
+	int page_count, retval;
+
+	/* FIXME: We could support multiple buffers, but we don't. */
+	if (client->buffer.pages != NULL)
+		return -EBUSY;
+
+	if (!(vma->vm_flags & VM_SHARED))
+		return -EINVAL;
+
+	if (vma->vm_start & ~PAGE_MASK)
+		return -EINVAL;
+
+	client->vm_start = vma->vm_start;
+	size = vma->vm_end - vma->vm_start;
+	page_count = size >> PAGE_SHIFT;
+	if (size & ~PAGE_MASK)
+		return -EINVAL;
+
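+	/*
+	 * A writable mapping is used for transmit contexts (data flows
+	 * from user space to the device), a read-only mapping for
+	 * receive contexts.
+	 */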
+	if (vma->vm_flags & VM_WRITE)
+		direction = DMA_TO_DEVICE;
+	else
+		direction = DMA_FROM_DEVICE;
+
+	retval = fw_iso_buffer_init(&client->buffer, client->device->card,
+				    page_count, direction);
+	if (retval < 0)
+		return retval;
+
+	retval = fw_iso_buffer_map(&client->buffer, vma);
+	if (retval < 0)
+		fw_iso_buffer_destroy(&client->buffer, client->device->card);
+
+	return retval;
+}
+
+static int fw_device_op_release(struct inode *inode, struct file *file)
+{
+	struct client *client = file->private_data;
+	struct event *e, *next_e;
+	struct client_resource *r, *next_r;
+	unsigned long flags;
+
+	if (client->buffer.pages)
+		fw_iso_buffer_destroy(&client->buffer, client->device->card);
+
+	if (client->iso_context)
+		fw_iso_context_destroy(client->iso_context);
+
+	list_for_each_entry_safe(r, next_r, &client->resource_list, link)
+		r->release(client, r);
+
+	/*
+	 * FIXME: We should wait for the async tasklets to stop
+	 * running before freeing the memory.
+	 */
+
+	list_for_each_entry_safe(e, next_e, &client->event_list, link)
+		kfree(e);
+
+	spin_lock_irqsave(&client->device->card->lock, flags);
+	list_del(&client->link);
+	spin_unlock_irqrestore(&client->device->card->lock, flags);
+
+	fw_device_put(client->device);
+	kfree(client);
+
+	return 0;
+}
+
+static unsigned int fw_device_op_poll(struct file *file, poll_table *pt)
+{
+	struct client *client = file->private_data;
+	unsigned int mask = 0;
+
+	poll_wait(file, &client->wait, pt);
+
+	if (fw_device_is_shutdown(client->device))
+		mask |= POLLHUP | POLLERR;
+	if (!list_empty(&client->event_list))
+		mask |= POLLIN | POLLRDNORM;
+
+	return mask;
+}
+
+const struct file_operations fw_device_ops = {
+	.owner		= THIS_MODULE,
+	.open		= fw_device_op_open,
+	.read		= fw_device_op_read,
+	.unlocked_ioctl	= fw_device_op_ioctl,
+	.poll		= fw_device_op_poll,
+	.release	= fw_device_op_release,
+	.mmap		= fw_device_op_mmap,
+
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= fw_device_op_compat_ioctl,
+#endif
+};
diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/fw-device.c
new file mode 100644
index 000000000000..c1ce465d9710
--- /dev/null
+++ b/drivers/firewire/fw-device.c
@@ -0,0 +1,813 @@
+/*
+ * Device probing and sysfs code.
+ *
+ * Copyright (C) 2005-2006  Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/wait.h>
+#include <linux/errno.h>
+#include <linux/kthread.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/idr.h>
+#include <linux/rwsem.h>
+#include <asm/semaphore.h>
+#include <linux/ctype.h>
+#include "fw-transaction.h"
+#include "fw-topology.h"
+#include "fw-device.h"
+
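+/*
+ * A CSR ROM directory is a header quadlet, with the number of entries
+ * in its upper 16 bits, followed by one quadlet per entry: the key in
+ * the top 8 bits, the value in the low 24 bits.
+ */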
+void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 *p)
+{
+	ci->p = p + 1;
+	ci->end = ci->p + (p[0] >> 16);
+}
+EXPORT_SYMBOL(fw_csr_iterator_init);
+
+int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value)
+{
+	*key = *ci->p >> 24;
+	*value = *ci->p & 0xffffff;
+
+	return ci->p++ < ci->end;
+}
+EXPORT_SYMBOL(fw_csr_iterator_next);
+
+static int is_fw_unit(struct device *dev);
+
+static int match_unit_directory(u32 *directory, const struct fw_device_id *id)
+{
+	struct fw_csr_iterator ci;
+	int key, value, match;
+
+	match = 0;
+	fw_csr_iterator_init(&ci, directory);
+	while (fw_csr_iterator_next(&ci, &key, &value)) {
+		if (key == CSR_VENDOR && value == id->vendor)
+			match |= FW_MATCH_VENDOR;
+		if (key == CSR_MODEL && value == id->model)
+			match |= FW_MATCH_MODEL;
+		if (key == CSR_SPECIFIER_ID && value == id->specifier_id)
+			match |= FW_MATCH_SPECIFIER_ID;
+		if (key == CSR_VERSION && value == id->version)
+			match |= FW_MATCH_VERSION;
+	}
+
+	return (match & id->match_flags) == id->match_flags;
+}
+
+static int fw_unit_match(struct device *dev, struct device_driver *drv)
+{
+	struct fw_unit *unit = fw_unit(dev);
+	struct fw_driver *driver = fw_driver(drv);
+	int i;
+
+	/* We only allow binding to fw_units. */
+	if (!is_fw_unit(dev))
+		return 0;
+
+	for (i = 0; driver->id_table[i].match_flags != 0; i++) {
+		if (match_unit_directory(unit->directory, &driver->id_table[i]))
+			return 1;
+	}
+
+	return 0;
+}
+
+static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size)
+{
+	struct fw_device *device = fw_device(unit->device.parent);
+	struct fw_csr_iterator ci;
+
+	int key, value;
+	int vendor = 0;
+	int model = 0;
+	int specifier_id = 0;
+	int version = 0;
+
+	fw_csr_iterator_init(&ci, &device->config_rom[5]);
+	while (fw_csr_iterator_next(&ci, &key, &value)) {
+		switch (key) {
+		case CSR_VENDOR:
+			vendor = value;
+			break;
+		case CSR_MODEL:
+			model = value;
+			break;
+		}
+	}
+
+	fw_csr_iterator_init(&ci, unit->directory);
+	while (fw_csr_iterator_next(&ci, &key, &value)) {
+		switch (key) {
+		case CSR_SPECIFIER_ID:
+			specifier_id = value;
+			break;
+		case CSR_VERSION:
+			version = value;
+			break;
+		}
+	}
+
+	return snprintf(buffer, buffer_size,
+			"ieee1394:ven%08Xmo%08Xsp%08Xver%08X",
+			vendor, model, specifier_id, version);
+}
+
+static int
+fw_unit_uevent(struct device *dev, char **envp, int num_envp,
+	       char *buffer, int buffer_size)
+{
+	struct fw_unit *unit = fw_unit(dev);
+	char modalias[64];
+	int length = 0;
+	int i = 0;
+
+	get_modalias(unit, modalias, sizeof(modalias));
+
+	if (add_uevent_var(envp, num_envp, &i,
+			   buffer, buffer_size, &length,
+			   "MODALIAS=%s", modalias))
+		return -ENOMEM;
+
+	envp[i] = NULL;
+
+	return 0;
+}
+
+struct bus_type fw_bus_type = {
+	.name = "firewire",
+	.match = fw_unit_match,
+};
+EXPORT_SYMBOL(fw_bus_type);
+
+struct fw_device *fw_device_get(struct fw_device *device)
+{
+	get_device(&device->device);
+
+	return device;
+}
+
+void fw_device_put(struct fw_device *device)
+{
+	put_device(&device->device);
+}
+
+static void fw_device_release(struct device *dev)
+{
+	struct fw_device *device = fw_device(dev);
+	unsigned long flags;
+
+	/*
+	 * Take the card lock so we don't set this to NULL while a
+	 * FW_NODE_UPDATED callback is being handled.
+	 */
+	spin_lock_irqsave(&device->card->lock, flags);
+	device->node->data = NULL;
+	spin_unlock_irqrestore(&device->card->lock, flags);
+
+	fw_node_put(device->node);
+	fw_card_put(device->card);
+	kfree(device->config_rom);
+	kfree(device);
+}
+
+int fw_device_enable_phys_dma(struct fw_device *device)
+{
+	return device->card->driver->enable_phys_dma(device->card,
+						     device->node_id,
+						     device->generation);
+}
+EXPORT_SYMBOL(fw_device_enable_phys_dma);
+
+struct config_rom_attribute {
+	struct device_attribute attr;
+	u32 key;
+};
+
+static ssize_t
+show_immediate(struct device *dev, struct device_attribute *dattr, char *buf)
+{
+	struct config_rom_attribute *attr =
+		container_of(dattr, struct config_rom_attribute, attr);
+	struct fw_csr_iterator ci;
+	u32 *dir;
+	int key, value;
+
+	if (is_fw_unit(dev))
+		dir = fw_unit(dev)->directory;
+	else
+		dir = fw_device(dev)->config_rom + 5;
+
+	fw_csr_iterator_init(&ci, dir);
+	while (fw_csr_iterator_next(&ci, &key, &value))
+		if (attr->key == key)
+			return snprintf(buf, buf ? PAGE_SIZE : 0,
+					"0x%06x\n", value);
+
+	return -ENOENT;
+}
+
+#define IMMEDIATE_ATTR(name, key)				\
+	{ __ATTR(name, S_IRUGO, show_immediate, NULL), key }
+
+static ssize_t
+show_text_leaf(struct device *dev, struct device_attribute *dattr, char *buf)
+{
+	struct config_rom_attribute *attr =
+		container_of(dattr, struct config_rom_attribute, attr);
+	struct fw_csr_iterator ci;
+	u32 *dir, *block = NULL, *p, *end;
+	int length, key, value, last_key = 0;
+	char *b;
+
+	if (is_fw_unit(dev))
+		dir = fw_unit(dev)->directory;
+	else
+		dir = fw_device(dev)->config_rom + 5;
+
+	fw_csr_iterator_init(&ci, dir);
+	while (fw_csr_iterator_next(&ci, &key, &value)) {
+		if (attr->key == last_key &&
+		    key == (CSR_DESCRIPTOR | CSR_LEAF))
+			block = ci.p - 1 + value;
+		last_key = key;
+	}
+
+	if (block == NULL)
+		return -ENOENT;
+
+	length = min(block[0] >> 16, 256U);
+	if (length < 3)
+		return -ENOENT;
+
+	if (block[1] != 0 || block[2] != 0)
+		/* Unknown encoding. */
+		return -ENOENT;
+
+	if (buf == NULL)
+		return length * 4;
+
+	b = buf;
+	end = &block[length + 1];
+	for (p = &block[3]; p < end; p++, b += 4)
+		* (u32 *) b = (__force u32) __cpu_to_be32(*p);
+
+	/* Strip trailing whitespace and add newline. */
+	while (b--, (isspace(*b) || *b == '\0') && b > buf);
+	strcpy(b + 1, "\n");
+
+	return b + 2 - buf;
+}
+
+#define TEXT_LEAF_ATTR(name, key)				\
+	{ __ATTR(name, S_IRUGO, show_text_leaf, NULL), key }
+
+static struct config_rom_attribute config_rom_attributes[] = {
+	IMMEDIATE_ATTR(vendor, CSR_VENDOR),
+	IMMEDIATE_ATTR(hardware_version, CSR_HARDWARE_VERSION),
+	IMMEDIATE_ATTR(specifier_id, CSR_SPECIFIER_ID),
+	IMMEDIATE_ATTR(version, CSR_VERSION),
+	IMMEDIATE_ATTR(model, CSR_MODEL),
+	TEXT_LEAF_ATTR(vendor_name, CSR_VENDOR),
+	TEXT_LEAF_ATTR(model_name, CSR_MODEL),
+	TEXT_LEAF_ATTR(hardware_version_name, CSR_HARDWARE_VERSION),
+};
+
+static void
+init_fw_attribute_group(struct device *dev,
+			struct device_attribute *attrs,
+			struct fw_attribute_group *group)
+{
+	struct device_attribute *attr;
+	int i, j;
+
+	for (j = 0; attrs[j].attr.name != NULL; j++)
+		group->attrs[j] = &attrs[j].attr;
+
+	for (i = 0; i < ARRAY_SIZE(config_rom_attributes); i++) {
+		attr = &config_rom_attributes[i].attr;
+		if (attr->show(dev, attr, NULL) < 0)
+			continue;
+		group->attrs[j++] = &attr->attr;
+	}
+
+	BUG_ON(j >= ARRAY_SIZE(group->attrs));
+	group->attrs[j++] = NULL;
+	group->groups[0] = &group->group;
+	group->groups[1] = NULL;
+	group->group.attrs = group->attrs;
+	dev->groups = group->groups;
+}
+
+static ssize_t
+modalias_show(struct device *dev,
+	      struct device_attribute *attr, char *buf)
+{
+	struct fw_unit *unit = fw_unit(dev);
+	int length;
+
+	length = get_modalias(unit, buf, PAGE_SIZE);
+	strcpy(buf + length, "\n");
+
+	return length + 1;
+}
+
+static ssize_t
+rom_index_show(struct device *dev,
+	       struct device_attribute *attr, char *buf)
+{
+	struct fw_device *device = fw_device(dev->parent);
+	struct fw_unit *unit = fw_unit(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			(int)(unit->directory - device->config_rom));
+}
+
+static struct device_attribute fw_unit_attributes[] = {
+	__ATTR_RO(modalias),
+	__ATTR_RO(rom_index),
+	__ATTR_NULL,
+};
+
+static ssize_t
+config_rom_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct fw_device *device = fw_device(dev);
+
+	memcpy(buf, device->config_rom, device->config_rom_length * 4);
+
+	return device->config_rom_length * 4;
+}
+
+static ssize_t
+guid_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct fw_device *device = fw_device(dev);
+	u64 guid;
+
+	guid = ((u64)device->config_rom[3] << 32) | device->config_rom[4];
+
+	return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
+			(unsigned long long)guid);
+}
+
+static struct device_attribute fw_device_attributes[] = {
+	__ATTR_RO(config_rom),
+	__ATTR_RO(guid),
+	__ATTR_NULL,
+};
+
+struct read_quadlet_callback_data {
+	struct completion done;
+	int rcode;
+	u32 data;
+};
+
+static void
+complete_transaction(struct fw_card *card, int rcode,
+		     void *payload, size_t length, void *data)
+{
+	struct read_quadlet_callback_data *callback_data = data;
+
+	if (rcode == RCODE_COMPLETE)
+		callback_data->data = be32_to_cpu(*(__be32 *)payload);
+	callback_data->rcode = rcode;
+	complete(&callback_data->done);
+}
+
+static int read_rom(struct fw_device *device, int index, u32 *data)
+{
+	struct read_quadlet_callback_data callback_data;
+	struct fw_transaction t;
+	u64 offset;
+
+	init_completion(&callback_data.done);
+
+	/* The config ROM starts at offset 0x400 in CSR register space. */
+	offset = 0xfffff0000400ULL + index * 4;
+	fw_send_request(device->card, &t, TCODE_READ_QUADLET_REQUEST,
+			device->node_id,
+			device->generation, SCODE_100,
+			offset, NULL, 4, complete_transaction, &callback_data);
+
+	wait_for_completion(&callback_data.done);
+
+	*data = callback_data.data;
+
+	return callback_data.rcode;
+}
+
+static int read_bus_info_block(struct fw_device *device)
+{
+	static u32 rom[256];	/* too big for the kernel stack */
+	u32 stack[16], sp, key;
+	int i, end, length;
+
+	/* First read the bus info block. */
+	for (i = 0; i < 5; i++) {
+		if (read_rom(device, i, &rom[i]) != RCODE_COMPLETE)
+			return -1;
+		/*
+		 * As per IEEE1212 7.2, during power-up, devices can
+		 * reply with a 0 for the first quadlet of the config
+		 * rom to indicate that they are booting (for example,
+		 * if the firmware is on the disk of an external
+		 * harddisk).  In that case we just fail, and the
+		 * retry mechanism will try again later.
+		 */
+		if (i == 0 && rom[i] == 0)
+			return -1;
+	}
+
+	/*
+	 * Now parse the config rom.  The config rom is a recursive
+	 * directory structure so we parse it using a stack of
+	 * references to the blocks that make up the structure.  We
+	 * push a reference to the root directory on the stack to
+	 * start things off.
+	 */
+	length = i;
+	sp = 0;
+	stack[sp++] = 0xc0000005;
+	while (sp > 0) {
+		/*
+		 * Pop the next block reference off the stack.  The
+		 * lower 24 bits are the offset into the config rom,
+		 * and the upper 8 bits are the type of the reference
+		 * to the block.
+		 */
+		key = stack[--sp];
+		i = key & 0xffffff;
+		if (i >= ARRAY_SIZE(rom))
+			/*
+			 * The reference points outside the standard
+			 * config rom area, something's fishy.
+			 */
+			return -1;
+
+		/* Read header quadlet for the block to get the length. */
+		if (read_rom(device, i, &rom[i]) != RCODE_COMPLETE)
+			return -1;
+		end = i + (rom[i] >> 16) + 1;
+		i++;
+		if (end > ARRAY_SIZE(rom))
+			/*
+			 * This block extends outside the standard
+			 * config rom area (and the array we're
+			 * reading it into).  That's broken, so
+			 * ignore this device.
+			 */
+			return -1;
+
+		/*
+		 * Now read in the block.  If this is a directory
+		 * block, check the entries as we read them to see if
+		 * it references another block, and push it in that case.
+		 */
+		while (i < end) {
+			if (read_rom(device, i, &rom[i]) != RCODE_COMPLETE)
+				return -1;
+			if ((key >> 30) == 3 && (rom[i] >> 30) > 1 &&
+			    sp < ARRAY_SIZE(stack))
+				stack[sp++] = i + rom[i];
+			i++;
+		}
+		if (length < i)
+			length = i;
+	}
+
+	device->config_rom = kmalloc(length * 4, GFP_KERNEL);
+	if (device->config_rom == NULL)
+		return -1;
+	memcpy(device->config_rom, rom, length * 4);
+	device->config_rom_length = length;
+
+	return 0;
+}
+
+static void fw_unit_release(struct device *dev)
+{
+	struct fw_unit *unit = fw_unit(dev);
+
+	kfree(unit);
+}
+
+static struct device_type fw_unit_type = {
+	.uevent		= fw_unit_uevent,
+	.release	= fw_unit_release,
+};
+
+static int is_fw_unit(struct device *dev)
+{
+	return dev->type == &fw_unit_type;
+}
+
+static void create_units(struct fw_device *device)
+{
+	struct fw_csr_iterator ci;
+	struct fw_unit *unit;
+	int key, value, i;
+
+	i = 0;
+	fw_csr_iterator_init(&ci, &device->config_rom[5]);
+	while (fw_csr_iterator_next(&ci, &key, &value)) {
+		if (key != (CSR_UNIT | CSR_DIRECTORY))
+			continue;
+
+		/*
+		 * Get the address of the unit directory and try to
+		 * match the drivers id_tables against it.
+		 */
+		unit = kzalloc(sizeof(*unit), GFP_KERNEL);
+		if (unit == NULL) {
+			fw_error("failed to allocate memory for unit\n");
+			continue;
+		}
+
+		unit->directory = ci.p + value - 1;
+		unit->device.bus = &fw_bus_type;
+		unit->device.type = &fw_unit_type;
+		unit->device.parent = &device->device;
+		snprintf(unit->device.bus_id, sizeof(unit->device.bus_id),
+			 "%s.%d", device->device.bus_id, i++);
+
+		init_fw_attribute_group(&unit->device,
+					fw_unit_attributes,
+					&unit->attribute_group);
+		if (device_register(&unit->device) < 0)
+			goto skip_unit;
+
+		continue;
+
+	skip_unit:
+		kfree(unit);
+	}
+}
+
+static int shutdown_unit(struct device *device, void *data)
+{
+	device_unregister(device);
+
+	return 0;
+}
+
+static DECLARE_RWSEM(idr_rwsem);
+static DEFINE_IDR(fw_device_idr);
+int fw_cdev_major;
+
+struct fw_device *fw_device_from_devt(dev_t devt)
+{
+	struct fw_device *device;
+
+	down_read(&idr_rwsem);
+	device = idr_find(&fw_device_idr, MINOR(devt));
+	up_read(&idr_rwsem);
+
+	return device;
+}
+
+static void fw_device_shutdown(struct work_struct *work)
+{
+	struct fw_device *device =
+		container_of(work, struct fw_device, work.work);
+	int minor = MINOR(device->device.devt);
+
+	down_write(&idr_rwsem);
+	idr_remove(&fw_device_idr, minor);
+	up_write(&idr_rwsem);
+
+	fw_device_cdev_remove(device);
+	device_for_each_child(&device->device, NULL, shutdown_unit);
+	device_unregister(&device->device);
+}
+
+static struct device_type fw_device_type = {
+	.release	= fw_device_release,
+};
+
+/*
+ * These defines control the retry behavior for reading the config
+ * rom.  It shouldn't be necessary to tweak these; if the device
+ * doesn't respond to a config rom read within 10 seconds, it's not
+ * going to respond at all.  As for the initial delay, a lot of
+ * devices will be able to respond within half a second after bus
+ * reset.  On the other hand, it's not really worth being more
+ * aggressive than that, since it scales pretty well; if 10 devices
+ * are plugged in, they're all getting read within one second.
+ */
+
+#define MAX_RETRIES	10
+#define RETRY_DELAY	(3 * HZ)
+#define INITIAL_DELAY	(HZ / 2)
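+
+/*
+ * Back of the envelope: with these values a device that never
+ * responds is first scanned half a second after bus reset and then
+ * every three seconds, so we give up after INITIAL_DELAY +
+ * MAX_RETRIES * RETRY_DELAY, i.e. roughly half a minute of wall
+ * clock time plus whatever the reads themselves take.
+ */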
+
+static void fw_device_init(struct work_struct *work)
+{
+	struct fw_device *device =
+		container_of(work, struct fw_device, work.work);
+	int minor, err;
+
+	/*
+	 * All failure paths here set node->data to NULL, so that we
+	 * don't try to do device_for_each_child() on a kfree()'d
+	 * device.
+	 */
+
+	if (read_bus_info_block(device) < 0) {
+		if (device->config_rom_retries < MAX_RETRIES) {
+			device->config_rom_retries++;
+			schedule_delayed_work(&device->work, RETRY_DELAY);
+		} else {
+			fw_notify("giving up on config rom for node id %x\n",
+				  device->node_id);
+			if (device->node == device->card->root_node)
+				schedule_delayed_work(&device->card->work, 0);
+			fw_device_release(&device->device);
+		}
+		return;
+	}
+
+	err = -ENOMEM;
+	down_write(&idr_rwsem);
+	if (idr_pre_get(&fw_device_idr, GFP_KERNEL))
+		err = idr_get_new(&fw_device_idr, device, &minor);
+	up_write(&idr_rwsem);
+	if (err < 0)
+		goto error;
+
+	device->device.bus = &fw_bus_type;
+	device->device.type = &fw_device_type;
+	device->device.parent = device->card->device;
+	device->device.devt = MKDEV(fw_cdev_major, minor);
+	snprintf(device->device.bus_id, sizeof(device->device.bus_id),
+		 "fw%d", minor);
+
+	init_fw_attribute_group(&device->device,
+				fw_device_attributes,
+				&device->attribute_group);
+	if (device_add(&device->device)) {
+		fw_error("Failed to add device.\n");
+		goto error_with_cdev;
+	}
+
+	create_units(device);
+
+	/*
+	 * Transition the device to running state.  If it got pulled
+	 * out from under us while we did the initialization work, we
+	 * have to shut down the device again here.  Normally, though,
+	 * fw_node_event will be responsible for shutting it down when
+	 * necessary.  We have to use the atomic cmpxchg here to avoid
+	 * racing with the FW_NODE_DESTROYED case in
+	 * fw_node_event().
+	 */
+	if (atomic_cmpxchg(&device->state,
+		    FW_DEVICE_INITIALIZING,
+		    FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN)
+		fw_device_shutdown(&device->work.work);
+	else
+		fw_notify("created new fw device %s (%d config rom retries)\n",
+			  device->device.bus_id, device->config_rom_retries);
+
+	/*
+	 * Reschedule the IRM work if we just finished reading the
+	 * root node config rom.  If this races with a bus reset we
+	 * just end up running the IRM work a couple of extra times -
+	 * pretty harmless.
+	 */
+	if (device->node == device->card->root_node)
+		schedule_delayed_work(&device->card->work, 0);
+
+	return;
+
+ error_with_cdev:
+	down_write(&idr_rwsem);
+	idr_remove(&fw_device_idr, minor);
+	up_write(&idr_rwsem);
+ error:
+	put_device(&device->device);
+}
+
+static int update_unit(struct device *dev, void *data)
+{
+	struct fw_unit *unit = fw_unit(dev);
+	struct fw_driver *driver = (struct fw_driver *)dev->driver;
+
+	if (is_fw_unit(dev) && driver != NULL && driver->update != NULL) {
+		down(&dev->sem);
+		driver->update(unit);
+		up(&dev->sem);
+	}
+
+	return 0;
+}
+
+static void fw_device_update(struct work_struct *work)
+{
+	struct fw_device *device =
+		container_of(work, struct fw_device, work.work);
+
+	fw_device_cdev_update(device);
+	device_for_each_child(&device->device, NULL, update_unit);
+}
+
+void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
+{
+	struct fw_device *device;
+
+	switch (event) {
+	case FW_NODE_CREATED:
+	case FW_NODE_LINK_ON:
+		if (!node->link_on)
+			break;
+
+		device = kzalloc(sizeof(*device), GFP_ATOMIC);
+		if (device == NULL)
+			break;
+
+		/*
+		 * Do minimal initialization of the device here; the
+		 * rest will happen in fw_device_init().  We need the
+		 * card and node so we can read the config rom and we
+		 * need to do device_initialize() now so
+		 * device_for_each_child() in FW_NODE_UPDATED
+		 * doesn't freak out.
+		 */
+		device_initialize(&device->device);
+		atomic_set(&device->state, FW_DEVICE_INITIALIZING);
+		device->card = fw_card_get(card);
+		device->node = fw_node_get(node);
+		device->node_id = node->node_id;
+		device->generation = card->generation;
+		INIT_LIST_HEAD(&device->client_list);
+
+		/*
+		 * Set the node data to point back to this device so
+		 * FW_NODE_UPDATED callbacks can update the node_id
+		 * and generation for the device.
+		 */
+		node->data = device;
+
+		/*
+		 * Many devices are slow to respond after bus resets,
+		 * especially if they are bus powered and go through
+		 * power-up after getting plugged in.  We schedule the
+		 * first config rom scan half a second after bus reset.
+		 */
+		INIT_DELAYED_WORK(&device->work, fw_device_init);
+		schedule_delayed_work(&device->work, INITIAL_DELAY);
+		break;
+
+	case FW_NODE_UPDATED:
+		if (!node->link_on || node->data == NULL)
+			break;
+
+		device = node->data;
+		device->node_id = node->node_id;
+		device->generation = card->generation;
+		if (atomic_read(&device->state) == FW_DEVICE_RUNNING) {
+			PREPARE_DELAYED_WORK(&device->work, fw_device_update);
+			schedule_delayed_work(&device->work, 0);
+		}
+		break;
+
+	case FW_NODE_DESTROYED:
+	case FW_NODE_LINK_OFF:
+		if (!node->data)
+			break;
+
+		/*
+		 * Destroy the device associated with the node.  There
+		 * are two cases here: either the device is fully
+		 * initialized (FW_DEVICE_RUNNING) or we're in the
+		 * process of reading its config rom
+		 * (FW_DEVICE_INITIALIZING).  If it is fully
+		 * initialized we can reuse device->work to schedule a
+		 * full fw_device_shutdown().  If not, there's work
+		 * scheduled to read its config rom, and we just put
+		 * the device in shutdown state to have that code fail
+		 * to create the device.
+		 */
+		device = node->data;
+		if (atomic_xchg(&device->state,
+				FW_DEVICE_SHUTDOWN) == FW_DEVICE_RUNNING) {
+			PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
+			schedule_delayed_work(&device->work, 0);
+		}
+		break;
+	}
+}
diff --git a/drivers/firewire/fw-device.h b/drivers/firewire/fw-device.h
new file mode 100644
index 000000000000..0ba9d64ccf4c
--- /dev/null
+++ b/drivers/firewire/fw-device.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) 2005-2006  Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __fw_device_h
+#define __fw_device_h
+
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <asm/atomic.h>
+
+enum fw_device_state {
+	FW_DEVICE_INITIALIZING,
+	FW_DEVICE_RUNNING,
+	FW_DEVICE_SHUTDOWN,
+};
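+
+/*
+ * Sketch of the transitions, as implemented in fw-device.c:
+ *
+ *	INITIALIZING -> RUNNING    atomic_cmpxchg in fw_device_init()
+ *	INITIALIZING -> SHUTDOWN   atomic_xchg in fw_node_event()
+ *	RUNNING      -> SHUTDOWN   atomic_xchg in fw_node_event()
+ *
+ * The atomic operations let fw_device_init() and fw_node_event()
+ * race without ever losing a shutdown request.
+ */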
+
+struct fw_attribute_group {
+	struct attribute_group *groups[2];
+	struct attribute_group group;
+	struct attribute *attrs[11];
+};
+
+struct fw_device {
+	atomic_t state;
+	struct fw_node *node;
+	int node_id;
+	int generation;
+	struct fw_card *card;
+	struct device device;
+	struct list_head link;
+	struct list_head client_list;
+	u32 *config_rom;
+	size_t config_rom_length;
+	int config_rom_retries;
+	struct delayed_work work;
+	struct fw_attribute_group attribute_group;
+};
+
+static inline struct fw_device *
+fw_device(struct device *dev)
+{
+	return container_of(dev, struct fw_device, device);
+}
+
+static inline int
+fw_device_is_shutdown(struct fw_device *device)
+{
+	return atomic_read(&device->state) == FW_DEVICE_SHUTDOWN;
+}
+
+struct fw_device *fw_device_get(struct fw_device *device);
+void fw_device_put(struct fw_device *device);
+int fw_device_enable_phys_dma(struct fw_device *device);
+
+void fw_device_cdev_update(struct fw_device *device);
+void fw_device_cdev_remove(struct fw_device *device);
+
+struct fw_device *fw_device_from_devt(dev_t devt);
+extern int fw_cdev_major;
+
+struct fw_unit {
+	struct device device;
+	u32 *directory;
+	struct fw_attribute_group attribute_group;
+};
+
+static inline struct fw_unit *
+fw_unit(struct device *dev)
+{
+	return container_of(dev, struct fw_unit, device);
+}
+
+#define CSR_OFFSET	0x40
+#define CSR_LEAF	0x80
+#define CSR_DIRECTORY	0xc0
+
+#define CSR_DESCRIPTOR		0x01
+#define CSR_VENDOR		0x03
+#define CSR_HARDWARE_VERSION	0x04
+#define CSR_NODE_CAPABILITIES	0x0c
+#define CSR_UNIT		0x11
+#define CSR_SPECIFIER_ID	0x12
+#define CSR_VERSION		0x13
+#define CSR_DEPENDENT_INFO	0x14
+#define CSR_MODEL		0x17
+#define CSR_INSTANCE		0x18
+
+#define SBP2_COMMAND_SET_SPECIFIER	0x38
+#define SBP2_COMMAND_SET		0x39
+#define SBP2_COMMAND_SET_REVISION	0x3b
+#define SBP2_FIRMWARE_REVISION		0x3c
+
+struct fw_csr_iterator {
+	u32 *p;
+	u32 *end;
+};
+
+void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 *p);
+int fw_csr_iterator_next(struct fw_csr_iterator *ci,
+			 int *key, int *value);
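+
+/*
+ * Typical use (an illustrative sketch): walk a unit directory and
+ * pull out an immediate entry, here the vendor id.
+ *
+ *	struct fw_csr_iterator ci;
+ *	int key, value;
+ *
+ *	fw_csr_iterator_init(&ci, unit->directory);
+ *	while (fw_csr_iterator_next(&ci, &key, &value))
+ *		if (key == CSR_VENDOR)
+ *			return value;
+ *
+ * create_units() in fw-device.c uses the same loop to find unit
+ * directories in the root directory.
+ */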
+
+#define FW_MATCH_VENDOR		0x0001
+#define FW_MATCH_MODEL		0x0002
+#define FW_MATCH_SPECIFIER_ID	0x0004
+#define FW_MATCH_VERSION	0x0008
+
+struct fw_device_id {
+	u32 match_flags;
+	u32 vendor;
+	u32 model;
+	u32 specifier_id;
+	u32 version;
+	void *driver_data;
+};
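+
+/*
+ * A protocol driver matches units against a zero-terminated table of
+ * these.  Hypothetical example; the specifier id and version are the
+ * well-known ANSI T10 SBP-2 values, not constants from this header:
+ *
+ *	static const struct fw_device_id sbp2_id_table[] = {
+ *		{
+ *			.match_flags  = FW_MATCH_SPECIFIER_ID |
+ *					FW_MATCH_VERSION,
+ *			.specifier_id = 0x00609e,
+ *			.version      = 0x010483,
+ *		},
+ *		{ }
+ *	};
+ */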
+
+struct fw_driver {
+	struct device_driver driver;
+	/* Called when the parent device sits through a bus reset. */
+	void (*update) (struct fw_unit *unit);
+	const struct fw_device_id *id_table;
+};
+
+static inline struct fw_driver *
+fw_driver(struct device_driver *drv)
+{
+	return container_of(drv, struct fw_driver, driver);
+}
+
+extern const struct file_operations fw_device_ops;
+
+#endif /* __fw_device_h */
diff --git a/drivers/firewire/fw-iso.c b/drivers/firewire/fw-iso.c
new file mode 100644
index 000000000000..2b640e9be6de
--- /dev/null
+++ b/drivers/firewire/fw-iso.c
@@ -0,0 +1,163 @@
+/*
+ * Isochronous IO functionality
+ *
+ * Copyright (C) 2006 Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+
+#include "fw-transaction.h"
+#include "fw-topology.h"
+#include "fw-device.h"
+
+int
+fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
+		   int page_count, enum dma_data_direction direction)
+{
+	int i, j, retval = -ENOMEM;
+	dma_addr_t address;
+
+	buffer->page_count = page_count;
+	buffer->direction = direction;
+
+	buffer->pages = kmalloc(page_count * sizeof(buffer->pages[0]),
+				GFP_KERNEL);
+	if (buffer->pages == NULL)
+		goto out;
+
+	for (i = 0; i < buffer->page_count; i++) {
+		buffer->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
+		if (buffer->pages[i] == NULL)
+			goto out_pages;
+
+		address = dma_map_page(card->device, buffer->pages[i],
+				       0, PAGE_SIZE, direction);
+		if (dma_mapping_error(address)) {
+			__free_page(buffer->pages[i]);
+			goto out_pages;
+		}
+		set_page_private(buffer->pages[i], address);
+	}
+
+	return 0;
+
+ out_pages:
+	for (j = 0; j < i; j++) {
+		address = page_private(buffer->pages[j]);
+		dma_unmap_page(card->device, address,
+			       PAGE_SIZE, direction);
+		__free_page(buffer->pages[j]);
+	}
+	kfree(buffer->pages);
+ out:
+	buffer->pages = NULL;
+	return retval;
+}
+
+int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma)
+{
+	unsigned long uaddr;
+	int i, retval;
+
+	uaddr = vma->vm_start;
+	for (i = 0; i < buffer->page_count; i++) {
+		retval = vm_insert_page(vma, uaddr, buffer->pages[i]);
+		if (retval)
+			return retval;
+		uaddr += PAGE_SIZE;
+	}
+
+	return 0;
+}
+
+void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
+			   struct fw_card *card)
+{
+	int i;
+	dma_addr_t address;
+
+	for (i = 0; i < buffer->page_count; i++) {
+		address = page_private(buffer->pages[i]);
+		dma_unmap_page(card->device, address,
+			       PAGE_SIZE, buffer->direction);
+		__free_page(buffer->pages[i]);
+	}
+
+	kfree(buffer->pages);
+	buffer->pages = NULL;
+}
+
+struct fw_iso_context *
+fw_iso_context_create(struct fw_card *card, int type,
+		      int channel, int speed, size_t header_size,
+		      fw_iso_callback_t callback, void *callback_data)
+{
+	struct fw_iso_context *ctx;
+
+	ctx = card->driver->allocate_iso_context(card, type, header_size);
+	if (IS_ERR(ctx))
+		return ctx;
+
+	ctx->card = card;
+	ctx->type = type;
+	ctx->channel = channel;
+	ctx->speed = speed;
+	ctx->header_size = header_size;
+	ctx->callback = callback;
+	ctx->callback_data = callback_data;
+
+	return ctx;
+}
+EXPORT_SYMBOL(fw_iso_context_create);
+
+void fw_iso_context_destroy(struct fw_iso_context *ctx)
+{
+	struct fw_card *card = ctx->card;
+
+	card->driver->free_iso_context(ctx);
+}
+EXPORT_SYMBOL(fw_iso_context_destroy);
+
+int
+fw_iso_context_start(struct fw_iso_context *ctx, int cycle, int sync, int tags)
+{
+	return ctx->card->driver->start_iso(ctx, cycle, sync, tags);
+}
+EXPORT_SYMBOL(fw_iso_context_start);
+
+int
+fw_iso_context_queue(struct fw_iso_context *ctx,
+		     struct fw_iso_packet *packet,
+		     struct fw_iso_buffer *buffer,
+		     unsigned long payload)
+{
+	struct fw_card *card = ctx->card;
+
+	return card->driver->queue_iso(ctx, packet, buffer, payload);
+}
+EXPORT_SYMBOL(fw_iso_context_queue);
+
+int
+fw_iso_context_stop(struct fw_iso_context *ctx)
+{
+	return ctx->card->driver->stop_iso(ctx);
+}
+EXPORT_SYMBOL(fw_iso_context_stop);
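+
+/*
+ * Putting the pieces together, an illustrative sketch with all error
+ * handling elided; a negative cycle means "start immediately":
+ *
+ *	struct fw_iso_context *ctx;
+ *	struct fw_iso_buffer buffer;
+ *
+ *	ctx = fw_iso_context_create(card, FW_ISO_CONTEXT_RECEIVE,
+ *				    channel, speed, header_size,
+ *				    callback, callback_data);
+ *	fw_iso_buffer_init(&buffer, card, page_count, DMA_FROM_DEVICE);
+ *
+ * queue one or more packets with fw_iso_context_queue(), then
+ *
+ *	fw_iso_context_start(ctx, -1, sync, tags);
+ *	...
+ *	fw_iso_context_stop(ctx);
+ *	fw_iso_buffer_destroy(&buffer, card);
+ *	fw_iso_context_destroy(ctx);
+ */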
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
new file mode 100644
index 000000000000..1f5c70461b8b
--- /dev/null
+++ b/drivers/firewire/fw-ohci.c
@@ -0,0 +1,1943 @@
+/*
+ * Driver for OHCI 1394 controllers
+ *
+ * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/poll.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/uaccess.h>
+#include <asm/semaphore.h>
+
+#include "fw-transaction.h"
+#include "fw-ohci.h"
+
+#define DESCRIPTOR_OUTPUT_MORE		0
+#define DESCRIPTOR_OUTPUT_LAST		(1 << 12)
+#define DESCRIPTOR_INPUT_MORE		(2 << 12)
+#define DESCRIPTOR_INPUT_LAST		(3 << 12)
+#define DESCRIPTOR_STATUS		(1 << 11)
+#define DESCRIPTOR_KEY_IMMEDIATE	(2 << 8)
+#define DESCRIPTOR_PING			(1 << 7)
+#define DESCRIPTOR_YY			(1 << 6)
+#define DESCRIPTOR_NO_IRQ		(0 << 4)
+#define DESCRIPTOR_IRQ_ERROR		(1 << 4)
+#define DESCRIPTOR_IRQ_ALWAYS		(3 << 4)
+#define DESCRIPTOR_BRANCH_ALWAYS	(3 << 2)
+#define DESCRIPTOR_WAIT			(3 << 0)
+
+struct descriptor {
+	__le16 req_count;
+	__le16 control;
+	__le32 data_address;
+	__le32 branch_address;
+	__le16 res_count;
+	__le16 transfer_status;
+} __attribute__((aligned(16)));
+
+struct db_descriptor {
+	__le16 first_size;
+	__le16 control;
+	__le16 second_req_count;
+	__le16 first_req_count;
+	__le32 branch_address;
+	__le16 second_res_count;
+	__le16 first_res_count;
+	__le32 reserved0;
+	__le32 first_buffer;
+	__le32 second_buffer;
+	__le32 reserved1;
+} __attribute__((aligned(16)));
+
+#define CONTROL_SET(regs)	(regs)
+#define CONTROL_CLEAR(regs)	((regs) + 4)
+#define COMMAND_PTR(regs)	((regs) + 12)
+#define CONTEXT_MATCH(regs)	((regs) + 16)
+
+struct ar_buffer {
+	struct descriptor descriptor;
+	struct ar_buffer *next;
+	__le32 data[0];
+};
+
+struct ar_context {
+	struct fw_ohci *ohci;
+	struct ar_buffer *current_buffer;
+	struct ar_buffer *last_buffer;
+	void *pointer;
+	u32 regs;
+	struct tasklet_struct tasklet;
+};
+
+struct context;
+
+typedef int (*descriptor_callback_t)(struct context *ctx,
+				     struct descriptor *d,
+				     struct descriptor *last);
+struct context {
+	struct fw_ohci *ohci;
+	u32 regs;
+
+	struct descriptor *buffer;
+	dma_addr_t buffer_bus;
+	size_t buffer_size;
+	struct descriptor *head_descriptor;
+	struct descriptor *tail_descriptor;
+	struct descriptor *tail_descriptor_last;
+	struct descriptor *prev_descriptor;
+
+	descriptor_callback_t callback;
+
+	struct tasklet_struct tasklet;
+};
+
+#define IT_HEADER_SY(v)          ((v) <<  0)
+#define IT_HEADER_TCODE(v)       ((v) <<  4)
+#define IT_HEADER_CHANNEL(v)     ((v) <<  8)
+#define IT_HEADER_TAG(v)         ((v) << 14)
+#define IT_HEADER_SPEED(v)       ((v) << 16)
+#define IT_HEADER_DATA_LENGTH(v) ((v) << 16)
+
+struct iso_context {
+	struct fw_iso_context base;
+	struct context context;
+	void *header;
+	size_t header_length;
+};
+
+#define CONFIG_ROM_SIZE 1024
+
+struct fw_ohci {
+	struct fw_card card;
+
+	u32 version;
+	__iomem char *registers;
+	dma_addr_t self_id_bus;
+	__le32 *self_id_cpu;
+	struct tasklet_struct bus_reset_tasklet;
+	int node_id;
+	int generation;
+	int request_generation;
+	u32 bus_seconds;
+
+	/*
+	 * Spinlock for accessing fw_ohci data.  Never call out of
+	 * this driver with this lock held.
+	 */
+	spinlock_t lock;
+	u32 self_id_buffer[512];
+
+	/* Config rom buffers */
+	__be32 *config_rom;
+	dma_addr_t config_rom_bus;
+	__be32 *next_config_rom;
+	dma_addr_t next_config_rom_bus;
+	u32 next_header;
+
+	struct ar_context ar_request_ctx;
+	struct ar_context ar_response_ctx;
+	struct context at_request_ctx;
+	struct context at_response_ctx;
+
+	u32 it_context_mask;
+	struct iso_context *it_context_list;
+	u32 ir_context_mask;
+	struct iso_context *ir_context_list;
+};
+
+static inline struct fw_ohci *fw_ohci(struct fw_card *card)
+{
+	return container_of(card, struct fw_ohci, card);
+}
+
+#define IT_CONTEXT_CYCLE_MATCH_ENABLE	0x80000000
+#define IR_CONTEXT_BUFFER_FILL		0x80000000
+#define IR_CONTEXT_ISOCH_HEADER		0x40000000
+#define IR_CONTEXT_CYCLE_MATCH_ENABLE	0x20000000
+#define IR_CONTEXT_MULTI_CHANNEL_MODE	0x10000000
+#define IR_CONTEXT_DUAL_BUFFER_MODE	0x08000000
+
+#define CONTEXT_RUN	0x8000
+#define CONTEXT_WAKE	0x1000
+#define CONTEXT_DEAD	0x0800
+#define CONTEXT_ACTIVE	0x0400
+
+#define OHCI1394_MAX_AT_REQ_RETRIES	0x2
+#define OHCI1394_MAX_AT_RESP_RETRIES	0x2
+#define OHCI1394_MAX_PHYS_RESP_RETRIES	0x8
+
+#define FW_OHCI_MAJOR			240
+#define OHCI1394_REGISTER_SIZE		0x800
+#define OHCI_LOOP_COUNT			500
+#define OHCI1394_PCI_HCI_Control	0x40
+#define SELF_ID_BUF_SIZE		0x800
+#define OHCI_TCODE_PHY_PACKET		0x0e
+#define OHCI_VERSION_1_1		0x010010
+#define ISO_BUFFER_SIZE			(64 * 1024)
+#define AT_BUFFER_SIZE			4096
+
+static char ohci_driver_name[] = KBUILD_MODNAME;
+
+static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
+{
+	writel(data, ohci->registers + offset);
+}
+
+static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
+{
+	return readl(ohci->registers + offset);
+}
+
+static inline void flush_writes(const struct fw_ohci *ohci)
+{
+	/* Do a dummy read to flush writes. */
+	reg_read(ohci, OHCI1394_Version);
+}
+
+static int
+ohci_update_phy_reg(struct fw_card *card, int addr,
+		    int clear_bits, int set_bits)
+{
+	struct fw_ohci *ohci = fw_ohci(card);
+	u32 val, old;
+
+	reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
+	msleep(2);
+	val = reg_read(ohci, OHCI1394_PhyControl);
+	if ((val & OHCI1394_PhyControl_ReadDone) == 0) {
+		fw_error("failed to set phy reg bits.\n");
+		return -EBUSY;
+	}
+
+	old = OHCI1394_PhyControl_ReadData(val);
+	old = (old & ~clear_bits) | set_bits;
+	reg_write(ohci, OHCI1394_PhyControl,
+		  OHCI1394_PhyControl_Write(addr, old));
+
+	return 0;
+}
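+
+/*
+ * Sketch of a caller (register and bit values per IEEE 1394a, not
+ * checked against this file): a bus reset can be initiated by
+ * setting the IBR bit in PHY register 1 while leaving the gap count
+ * bits alone:
+ *
+ *	ohci_update_phy_reg(card, 1, 0, 0x40);
+ */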
+
+static int ar_context_add_page(struct ar_context *ctx)
+{
+	struct device *dev = ctx->ohci->card.device;
+	struct ar_buffer *ab;
+	dma_addr_t ab_bus;
+	size_t offset;
+
+	ab = (struct ar_buffer *) __get_free_page(GFP_ATOMIC);
+	if (ab == NULL)
+		return -ENOMEM;
+
+	ab_bus = dma_map_single(dev, ab, PAGE_SIZE, DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(ab_bus)) {
+		free_page((unsigned long) ab);
+		return -ENOMEM;
+	}
+
+	memset(&ab->descriptor, 0, sizeof(ab->descriptor));
+	ab->descriptor.control        = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
+						    DESCRIPTOR_STATUS |
+						    DESCRIPTOR_BRANCH_ALWAYS);
+	offset = offsetof(struct ar_buffer, data);
+	ab->descriptor.req_count      = cpu_to_le16(PAGE_SIZE - offset);
+	ab->descriptor.data_address   = cpu_to_le32(ab_bus + offset);
+	ab->descriptor.res_count      = cpu_to_le16(PAGE_SIZE - offset);
+	ab->descriptor.branch_address = 0;
+
+	dma_sync_single_for_device(dev, ab_bus, PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+	ctx->last_buffer->descriptor.branch_address = ab_bus | 1;
+	ctx->last_buffer->next = ab;
+	ctx->last_buffer = ab;
+
+	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
+	flush_writes(ctx->ohci);
+
+	return 0;
+}
+
+static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
+{
+	struct fw_ohci *ohci = ctx->ohci;
+	struct fw_packet p;
+	u32 status, length, tcode;
+
+	p.header[0] = le32_to_cpu(buffer[0]);
+	p.header[1] = le32_to_cpu(buffer[1]);
+	p.header[2] = le32_to_cpu(buffer[2]);
+
+	tcode = (p.header[0] >> 4) & 0x0f;
+	switch (tcode) {
+	case TCODE_WRITE_QUADLET_REQUEST:
+	case TCODE_READ_QUADLET_RESPONSE:
+		p.header[3] = (__force __u32) buffer[3];
+		p.header_length = 16;
+		p.payload_length = 0;
+		break;
+
+	case TCODE_READ_BLOCK_REQUEST:
+		p.header[3] = le32_to_cpu(buffer[3]);
+		p.header_length = 16;
+		p.payload_length = 0;
+		break;
+
+	case TCODE_WRITE_BLOCK_REQUEST:
+	case TCODE_READ_BLOCK_RESPONSE:
+	case TCODE_LOCK_REQUEST:
+	case TCODE_LOCK_RESPONSE:
+		p.header[3] = le32_to_cpu(buffer[3]);
+		p.header_length = 16;
+		p.payload_length = p.header[3] >> 16;
+		break;
+
+	case TCODE_WRITE_RESPONSE:
+	case TCODE_READ_QUADLET_REQUEST:
+	case OHCI_TCODE_PHY_PACKET:
+		p.header_length = 12;
+		p.payload_length = 0;
+		break;
+	}
+
+	p.payload = (void *) buffer + p.header_length;
+
+	/* FIXME: What to do about evt_* errors? */
+	length = (p.header_length + p.payload_length + 3) / 4;
+	status = le32_to_cpu(buffer[length]);
+
+	p.ack        = ((status >> 16) & 0x1f) - 16;
+	p.speed      = (status >> 21) & 0x7;
+	p.timestamp  = status & 0xffff;
+	p.generation = ohci->request_generation;
+
+	/*
+	 * The OHCI bus reset handler synthesizes a phy packet with
+	 * the new generation number when a bus reset happens (see
+	 * section 8.4.2.3).  This helps us determine when a request
+	 * was received and make sure we send the response in the same
+	 * generation.  We only need this for requests; for responses
+	 * we use the unique tlabel for finding the matching
+	 * request.
+	 */
+
+	if (p.ack + 16 == 0x09)
+		ohci->request_generation = (buffer[2] >> 16) & 0xff;
+	else if (ctx == &ohci->ar_request_ctx)
+		fw_core_handle_request(&ohci->card, &p);
+	else
+		fw_core_handle_response(&ohci->card, &p);
+
+	return buffer + length + 1;
+}
+
+static void ar_context_tasklet(unsigned long data)
+{
+	struct ar_context *ctx = (struct ar_context *)data;
+	struct fw_ohci *ohci = ctx->ohci;
+	struct ar_buffer *ab;
+	struct descriptor *d;
+	void *buffer, *end;
+
+	ab = ctx->current_buffer;
+	d = &ab->descriptor;
+
+	if (d->res_count == 0) {
+		size_t size, rest, offset;
+
+		/*
+		 * This descriptor is finished and we may have a
+		 * packet split across this and the next buffer. We
+		 * reuse the page for reassembling the split packet.
+		 */
+
+		offset = offsetof(struct ar_buffer, data);
+		dma_unmap_single(ohci->card.device,
+				 ab->descriptor.data_address - offset,
+				 PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+		buffer = ab;
+		ab = ab->next;
+		d = &ab->descriptor;
+		size = buffer + PAGE_SIZE - ctx->pointer;
+		rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
+		memmove(buffer, ctx->pointer, size);
+		memcpy(buffer + size, ab->data, rest);
+		ctx->current_buffer = ab;
+		ctx->pointer = (void *) ab->data + rest;
+		end = buffer + size + rest;
+
+		while (buffer < end)
+			buffer = handle_ar_packet(ctx, buffer);
+
+		free_page((unsigned long)buffer);
+		ar_context_add_page(ctx);
+	} else {
+		buffer = ctx->pointer;
+		ctx->pointer = end =
+			(void *) ab + PAGE_SIZE - le16_to_cpu(d->res_count);
+
+		while (buffer < end)
+			buffer = handle_ar_packet(ctx, buffer);
+	}
+}
+
+static int
+ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, u32 regs)
+{
+	struct ar_buffer ab;
+
+	ctx->regs        = regs;
+	ctx->ohci        = ohci;
+	ctx->last_buffer = &ab;
+	tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);
+
+	ar_context_add_page(ctx);
+	ar_context_add_page(ctx);
+	ctx->current_buffer = ab.next;
+	ctx->pointer = ctx->current_buffer->data;
+
+	reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ab.descriptor.branch_address);
+	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
+	flush_writes(ctx->ohci);
+
+	return 0;
+}
+
+static void context_tasklet(unsigned long data)
+{
+	struct context *ctx = (struct context *) data;
+	struct fw_ohci *ohci = ctx->ohci;
+	struct descriptor *d, *last;
+	u32 address;
+	int z;
+
+	dma_sync_single_for_cpu(ohci->card.device, ctx->buffer_bus,
+				ctx->buffer_size, DMA_TO_DEVICE);
+
+	d    = ctx->tail_descriptor;
+	last = ctx->tail_descriptor_last;
+
+	while (last->branch_address != 0) {
+		address = le32_to_cpu(last->branch_address);
+		z = address & 0xf;
+		d = ctx->buffer + (address - ctx->buffer_bus) / sizeof(*d);
+		last = (z == 2) ? d : d + z - 1;
+
+		if (!ctx->callback(ctx, d, last))
+			break;
+
+		ctx->tail_descriptor      = d;
+		ctx->tail_descriptor_last = last;
+	}
+}
+
+static int
+context_init(struct context *ctx, struct fw_ohci *ohci,
+	     size_t buffer_size, u32 regs,
+	     descriptor_callback_t callback)
+{
+	ctx->ohci = ohci;
+	ctx->regs = regs;
+	ctx->buffer_size = buffer_size;
+	ctx->buffer = kmalloc(buffer_size, GFP_KERNEL);
+	if (ctx->buffer == NULL)
+		return -ENOMEM;
+
+	tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
+	ctx->callback = callback;
+
+	ctx->buffer_bus =
+		dma_map_single(ohci->card.device, ctx->buffer,
+			       buffer_size, DMA_TO_DEVICE);
+	if (dma_mapping_error(ctx->buffer_bus)) {
+		kfree(ctx->buffer);
+		return -ENOMEM;
+	}
+
+	ctx->head_descriptor      = ctx->buffer;
+	ctx->prev_descriptor      = ctx->buffer;
+	ctx->tail_descriptor      = ctx->buffer;
+	ctx->tail_descriptor_last = ctx->buffer;
+
+	/*
+	 * We put a dummy descriptor in the buffer that has a NULL
+	 * branch address and looks like it's been sent.  That way we
+	 * have a descriptor to append DMA programs to.  Also, the
+	 * ring buffer invariant is that it always has at least one
+	 * element so that head == tail means buffer full.
+	 */
+
+	memset(ctx->head_descriptor, 0, sizeof(*ctx->head_descriptor));
+	ctx->head_descriptor->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
+	ctx->head_descriptor->transfer_status = cpu_to_le16(0x8011);
+	ctx->head_descriptor++;
+
+	return 0;
+}
+
+static void
+context_release(struct context *ctx)
+{
+	struct fw_card *card = &ctx->ohci->card;
+
+	dma_unmap_single(card->device, ctx->buffer_bus,
+			 ctx->buffer_size, DMA_TO_DEVICE);
+	kfree(ctx->buffer);
+}
+
+static struct descriptor *
+context_get_descriptors(struct context *ctx, int z, dma_addr_t *d_bus)
+{
+	struct descriptor *d, *tail, *end;
+
+	d = ctx->head_descriptor;
+	tail = ctx->tail_descriptor;
+	end = ctx->buffer + ctx->buffer_size / sizeof(*d);
+
+	if (d + z <= tail) {
+		goto has_space;
+	} else if (d > tail && d + z <= end) {
+		goto has_space;
+	} else if (d > tail && ctx->buffer + z <= tail) {
+		d = ctx->buffer;
+		goto has_space;
+	}
+
+	return NULL;
+
+ has_space:
+	memset(d, 0, z * sizeof(*d));
+	*d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof(*d);
+
+	return d;
+}
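+
+/*
+ * The three cases above are the usual ring buffer geometry, where
+ * the free area is either contiguous or wrapped (H = head, T = tail,
+ * x = in use):
+ *
+ *	|xxxxH........Txx|   allocate at H if H + z <= T
+ *	|..Txxxxxx..H....|   allocate at H if it fits before the end,
+ *	                     otherwise wrap to the buffer start if
+ *	                     buffer + z <= T
+ *
+ * Because context_init() seeds the ring with a dummy descriptor,
+ * head == tail always means full, never empty.
+ */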
+
+static void context_run(struct context *ctx, u32 extra)
+{
+	struct fw_ohci *ohci = ctx->ohci;
+
+	reg_write(ohci, COMMAND_PTR(ctx->regs),
+		  le32_to_cpu(ctx->tail_descriptor_last->branch_address));
+	reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
+	reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
+	flush_writes(ohci);
+}
+
+static void context_append(struct context *ctx,
+			   struct descriptor *d, int z, int extra)
+{
+	dma_addr_t d_bus;
+
+	d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof(*d);
+
+	ctx->head_descriptor = d + z + extra;
+	ctx->prev_descriptor->branch_address = cpu_to_le32(d_bus | z);
+	ctx->prev_descriptor = z == 2 ? d : d + z - 1;
+
+	dma_sync_single_for_device(ctx->ohci->card.device, ctx->buffer_bus,
+				   ctx->buffer_size, DMA_TO_DEVICE);
+
+	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
+	flush_writes(ctx->ohci);
+}
+
+static void context_stop(struct context *ctx)
+{
+	u32 reg;
+	int i;
+
+	reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
+	flush_writes(ctx->ohci);
+
+	for (i = 0; i < 10; i++) {
+		reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
+		if ((reg & CONTEXT_ACTIVE) == 0)
+			break;
+
+		fw_notify("context_stop: still active (0x%08x)\n", reg);
+		msleep(1);
+	}
+}
+
+struct driver_data {
+	struct fw_packet *packet;
+};
+
+/*
+ * This function appends a packet to the DMA queue for transmission.
+ * Must always be called with the ohci->lock held to ensure proper
+ * generation handling and locking around packet queue manipulation.
+ */
+static int
+at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
+{
+	struct fw_ohci *ohci = ctx->ohci;
+	dma_addr_t d_bus, payload_bus;
+	struct driver_data *driver_data;
+	struct descriptor *d, *last;
+	__le32 *header;
+	int z, tcode;
+	u32 reg;
+
+	d = context_get_descriptors(ctx, 4, &d_bus);
+	if (d == NULL) {
+		packet->ack = RCODE_SEND_ERROR;
+		return -1;
+	}
+
+	d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
+	d[0].res_count = cpu_to_le16(packet->timestamp);
+
+	/*
+	 * The DMA format for asynchronous link packets is different
+	 * from the IEEE1394 layout, so shift the fields around
+	 * accordingly.  If header_length is 8, it's a PHY packet, to
+	 * which we need to prepend an extra quadlet.
+	 */
+
+	header = (__le32 *) &d[1];
+	if (packet->header_length > 8) {
+		header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
+					(packet->speed << 16));
+		header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
+					(packet->header[0] & 0xffff0000));
+		header[2] = cpu_to_le32(packet->header[2]);
+
+		tcode = (packet->header[0] >> 4) & 0x0f;
+		if (TCODE_IS_BLOCK_PACKET(tcode))
+			header[3] = cpu_to_le32(packet->header[3]);
+		else
+			header[3] = (__force __le32) packet->header[3];
+
+		d[0].req_count = cpu_to_le16(packet->header_length);
+	} else {
+		header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
+					(packet->speed << 16));
+		header[1] = cpu_to_le32(packet->header[0]);
+		header[2] = cpu_to_le32(packet->header[1]);
+		d[0].req_count = cpu_to_le16(12);
+	}
+
+	driver_data = (struct driver_data *) &d[3];
+	driver_data->packet = packet;
+	packet->driver_data = driver_data;
+
+	if (packet->payload_length > 0) {
+		payload_bus =
+			dma_map_single(ohci->card.device, packet->payload,
+				       packet->payload_length, DMA_TO_DEVICE);
+		if (dma_mapping_error(payload_bus)) {
+			packet->ack = RCODE_SEND_ERROR;
+			return -1;
+		}
+
+		d[2].req_count    = cpu_to_le16(packet->payload_length);
+		d[2].data_address = cpu_to_le32(payload_bus);
+		last = &d[2];
+		z = 3;
+	} else {
+		last = &d[0];
+		z = 2;
+	}
+
+	last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
+				     DESCRIPTOR_IRQ_ALWAYS |
+				     DESCRIPTOR_BRANCH_ALWAYS);
+
+	/* FIXME: Document how the locking works. */
+	if (ohci->generation != packet->generation) {
+		packet->ack = RCODE_GENERATION;
+		return -1;
+	}
+
+	context_append(ctx, d, z, 4 - z);
+
+	/* If the context isn't already running, start it up. */
+	reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
+	if ((reg & CONTEXT_RUN) == 0)
+		context_run(ctx, 0);
+
+	return 0;
+}
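+
+/*
+ * Shape of the descriptor block built above: a block always occupies
+ * four 16-byte descriptors even though only z of them are handed to
+ * the controller, which is what makes &d[3] a safe place to stash
+ * the driver_data back pointer.
+ *
+ *	d[0]  KEY_IMMEDIATE control, req_count = header length
+ *	d[1]  the immediate header quadlets themselves
+ *	d[2]  payload descriptor, only used when z == 3
+ *	d[3]  never seen by the controller, holds driver_data
+ *
+ * OUTPUT_LAST | IRQ_ALWAYS | BRANCH_ALWAYS is OR'ed into whichever
+ * of d[0] or d[2] comes last.
+ */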
+
+static int handle_at_packet(struct context *context,
+			    struct descriptor *d,
+			    struct descriptor *last)
+{
+	struct driver_data *driver_data;
+	struct fw_packet *packet;
+	struct fw_ohci *ohci = context->ohci;
+	dma_addr_t payload_bus;
+	int evt;
+
+	if (last->transfer_status == 0)
+		/* This descriptor isn't done yet, stop iteration. */
+		return 0;
+
+	driver_data = (struct driver_data *) &d[3];
+	packet = driver_data->packet;
+	if (packet == NULL)
+		/* This packet was cancelled, just continue. */
+		return 1;
+
+	payload_bus = le32_to_cpu(last->data_address);
+	if (payload_bus != 0)
+		dma_unmap_single(ohci->card.device, payload_bus,
+				 packet->payload_length, DMA_TO_DEVICE);
+
+	evt = le16_to_cpu(last->transfer_status) & 0x1f;
+	packet->timestamp = le16_to_cpu(last->res_count);
+
+	switch (evt) {
+	case OHCI1394_evt_timeout:
+		/* Async response transmit timed out. */
+		packet->ack = RCODE_CANCELLED;
+		break;
+
+	case OHCI1394_evt_flushed:
+		/*
+		 * A flushed packet should give the same error as one
+		 * sent with a stale generation count.
+		 */
+		packet->ack = RCODE_GENERATION;
+		break;
+
+	case OHCI1394_evt_missing_ack:
+		/*
+		 * Using a valid (current) generation count, but the
+		 * node is not on the bus or not sending acks.
+		 */
+		packet->ack = RCODE_NO_ACK;
+		break;
+
+	case ACK_COMPLETE + 0x10:
+	case ACK_PENDING + 0x10:
+	case ACK_BUSY_X + 0x10:
+	case ACK_BUSY_A + 0x10:
+	case ACK_BUSY_B + 0x10:
+	case ACK_DATA_ERROR + 0x10:
+	case ACK_TYPE_ERROR + 0x10:
+		packet->ack = evt - 0x10;
+		break;
+
+	default:
+		packet->ack = RCODE_SEND_ERROR;
+		break;
+	}
+
+	packet->callback(packet, &ohci->card, packet->ack);
+
+	return 1;
+}
+
+#define HEADER_GET_DESTINATION(q)	(((q) >> 16) & 0xffff)
+#define HEADER_GET_TCODE(q)		(((q) >> 4) & 0x0f)
+#define HEADER_GET_OFFSET_HIGH(q)	(((q) >> 0) & 0xffff)
+#define HEADER_GET_DATA_LENGTH(q)	(((q) >> 16) & 0xffff)
+#define HEADER_GET_EXTENDED_TCODE(q)	(((q) >> 0) & 0xffff)
+
+static void
+handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
+{
+	struct fw_packet response;
+	int tcode, length, i;
+
+	tcode = HEADER_GET_TCODE(packet->header[0]);
+	if (TCODE_IS_BLOCK_PACKET(tcode))
+		length = HEADER_GET_DATA_LENGTH(packet->header[3]);
+	else
+		length = 4;
+
+	i = csr - CSR_CONFIG_ROM;
+	if (i + length > CONFIG_ROM_SIZE) {
+		fw_fill_response(&response, packet->header,
+				 RCODE_ADDRESS_ERROR, NULL, 0);
+	} else if (!TCODE_IS_READ_REQUEST(tcode)) {
+		fw_fill_response(&response, packet->header,
+				 RCODE_TYPE_ERROR, NULL, 0);
+	} else {
+		fw_fill_response(&response, packet->header, RCODE_COMPLETE,
+				 (void *) ohci->config_rom + i, length);
+	}
+
+	fw_core_handle_response(&ohci->card, &response);
+}
+
+static void
+handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
+{
+	struct fw_packet response;
+	int tcode, length, ext_tcode, sel;
+	__be32 *payload, lock_old = 0;
+	u32 lock_arg, lock_data;
+
+	tcode = HEADER_GET_TCODE(packet->header[0]);
+	length = HEADER_GET_DATA_LENGTH(packet->header[3]);
+	payload = packet->payload;
+	ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]);
+
+	if (tcode == TCODE_LOCK_REQUEST &&
+	    ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
+		lock_arg = be32_to_cpu(payload[0]);
+		lock_data = be32_to_cpu(payload[1]);
+	} else if (tcode == TCODE_READ_QUADLET_REQUEST) {
+		lock_arg = 0;
+		lock_data = 0;
+	} else {
+		fw_fill_response(&response, packet->header,
+				 RCODE_TYPE_ERROR, NULL, 0);
+		goto out;
+	}
+
+	sel = (csr - CSR_BUS_MANAGER_ID) / 4;
+	reg_write(ohci, OHCI1394_CSRData, lock_data);
+	reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
+	reg_write(ohci, OHCI1394_CSRControl, sel);
+
+	if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
+		lock_old = cpu_to_be32(reg_read(ohci, OHCI1394_CSRData));
+	else
+		fw_notify("swap not done yet\n");
+
+	fw_fill_response(&response, packet->header,
+			 RCODE_COMPLETE, &lock_old, sizeof(lock_old));
+ out:
+	fw_core_handle_response(&ohci->card, &response);
+}
+
+static void
+handle_local_request(struct context *ctx, struct fw_packet *packet)
+{
+	u64 offset;
+	u32 csr;
+
+	if (ctx == &ctx->ohci->at_request_ctx) {
+		packet->ack = ACK_PENDING;
+		packet->callback(packet, &ctx->ohci->card, packet->ack);
+	}
+
+	offset =
+		((unsigned long long)
+		 HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) |
+		packet->header[2];
+	csr = offset - CSR_REGISTER_BASE;
+
+	/* Handle config rom reads. */
+	if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
+		handle_local_rom(ctx->ohci, packet, csr);
+	else switch (csr) {
+	case CSR_BUS_MANAGER_ID:
+	case CSR_BANDWIDTH_AVAILABLE:
+	case CSR_CHANNELS_AVAILABLE_HI:
+	case CSR_CHANNELS_AVAILABLE_LO:
+		handle_local_lock(ctx->ohci, packet, csr);
+		break;
+	default:
+		if (ctx == &ctx->ohci->at_request_ctx)
+			fw_core_handle_request(&ctx->ohci->card, packet);
+		else
+			fw_core_handle_response(&ctx->ohci->card, packet);
+		break;
+	}
+
+	if (ctx == &ctx->ohci->at_response_ctx) {
+		packet->ack = ACK_COMPLETE;
+		packet->callback(packet, &ctx->ohci->card, packet->ack);
+	}
+}
+
+static void
+at_context_transmit(struct context *ctx, struct fw_packet *packet)
+{
+	unsigned long flags;
+	int retval;
+
+	spin_lock_irqsave(&ctx->ohci->lock, flags);
+
+	if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id &&
+	    ctx->ohci->generation == packet->generation) {
+		spin_unlock_irqrestore(&ctx->ohci->lock, flags);
+		handle_local_request(ctx, packet);
+		return;
+	}
+
+	retval = at_context_queue_packet(ctx, packet);
+	spin_unlock_irqrestore(&ctx->ohci->lock, flags);
+
+	if (retval < 0)
+		packet->callback(packet, &ctx->ohci->card, packet->ack);
+}
+
+static void bus_reset_tasklet(unsigned long data)
+{
+	struct fw_ohci *ohci = (struct fw_ohci *)data;
+	int self_id_count, i, j, reg;
+	int generation, new_generation;
+	unsigned long flags;
+
+	reg = reg_read(ohci, OHCI1394_NodeID);
+	if (!(reg & OHCI1394_NodeID_idValid)) {
+		fw_error("node ID not valid, new bus reset in progress\n");
+		return;
+	}
+	ohci->node_id = reg & 0xffff;
+
+	/*
+	 * The count in the SelfIDCount register is the number of
+	 * bytes in the self ID receive buffer.  Since we also receive
+	 * the inverted quadlets and a header quadlet, we shift one
+	 * bit extra to get the actual number of self IDs.
+	 */
+
+	self_id_count = (reg_read(ohci, OHCI1394_SelfIDCount) >> 3) & 0x3ff;
+	generation = (le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;
+
+	for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
+		if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1])
+			fw_error("inconsistent self IDs\n");
+		ohci->self_id_buffer[j] = le32_to_cpu(ohci->self_id_cpu[i]);
+	}
+
+	/*
+	 * Check the consistency of the self IDs we just read.  The
+	 * problem we face is that a new bus reset can start while we
+	 * read out the self IDs from the DMA buffer. If this happens,
+	 * the DMA buffer will be overwritten with new self IDs and we
+	 * will read out inconsistent data.  The OHCI specification
+	 * (section 11.2) recommends a technique similar to
+	 * linux/seqlock.h, where we remember the generation of the
+	 * self IDs in the buffer before reading them out and compare
+	 * it to the current generation after reading them out.  If
+	 * the two generations match we know we have a consistent set
+	 * of self IDs.
+	 */
+
+	new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
+	if (new_generation != generation) {
+		fw_notify("recursive bus reset detected, "
+			  "discarding self ids\n");
+		return;
+	}
+
+	/* FIXME: Document how the locking works. */
+	spin_lock_irqsave(&ohci->lock, flags);
+
+	ohci->generation = generation;
+	context_stop(&ohci->at_request_ctx);
+	context_stop(&ohci->at_response_ctx);
+	reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
+
+	/*
+	 * This next bit is unrelated to the AT context stuff but we
+	 * have to do it under the spinlock also.  If a new config rom
+	 * was set up before this reset, the old one is now no longer
+	 * in use and we can free it. Update the config rom pointers
+	 * to point to the current config rom and clear the
+	 * next_config_rom pointer so a new update can take place.
+	 */
+
+	if (ohci->next_config_rom != NULL) {
+		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
+				  ohci->config_rom, ohci->config_rom_bus);
+		ohci->config_rom      = ohci->next_config_rom;
+		ohci->config_rom_bus  = ohci->next_config_rom_bus;
+		ohci->next_config_rom = NULL;
+
+		/*
+		 * Restore config_rom image and manually update
+		 * config_rom registers.  Writing the header quadlet
+		 * will indicate that the config rom is ready, so we
+		 * do that last.
+		 */
+		reg_write(ohci, OHCI1394_BusOptions,
+			  be32_to_cpu(ohci->config_rom[2]));
+		ohci->config_rom[0] = cpu_to_be32(ohci->next_header);
+		reg_write(ohci, OHCI1394_ConfigROMhdr, ohci->next_header);
+	}
+
+	spin_unlock_irqrestore(&ohci->lock, flags);
+
+	fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
+				 self_id_count, ohci->self_id_buffer);
+}
+
+static irqreturn_t irq_handler(int irq, void *data)
+{
+	struct fw_ohci *ohci = data;
+	u32 event, iso_event, cycle_time;
+	int i;
+
+	event = reg_read(ohci, OHCI1394_IntEventClear);
+
+	if (!event)
+		return IRQ_NONE;
+
+	reg_write(ohci, OHCI1394_IntEventClear, event);
+
+	if (event & OHCI1394_selfIDComplete)
+		tasklet_schedule(&ohci->bus_reset_tasklet);
+
+	if (event & OHCI1394_RQPkt)
+		tasklet_schedule(&ohci->ar_request_ctx.tasklet);
+
+	if (event & OHCI1394_RSPkt)
+		tasklet_schedule(&ohci->ar_response_ctx.tasklet);
+
+	if (event & OHCI1394_reqTxComplete)
+		tasklet_schedule(&ohci->at_request_ctx.tasklet);
+
+	if (event & OHCI1394_respTxComplete)
+		tasklet_schedule(&ohci->at_response_ctx.tasklet);
+
+	iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
+	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);
+
+	while (iso_event) {
+		i = ffs(iso_event) - 1;
+		tasklet_schedule(&ohci->ir_context_list[i].context.tasklet);
+		iso_event &= ~(1 << i);
+	}
+
+	iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
+	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);
+
+	while (iso_event) {
+		i = ffs(iso_event) - 1;
+		tasklet_schedule(&ohci->it_context_list[i].context.tasklet);
+		iso_event &= ~(1 << i);
+	}
+
+	if (event & OHCI1394_cycle64Seconds) {
+		cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
+		if ((cycle_time & 0x80000000) == 0)
+			ohci->bus_seconds++;
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
+{
+	struct fw_ohci *ohci = fw_ohci(card);
+	struct pci_dev *dev = to_pci_dev(card->device);
+
+	/*
+	 * When the link is not yet enabled, the atomic config rom
+	 * update mechanism described below in ohci_set_config_rom()
+	 * is not active.  We have to update ConfigRomHeader and
+	 * BusOptions manually, and the write to ConfigROMmap takes
+	 * effect immediately.  We tie this to the enabling of the
+	 * link, so we have a valid config rom before enabling - the
+	 * OHCI requires that ConfigROMhdr and BusOptions have valid
+	 * values before enabling.
+	 *
+	 * However, when the ConfigROMmap is written, some controllers
+	 * always read back quadlets 0 and 2 from the config rom to
+	 * the ConfigRomHeader and BusOptions registers on bus reset.
+	 * They shouldn't do that in this initial case where the link
+	 * isn't enabled.  This means we have to use the same
+	 * workaround here, setting the bus header to 0 and then write
+	 * the right values in the bus reset tasklet.
+	 */
+
+	ohci->next_config_rom =
+		dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
+				   &ohci->next_config_rom_bus, GFP_KERNEL);
+	if (ohci->next_config_rom == NULL)
+		return -ENOMEM;
+
+	memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
+	fw_memcpy_to_be32(ohci->next_config_rom, config_rom, length * 4);
+
+	ohci->next_header = config_rom[0];
+	ohci->next_config_rom[0] = 0;
+	reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
+	reg_write(ohci, OHCI1394_BusOptions, config_rom[2]);
+	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
+
+	reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
+
+	if (request_irq(dev->irq, irq_handler,
+			IRQF_SHARED, ohci_driver_name, ohci)) {
+		fw_error("Failed to allocate shared interrupt %d.\n",
+			 dev->irq);
+		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
+				  ohci->next_config_rom,
+				  ohci->next_config_rom_bus);
+		return -EIO;
+	}
+
+	reg_write(ohci, OHCI1394_HCControlSet,
+		  OHCI1394_HCControl_linkEnable |
+		  OHCI1394_HCControl_BIBimageValid);
+	flush_writes(ohci);
+
+	/*
+	 * We are ready to go, initiate bus reset to finish the
+	 * initialization.
+	 */
+
+	fw_core_initiate_bus_reset(&ohci->card, 1);
+
+	return 0;
+}
+
+static int
+ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length)
+{
+	struct fw_ohci *ohci;
+	unsigned long flags;
+	int retval = 0;
+	__be32 *next_config_rom;
+	dma_addr_t next_config_rom_bus;
+
+	ohci = fw_ohci(card);
+
+	/*
+	 * When the OHCI controller is enabled, the config rom update
+	 * mechanism is a bit tricky, but easy enough to use.  See
+	 * section 5.5.6 in the OHCI specification.
+	 *
+	 * The OHCI controller caches the new config rom address in a
+	 * shadow register (ConfigROMmapNext) and needs a bus reset
+	 * for the changes to take place.  When the bus reset is
+	 * detected, the controller loads the new values for the
+	 * ConfigRomHeader and BusOptions registers from the specified
+	 * config rom and loads ConfigROMmap from the ConfigROMmapNext
+	 * shadow register. All automatically and atomically.
+	 *
+	 * Now, there's a twist to this story.  The automatic load of
+	 * ConfigRomHeader and BusOptions doesn't honor the
+	 * noByteSwapData bit, so with a be32 config rom, the
+	 * controller will load be32 values into these registers
+	 * during the atomic update, even on little endian
+	 * architectures.  The workaround we use is to put a 0 in the
+	 * header quadlet; 0 is endian agnostic and means that the
+	 * config rom isn't ready yet.  In the bus reset tasklet we
+	 * then set up the real values for the two registers.
+	 *
+	 * We use ohci->lock to avoid racing with the code that sets
+	 * ohci->next_config_rom to NULL (see bus_reset_tasklet).
+	 */
+
+	next_config_rom =
+		dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
+				   &next_config_rom_bus, GFP_KERNEL);
+	if (next_config_rom == NULL)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&ohci->lock, flags);
+
+	if (ohci->next_config_rom == NULL) {
+		ohci->next_config_rom = next_config_rom;
+		ohci->next_config_rom_bus = next_config_rom_bus;
+
+		memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
+		fw_memcpy_to_be32(ohci->next_config_rom, config_rom,
+				  length * 4);
+
+		ohci->next_header = config_rom[0];
+		ohci->next_config_rom[0] = 0;
+
+		reg_write(ohci, OHCI1394_ConfigROMmap,
+			  ohci->next_config_rom_bus);
+	} else {
+		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
+				  next_config_rom, next_config_rom_bus);
+		retval = -EBUSY;
+	}
+
+	spin_unlock_irqrestore(&ohci->lock, flags);
+
+	/*
+	 * Now initiate a bus reset to have the changes take
+	 * effect. We clean up the old config rom memory and DMA
+	 * mappings in the bus reset tasklet, since the OHCI
+	 * controller could need to access it before the bus reset
+	 * takes effect.
+	 */
+	if (retval == 0)
+		fw_core_initiate_bus_reset(&ohci->card, 1);
+
+	return retval;
+}
+
+static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
+{
+	struct fw_ohci *ohci = fw_ohci(card);
+
+	at_context_transmit(&ohci->at_request_ctx, packet);
+}
+
+static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
+{
+	struct fw_ohci *ohci = fw_ohci(card);
+
+	at_context_transmit(&ohci->at_response_ctx, packet);
+}
+
+static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
+{
+	struct fw_ohci *ohci = fw_ohci(card);
+	struct context *ctx = &ohci->at_request_ctx;
+	struct driver_data *driver_data = packet->driver_data;
+	int retval = -ENOENT;
+
+	tasklet_disable(&ctx->tasklet);
+
+	if (packet->ack != 0)
+		goto out;
+
+	driver_data->packet = NULL;
+	packet->ack = RCODE_CANCELLED;
+	packet->callback(packet, &ohci->card, packet->ack);
+	retval = 0;
+
+ out:
+	tasklet_enable(&ctx->tasklet);
+
+	return retval;
+}
+
+static int
+ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
+{
+	struct fw_ohci *ohci = fw_ohci(card);
+	unsigned long flags;
+	int n, retval = 0;
+
+	/*
+	 * FIXME:  Make sure this bitmask is cleared when we clear the busReset
+	 * interrupt bit.  Clear physReqResourceAllBuses on bus reset.
+	 */
+
+	spin_lock_irqsave(&ohci->lock, flags);
+
+	if (ohci->generation != generation) {
+		retval = -ESTALE;
+		goto out;
+	}
+
+	/*
+	 * Note, if the node ID contains a non-local bus ID, physical DMA is
+	 * enabled for _all_ nodes on remote buses.
+	 */
+
+	n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
+	if (n < 32)
+		reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
+	else
+		reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));
+
+	flush_writes(ohci);
+ out:
+	spin_unlock_irqrestore(&ohci->lock, flags);
+	return retval;
+}
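+
+/*
+ * Worked example: a local bus node id of 0xffc2 gives n = 2, so the
+ * write above becomes
+ *
+ *	reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << 2);
+ *
+ * whereas any node id with a foreign bus number yields n = 63 and,
+ * per the note above, opens physical DMA for all remote-bus nodes.
+ */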
+
+static u64
+ohci_get_bus_time(struct fw_card *card)
+{
+	struct fw_ohci *ohci = fw_ohci(card);
+	u32 cycle_time;
+	u64 bus_time;
+
+	cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
+	bus_time = ((u64) ohci->bus_seconds << 32) | cycle_time;
+
+	return bus_time;
+}
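+
+/*
+ * For reference, the OHCI 1.1 cycle timer layout assumed here packs
+ * 7 bits of cycleSeconds, 13 bits of cycleCount and 12 bits of
+ * cycleOffset into one register:
+ *
+ *	seconds = cycle_time >> 25;
+ *	count   = (cycle_time >> 12) & 0x1fff;
+ *	offset  = cycle_time & 0xfff;
+ *
+ * The 7-bit seconds field wraps every 128 seconds, which is why the
+ * cycle64Seconds handler in irq_handler() keeps ohci->bus_seconds
+ * as the upper 32 bits of this 64-bit bus time.
+ */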
+
+static int handle_ir_dualbuffer_packet(struct context *context,
+				       struct descriptor *d,
+				       struct descriptor *last)
+{
+	struct iso_context *ctx =
+		container_of(context, struct iso_context, context);
+	struct db_descriptor *db = (struct db_descriptor *) d;
+	__le32 *ir_header;
+	size_t header_length;
+	void *p, *end;
+	int i;
+
+	if (db->first_res_count > 0 && db->second_res_count > 0)
+		/* This descriptor isn't done yet, stop iteration. */
+		return 0;
+
+	header_length = le16_to_cpu(db->first_req_count) -
+		le16_to_cpu(db->first_res_count);
+
+	i = ctx->header_length;
+	p = db + 1;
+	end = p + header_length;
+	while (p < end && i + ctx->base.header_size <= PAGE_SIZE) {
+		/*
+		 * The iso header is byteswapped to little endian by
+		 * the controller, but the remaining header quadlets
+		 * are big endian.  We want to present all the headers
+		 * as big endian, so we have to swap the first
+		 * quadlet.
+		 */
+		*(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
+		memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
+		i += ctx->base.header_size;
+		p += ctx->base.header_size + 4;
+	}
+
+	ctx->header_length = i;
+
+	if (le16_to_cpu(db->control) & DESCRIPTOR_IRQ_ALWAYS) {
+		ir_header = (__le32 *) (db + 1);
+		ctx->base.callback(&ctx->base,
+				   le32_to_cpu(ir_header[0]) & 0xffff,
+				   ctx->header_length, ctx->header,
+				   ctx->base.callback_data);
+		ctx->header_length = 0;
+	}
+
+	return 1;
+}
+
+static int handle_it_packet(struct context *context,
+			    struct descriptor *d,
+			    struct descriptor *last)
+{
+	struct iso_context *ctx =
+		container_of(context, struct iso_context, context);
+
+	if (last->transfer_status == 0)
+		/* This descriptor isn't done yet, stop iteration. */
+		return 0;
+
+	if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS)
+		ctx->base.callback(&ctx->base, le16_to_cpu(last->res_count),
+				   0, NULL, ctx->base.callback_data);
+
+	return 1;
+}
+
+static struct fw_iso_context *
+ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
+{
+	struct fw_ohci *ohci = fw_ohci(card);
+	struct iso_context *ctx, *list;
+	descriptor_callback_t callback;
+	u32 *mask, regs;
+	unsigned long flags;
+	int index, retval = -ENOMEM;
+
+	if (type == FW_ISO_CONTEXT_TRANSMIT) {
+		mask = &ohci->it_context_mask;
+		list = ohci->it_context_list;
+		callback = handle_it_packet;
+	} else {
+		mask = &ohci->ir_context_mask;
+		list = ohci->ir_context_list;
+		callback = handle_ir_dualbuffer_packet;
+	}
+
+	/* FIXME: We need a fallback for pre 1.1 OHCI. */
+	if (callback == handle_ir_dualbuffer_packet &&
+	    ohci->version < OHCI_VERSION_1_1)
+		return ERR_PTR(-EINVAL);
+
+	spin_lock_irqsave(&ohci->lock, flags);
+	index = ffs(*mask) - 1;
+	if (index >= 0)
+		*mask &= ~(1 << index);
+	spin_unlock_irqrestore(&ohci->lock, flags);
+
+	if (index < 0)
+		return ERR_PTR(-EBUSY);
+
+	if (type == FW_ISO_CONTEXT_TRANSMIT)
+		regs = OHCI1394_IsoXmitContextBase(index);
+	else
+		regs = OHCI1394_IsoRcvContextBase(index);
+
+	ctx = &list[index];
+	memset(ctx, 0, sizeof(*ctx));
+	ctx->header_length = 0;
+	ctx->header = (void *) __get_free_page(GFP_KERNEL);
+	if (ctx->header == NULL)
+		goto out;
+
+	retval = context_init(&ctx->context, ohci, ISO_BUFFER_SIZE,
+			      regs, callback);
+	if (retval < 0)
+		goto out_with_header;
+
+	return &ctx->base;
+
+ out_with_header:
+	free_page((unsigned long)ctx->header);
+ out:
+	spin_lock_irqsave(&ohci->lock, flags);
+	*mask |= 1 << index;
+	spin_unlock_irqrestore(&ohci->lock, flags);
+
+	return ERR_PTR(retval);
+}
+
+static int ohci_start_iso(struct fw_iso_context *base,
+			  s32 cycle, u32 sync, u32 tags)
+{
+	struct iso_context *ctx = container_of(base, struct iso_context, base);
+	struct fw_ohci *ohci = ctx->context.ohci;
+	u32 control, match;
+	int index;
+
+	if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
+		index = ctx - ohci->it_context_list;
+		match = 0;
+		if (cycle >= 0)
+			match = IT_CONTEXT_CYCLE_MATCH_ENABLE |
+				(cycle & 0x7fff) << 16;
+
+		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
+		reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
+		context_run(&ctx->context, match);
+	} else {
+		index = ctx - ohci->ir_context_list;
+		control = IR_CONTEXT_DUAL_BUFFER_MODE | IR_CONTEXT_ISOCH_HEADER;
+		match = (tags << 28) | (sync << 8) | ctx->base.channel;
+		if (cycle >= 0) {
+			match |= (cycle & 0x07fff) << 12;
+			control |= IR_CONTEXT_CYCLE_MATCH_ENABLE;
+		}
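+		/*
+		 * The value assembled above mirrors the IR ContextMatch
+		 * register layout: tag bits in [31:28], the 13-bit cycle
+		 * match in [24:12], the sync field in [11:8] and the
+		 * channel number in [5:0].
+		 */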
+
+		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
+		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
+		reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
+		context_run(&ctx->context, control);
+	}
+
+	return 0;
+}
+
+static int ohci_stop_iso(struct fw_iso_context *base)
+{
+	struct fw_ohci *ohci = fw_ohci(base->card);
+	struct iso_context *ctx = container_of(base, struct iso_context, base);
+	int index;
+
+	if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
+		index = ctx - ohci->it_context_list;
+		reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
+	} else {
+		index = ctx - ohci->ir_context_list;
+		reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
+	}
+	flush_writes(ohci);
+	context_stop(&ctx->context);
+
+	return 0;
+}
+
+static void ohci_free_iso_context(struct fw_iso_context *base)
+{
+	struct fw_ohci *ohci = fw_ohci(base->card);
+	struct iso_context *ctx = container_of(base, struct iso_context, base);
+	unsigned long flags;
+	int index;
+
+	ohci_stop_iso(base);
+	context_release(&ctx->context);
+	free_page((unsigned long)ctx->header);
+
+	spin_lock_irqsave(&ohci->lock, flags);
+
+	if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
+		index = ctx - ohci->it_context_list;
+		ohci->it_context_mask |= 1 << index;
+	} else {
+		index = ctx - ohci->ir_context_list;
+		ohci->ir_context_mask |= 1 << index;
+	}
+
+	spin_unlock_irqrestore(&ohci->lock, flags);
+}
+
+static int
+ohci_queue_iso_transmit(struct fw_iso_context *base,
+			struct fw_iso_packet *packet,
+			struct fw_iso_buffer *buffer,
+			unsigned long payload)
+{
+	struct iso_context *ctx = container_of(base, struct iso_context, base);
+	struct descriptor *d, *last, *pd;
+	struct fw_iso_packet *p;
+	__le32 *header;
+	dma_addr_t d_bus, page_bus;
+	u32 z, header_z, payload_z, irq;
+	u32 payload_index, payload_end_index, next_page_index;
+	int page, end_page, i, length, offset;
+
+	/*
+	 * FIXME: Cycle lost behavior should be configurable: lose
+	 * packet, retransmit or terminate.
+	 */
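+
+	/*
+	 * The descriptor program built below is, roughly: d[0] and d[1]
+	 * form an immediate descriptor carrying the two isochronous
+	 * header quadlets (omitted when p->skip is set), d[2] optionally
+	 * points at extra header data stashed behind the descriptor
+	 * block, and the remaining descriptors map one payload page
+	 * each, with the last one flagged OUTPUT_LAST.
+	 */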
+
+	p = packet;
+	payload_index = payload;
+
+	if (p->skip)
+		z = 1;
+	else
+		z = 2;
+	if (p->header_length > 0)
+		z++;
+
+	/* Determine the first page the payload isn't contained in. */
+	end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
+	if (p->payload_length > 0)
+		payload_z = end_page - (payload_index >> PAGE_SHIFT);
+	else
+		payload_z = 0;
+
+	z += payload_z;
+
+	/* Get header size in number of descriptors. */
+	header_z = DIV_ROUND_UP(p->header_length, sizeof(*d));
+
+	d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
+	if (d == NULL)
+		return -ENOMEM;
+
+	if (!p->skip) {
+		d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
+		d[0].req_count = cpu_to_le16(8);
+
+		header = (__le32 *) &d[1];
+		header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
+					IT_HEADER_TAG(p->tag) |
+					IT_HEADER_TCODE(TCODE_STREAM_DATA) |
+					IT_HEADER_CHANNEL(ctx->base.channel) |
+					IT_HEADER_SPEED(ctx->base.speed));
+		header[1] =
+			cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
+							  p->payload_length));
+	}
+
+	if (p->header_length > 0) {
+		d[2].req_count    = cpu_to_le16(p->header_length);
+		d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d));
+		memcpy(&d[z], p->header, p->header_length);
+	}
+
+	pd = d + z - payload_z;
+	payload_end_index = payload_index + p->payload_length;
+	for (i = 0; i < payload_z; i++) {
+		page               = payload_index >> PAGE_SHIFT;
+		offset             = payload_index & ~PAGE_MASK;
+		next_page_index    = (page + 1) << PAGE_SHIFT;
+		length             =
+			min(next_page_index, payload_end_index) - payload_index;
+		pd[i].req_count    = cpu_to_le16(length);
+
+		page_bus = page_private(buffer->pages[page]);
+		pd[i].data_address = cpu_to_le32(page_bus + offset);
+
+		payload_index += length;
+	}
+
+	if (p->interrupt)
+		irq = DESCRIPTOR_IRQ_ALWAYS;
+	else
+		irq = DESCRIPTOR_NO_IRQ;
+
+	last = z == 2 ? d : d + z - 1;
+	last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
+				     DESCRIPTOR_STATUS |
+				     DESCRIPTOR_BRANCH_ALWAYS |
+				     irq);
+
+	context_append(&ctx->context, d, z, header_z);
+
+	return 0;
+}
+
+static int
+ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
+				  struct fw_iso_packet *packet,
+				  struct fw_iso_buffer *buffer,
+				  unsigned long payload)
+{
+	struct iso_context *ctx = container_of(base, struct iso_context, base);
+	struct db_descriptor *db = NULL;
+	struct descriptor *d;
+	struct fw_iso_packet *p;
+	dma_addr_t d_bus, page_bus;
+	u32 z, header_z, length, rest;
+	int page, offset, packet_count, header_size;
+
+	/*
+	 * FIXME: Cycle lost behavior should be configurable: lose
+	 * packet, retransmit or terminate.
+	 */
+
+	if (packet->skip) {
+		d = context_get_descriptors(&ctx->context, 2, &d_bus);
+		if (d == NULL)
+			return -ENOMEM;
+
+		db = (struct db_descriptor *) d;
+		db->control = cpu_to_le16(DESCRIPTOR_STATUS |
+					  DESCRIPTOR_BRANCH_ALWAYS |
+					  DESCRIPTOR_WAIT);
+		db->first_size = cpu_to_le16(ctx->base.header_size + 4);
+		context_append(&ctx->context, d, 2, 0);
+	}
+
+	p = packet;
+	z = 2;
+
+	/*
+	 * The OHCI controller puts the status word in the header
+	 * buffer too, so we need 4 extra bytes per packet.
+	 */
+	packet_count = p->header_length / ctx->base.header_size;
+	header_size = packet_count * (ctx->base.header_size + 4);
+
+	/* Get header size in number of descriptors. */
+	header_z = DIV_ROUND_UP(header_size, sizeof(*d));
+	page     = payload >> PAGE_SHIFT;
+	offset   = payload & ~PAGE_MASK;
+	rest     = p->payload_length;
+
+	/* FIXME: OHCI 1.0 doesn't support dual buffer receive */
+	/* FIXME: make packet-per-buffer/dual-buffer a context option */
+	while (rest > 0) {
+		d = context_get_descriptors(&ctx->context,
+					    z + header_z, &d_bus);
+		if (d == NULL)
+			return -ENOMEM;
+
+		db = (struct db_descriptor *) d;
+		db->control = cpu_to_le16(DESCRIPTOR_STATUS |
+					  DESCRIPTOR_BRANCH_ALWAYS);
+		db->first_size = cpu_to_le16(ctx->base.header_size + 4);
+		db->first_req_count = cpu_to_le16(header_size);
+		db->first_res_count = db->first_req_count;
+		db->first_buffer = cpu_to_le32(d_bus + sizeof(*db));
+
+		if (offset + rest < PAGE_SIZE)
+			length = rest;
+		else
+			length = PAGE_SIZE - offset;
+
+		db->second_req_count = cpu_to_le16(length);
+		db->second_res_count = db->second_req_count;
+		page_bus = page_private(buffer->pages[page]);
+		db->second_buffer = cpu_to_le32(page_bus + offset);
+
+		if (p->interrupt && length == rest)
+			db->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
+
+		context_append(&ctx->context, d, z, header_z);
+		offset = (offset + length) & ~PAGE_MASK;
+		rest -= length;
+		page++;
+	}
+
+	return 0;
+}
+
+static int
+ohci_queue_iso(struct fw_iso_context *base,
+	       struct fw_iso_packet *packet,
+	       struct fw_iso_buffer *buffer,
+	       unsigned long payload)
+{
+	struct iso_context *ctx = container_of(base, struct iso_context, base);
+
+	if (base->type == FW_ISO_CONTEXT_TRANSMIT)
+		return ohci_queue_iso_transmit(base, packet, buffer, payload);
+	else if (ctx->context.ohci->version >= OHCI_VERSION_1_1)
+		return ohci_queue_iso_receive_dualbuffer(base, packet,
+							 buffer, payload);
+	else
+		/* FIXME: Implement fallback for OHCI 1.0 controllers. */
+		return -EINVAL;
+}
+
+static const struct fw_card_driver ohci_driver = {
+	.name			= ohci_driver_name,
+	.enable			= ohci_enable,
+	.update_phy_reg		= ohci_update_phy_reg,
+	.set_config_rom		= ohci_set_config_rom,
+	.send_request		= ohci_send_request,
+	.send_response		= ohci_send_response,
+	.cancel_packet		= ohci_cancel_packet,
+	.enable_phys_dma	= ohci_enable_phys_dma,
+	.get_bus_time		= ohci_get_bus_time,
+
+	.allocate_iso_context	= ohci_allocate_iso_context,
+	.free_iso_context	= ohci_free_iso_context,
+	.queue_iso		= ohci_queue_iso,
+	.start_iso		= ohci_start_iso,
+	.stop_iso		= ohci_stop_iso,
+};
+
+static int software_reset(struct fw_ohci *ohci)
+{
+	int i;
+
+	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
+
+	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
+		if ((reg_read(ohci, OHCI1394_HCControlSet) &
+		     OHCI1394_HCControl_softReset) == 0)
+			return 0;
+		msleep(1);
+	}
+
+	return -EBUSY;
+}
+
+static int __devinit
+pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
+{
+	struct fw_ohci *ohci;
+	u32 bus_options, max_receive, link_speed;
+	u64 guid;
+	int err;
+	size_t size;
+
+	ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
+	if (ohci == NULL) {
+		fw_error("Could not malloc fw_ohci data.\n");
+		return -ENOMEM;
+	}
+
+	fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);
+
+	err = pci_enable_device(dev);
+	if (err) {
+		fw_error("Failed to enable OHCI hardware.\n");
+		goto fail_put_card;
+	}
+
+	pci_set_master(dev);
+	pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
+	pci_set_drvdata(dev, ohci);
+
+	spin_lock_init(&ohci->lock);
+
+	tasklet_init(&ohci->bus_reset_tasklet,
+		     bus_reset_tasklet, (unsigned long)ohci);
+
+	err = pci_request_region(dev, 0, ohci_driver_name);
+	if (err) {
+		fw_error("MMIO resource unavailable\n");
+		goto fail_disable;
+	}
+
+	ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE);
+	if (ohci->registers == NULL) {
+		fw_error("Failed to remap registers\n");
+		err = -ENXIO;
+		goto fail_iomem;
+	}
+
+	if (software_reset(ohci)) {
+		fw_error("Failed to reset ohci card.\n");
+		err = -EBUSY;
+		goto fail_registers;
+	}
+
+	/*
+	 * Now enable LPS, which we need in order to start accessing
+	 * most of the registers.  In fact, on some cards (ALI M5251),
+	 * accessing registers in the SClk domain without LPS enabled
+	 * will lock up the machine.  Wait 50 msec to make sure we have
+	 * the link fully enabled.
+	 */
+	reg_write(ohci, OHCI1394_HCControlSet,
+		  OHCI1394_HCControl_LPS |
+		  OHCI1394_HCControl_postedWriteEnable);
+	flush_writes(ohci);
+	msleep(50);
+
+	reg_write(ohci, OHCI1394_HCControlClear,
+		  OHCI1394_HCControl_noByteSwapData);
+
+	reg_write(ohci, OHCI1394_LinkControlSet,
+		  OHCI1394_LinkControl_rcvSelfID |
+		  OHCI1394_LinkControl_cycleTimerEnable |
+		  OHCI1394_LinkControl_cycleMaster);
+
+	ar_context_init(&ohci->ar_request_ctx, ohci,
+			OHCI1394_AsReqRcvContextControlSet);
+
+	ar_context_init(&ohci->ar_response_ctx, ohci,
+			OHCI1394_AsRspRcvContextControlSet);
+
+	context_init(&ohci->at_request_ctx, ohci, AT_BUFFER_SIZE,
+		     OHCI1394_AsReqTrContextControlSet, handle_at_packet);
+
+	context_init(&ohci->at_response_ctx, ohci, AT_BUFFER_SIZE,
+		     OHCI1394_AsRspTrContextControlSet, handle_at_packet);
+
+	reg_write(ohci, OHCI1394_ATRetries,
+		  OHCI1394_MAX_AT_REQ_RETRIES |
+		  (OHCI1394_MAX_AT_RESP_RETRIES << 4) |
+		  (OHCI1394_MAX_PHYS_RESP_RETRIES << 8));
+
+	reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
+	ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
+	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
+	size = sizeof(struct iso_context) * hweight32(ohci->it_context_mask);
+	ohci->it_context_list = kzalloc(size, GFP_KERNEL);
+
+	reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
+	ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
+	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
+	size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask);
+	ohci->ir_context_list = kzalloc(size, GFP_KERNEL);
+
+	if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
+		fw_error("Out of memory for it/ir contexts.\n");
+		err = -ENOMEM;
+		goto fail_registers;
+	}
+
+	/* self-id dma buffer allocation */
+	ohci->self_id_cpu = dma_alloc_coherent(ohci->card.device,
+					       SELF_ID_BUF_SIZE,
+					       &ohci->self_id_bus,
+					       GFP_KERNEL);
+	if (ohci->self_id_cpu == NULL) {
+		fw_error("Out of memory for self ID buffer.\n");
+		err = -ENOMEM;
+		goto fail_registers;
+	}
+
+	reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
+	reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000);
+	reg_write(ohci, OHCI1394_IntEventClear, ~0);
+	reg_write(ohci, OHCI1394_IntMaskClear, ~0);
+	reg_write(ohci, OHCI1394_IntMaskSet,
+		  OHCI1394_selfIDComplete |
+		  OHCI1394_RQPkt | OHCI1394_RSPkt |
+		  OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
+		  OHCI1394_isochRx | OHCI1394_isochTx |
+		  OHCI1394_masterIntEnable |
+		  OHCI1394_cycle64Seconds);
+
+	bus_options = reg_read(ohci, OHCI1394_BusOptions);
+	max_receive = (bus_options >> 12) & 0xf;
+	link_speed = bus_options & 0x7;
+	guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
+		reg_read(ohci, OHCI1394_GUIDLo);
+
+	err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
+	if (err < 0)
+		goto fail_self_id;
+
+	ohci->version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
+	fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n",
+		  dev->dev.bus_id, ohci->version >> 16, ohci->version & 0xff);
+
+	return 0;
+
+ fail_self_id:
+	dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
+			  ohci->self_id_cpu, ohci->self_id_bus);
+ fail_registers:
+	kfree(ohci->it_context_list);
+	kfree(ohci->ir_context_list);
+	pci_iounmap(dev, ohci->registers);
+ fail_iomem:
+	pci_release_region(dev, 0);
+ fail_disable:
+	pci_disable_device(dev);
+ fail_put_card:
+	fw_card_put(&ohci->card);
+
+	return err;
+}
+
+static void pci_remove(struct pci_dev *dev)
+{
+	struct fw_ohci *ohci;
+
+	ohci = pci_get_drvdata(dev);
+	reg_write(ohci, OHCI1394_IntMaskClear, ~0);
+	flush_writes(ohci);
+	fw_core_remove_card(&ohci->card);
+
+	/*
+	 * FIXME: Fail all pending packets here, now that the upper
+	 * layers can't queue any more.
+	 */
+
+	software_reset(ohci);
+	free_irq(dev->irq, ohci);
+	dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
+			  ohci->self_id_cpu, ohci->self_id_bus);
+	kfree(ohci->it_context_list);
+	kfree(ohci->ir_context_list);
+	pci_iounmap(dev, ohci->registers);
+	pci_release_region(dev, 0);
+	pci_disable_device(dev);
+	fw_card_put(&ohci->card);
+
+	fw_notify("Removed fw-ohci device.\n");
+}
+
+static struct pci_device_id pci_table[] = {
+	{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(pci, pci_table);
+
+static struct pci_driver fw_ohci_pci_driver = {
+	.name		= ohci_driver_name,
+	.id_table	= pci_table,
+	.probe		= pci_probe,
+	.remove		= pci_remove,
+};
+
+MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
+MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
+MODULE_LICENSE("GPL");
+
+/* Provide a module alias so root-on-sbp2 initrds don't break. */
+#ifndef CONFIG_IEEE1394_OHCI1394_MODULE
+MODULE_ALIAS("ohci1394");
+#endif
+
+static int __init fw_ohci_init(void)
+{
+	return pci_register_driver(&fw_ohci_pci_driver);
+}
+
+static void __exit fw_ohci_cleanup(void)
+{
+	pci_unregister_driver(&fw_ohci_pci_driver);
+}
+
+module_init(fw_ohci_init);
+module_exit(fw_ohci_cleanup);
diff --git a/drivers/firewire/fw-ohci.h b/drivers/firewire/fw-ohci.h
new file mode 100644
index 000000000000..fa15706397d7
--- /dev/null
+++ b/drivers/firewire/fw-ohci.h
@@ -0,0 +1,153 @@
+#ifndef __fw_ohci_h
+#define __fw_ohci_h
+
+/* OHCI register map */
+
+#define OHCI1394_Version                      0x000
+#define OHCI1394_GUID_ROM                     0x004
+#define OHCI1394_ATRetries                    0x008
+#define OHCI1394_CSRData                      0x00C
+#define OHCI1394_CSRCompareData               0x010
+#define OHCI1394_CSRControl                   0x014
+#define OHCI1394_ConfigROMhdr                 0x018
+#define OHCI1394_BusID                        0x01C
+#define OHCI1394_BusOptions                   0x020
+#define OHCI1394_GUIDHi                       0x024
+#define OHCI1394_GUIDLo                       0x028
+#define OHCI1394_ConfigROMmap                 0x034
+#define OHCI1394_PostedWriteAddressLo         0x038
+#define OHCI1394_PostedWriteAddressHi         0x03C
+#define OHCI1394_VendorID                     0x040
+#define OHCI1394_HCControlSet                 0x050
+#define OHCI1394_HCControlClear               0x054
+#define  OHCI1394_HCControl_BIBimageValid	0x80000000
+#define  OHCI1394_HCControl_noByteSwapData	0x40000000
+#define  OHCI1394_HCControl_programPhyEnable	0x00800000
+#define  OHCI1394_HCControl_aPhyEnhanceEnable	0x00400000
+#define  OHCI1394_HCControl_LPS			0x00080000
+#define  OHCI1394_HCControl_postedWriteEnable	0x00040000
+#define  OHCI1394_HCControl_linkEnable		0x00020000
+#define  OHCI1394_HCControl_softReset		0x00010000
+#define OHCI1394_SelfIDBuffer                 0x064
+#define OHCI1394_SelfIDCount                  0x068
+#define OHCI1394_IRMultiChanMaskHiSet         0x070
+#define OHCI1394_IRMultiChanMaskHiClear       0x074
+#define OHCI1394_IRMultiChanMaskLoSet         0x078
+#define OHCI1394_IRMultiChanMaskLoClear       0x07C
+#define OHCI1394_IntEventSet                  0x080
+#define OHCI1394_IntEventClear                0x084
+#define OHCI1394_IntMaskSet                   0x088
+#define OHCI1394_IntMaskClear                 0x08C
+#define OHCI1394_IsoXmitIntEventSet           0x090
+#define OHCI1394_IsoXmitIntEventClear         0x094
+#define OHCI1394_IsoXmitIntMaskSet            0x098
+#define OHCI1394_IsoXmitIntMaskClear          0x09C
+#define OHCI1394_IsoRecvIntEventSet           0x0A0
+#define OHCI1394_IsoRecvIntEventClear         0x0A4
+#define OHCI1394_IsoRecvIntMaskSet            0x0A8
+#define OHCI1394_IsoRecvIntMaskClear          0x0AC
+#define OHCI1394_InitialBandwidthAvailable    0x0B0
+#define OHCI1394_InitialChannelsAvailableHi   0x0B4
+#define OHCI1394_InitialChannelsAvailableLo   0x0B8
+#define OHCI1394_FairnessControl              0x0DC
+#define OHCI1394_LinkControlSet               0x0E0
+#define OHCI1394_LinkControlClear             0x0E4
+#define   OHCI1394_LinkControl_rcvSelfID	(1 << 9)
+#define   OHCI1394_LinkControl_rcvPhyPkt	(1 << 10)
+#define   OHCI1394_LinkControl_cycleTimerEnable	(1 << 20)
+#define   OHCI1394_LinkControl_cycleMaster	(1 << 21)
+#define   OHCI1394_LinkControl_cycleSource	(1 << 22)
+#define OHCI1394_NodeID                       0x0E8
+#define   OHCI1394_NodeID_idValid             0x80000000
+#define OHCI1394_PhyControl                   0x0EC
+#define   OHCI1394_PhyControl_Read(addr)	(((addr) << 8) | 0x00008000)
+#define   OHCI1394_PhyControl_ReadDone		0x80000000
+#define   OHCI1394_PhyControl_ReadData(r)	(((r) & 0x00ff0000) >> 16)
+#define   OHCI1394_PhyControl_Write(addr, data)	(((addr) << 8) | (data) | 0x00004000)
+#define   OHCI1394_PhyControl_WriteDone		0x00004000
+#define OHCI1394_IsochronousCycleTimer        0x0F0
+#define OHCI1394_AsReqFilterHiSet             0x100
+#define OHCI1394_AsReqFilterHiClear           0x104
+#define OHCI1394_AsReqFilterLoSet             0x108
+#define OHCI1394_AsReqFilterLoClear           0x10C
+#define OHCI1394_PhyReqFilterHiSet            0x110
+#define OHCI1394_PhyReqFilterHiClear          0x114
+#define OHCI1394_PhyReqFilterLoSet            0x118
+#define OHCI1394_PhyReqFilterLoClear          0x11C
+#define OHCI1394_PhyUpperBound                0x120
+
+#define OHCI1394_AsReqTrContextBase           0x180
+#define OHCI1394_AsReqTrContextControlSet     0x180
+#define OHCI1394_AsReqTrContextControlClear   0x184
+#define OHCI1394_AsReqTrCommandPtr            0x18C
+
+#define OHCI1394_AsRspTrContextBase           0x1A0
+#define OHCI1394_AsRspTrContextControlSet     0x1A0
+#define OHCI1394_AsRspTrContextControlClear   0x1A4
+#define OHCI1394_AsRspTrCommandPtr            0x1AC
+
+#define OHCI1394_AsReqRcvContextBase          0x1C0
+#define OHCI1394_AsReqRcvContextControlSet    0x1C0
+#define OHCI1394_AsReqRcvContextControlClear  0x1C4
+#define OHCI1394_AsReqRcvCommandPtr           0x1CC
+
+#define OHCI1394_AsRspRcvContextBase          0x1E0
+#define OHCI1394_AsRspRcvContextControlSet    0x1E0
+#define OHCI1394_AsRspRcvContextControlClear  0x1E4
+#define OHCI1394_AsRspRcvCommandPtr           0x1EC
+
+/* Isochronous transmit registers */
+#define OHCI1394_IsoXmitContextBase(n)           (0x200 + 16 * (n))
+#define OHCI1394_IsoXmitContextControlSet(n)     (0x200 + 16 * (n))
+#define OHCI1394_IsoXmitContextControlClear(n)   (0x204 + 16 * (n))
+#define OHCI1394_IsoXmitCommandPtr(n)            (0x20C + 16 * (n))
+
+/* Isochronous receive registers */
+#define OHCI1394_IsoRcvContextBase(n)         (0x400 + 32 * (n))
+#define OHCI1394_IsoRcvContextControlSet(n)   (0x400 + 32 * (n))
+#define OHCI1394_IsoRcvContextControlClear(n) (0x404 + 32 * (n))
+#define OHCI1394_IsoRcvCommandPtr(n)          (0x40C + 32 * (n))
+#define OHCI1394_IsoRcvContextMatch(n)        (0x410 + 32 * (n))
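+
+/*
+ * Each IT context occupies 16 bytes of register space, while IR contexts
+ * take 32 bytes each to accommodate the additional ContextMatch register.
+ */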
+
+/* Interrupts Mask/Events */
+#define OHCI1394_reqTxComplete		0x00000001
+#define OHCI1394_respTxComplete		0x00000002
+#define OHCI1394_ARRQ			0x00000004
+#define OHCI1394_ARRS			0x00000008
+#define OHCI1394_RQPkt			0x00000010
+#define OHCI1394_RSPkt			0x00000020
+#define OHCI1394_isochTx		0x00000040
+#define OHCI1394_isochRx		0x00000080
+#define OHCI1394_postedWriteErr		0x00000100
+#define OHCI1394_lockRespErr		0x00000200
+#define OHCI1394_selfIDComplete		0x00010000
+#define OHCI1394_busReset		0x00020000
+#define OHCI1394_phy			0x00080000
+#define OHCI1394_cycleSynch		0x00100000
+#define OHCI1394_cycle64Seconds		0x00200000
+#define OHCI1394_cycleLost		0x00400000
+#define OHCI1394_cycleInconsistent	0x00800000
+#define OHCI1394_unrecoverableError	0x01000000
+#define OHCI1394_cycleTooLong		0x02000000
+#define OHCI1394_phyRegRcvd		0x04000000
+#define OHCI1394_masterIntEnable	0x80000000
+
+#define OHCI1394_evt_no_status		0x0
+#define OHCI1394_evt_long_packet	0x2
+#define OHCI1394_evt_missing_ack	0x3
+#define OHCI1394_evt_underrun		0x4
+#define OHCI1394_evt_overrun		0x5
+#define OHCI1394_evt_descriptor_read	0x6
+#define OHCI1394_evt_data_read		0x7
+#define OHCI1394_evt_data_write		0x8
+#define OHCI1394_evt_bus_reset		0x9
+#define OHCI1394_evt_timeout		0xa
+#define OHCI1394_evt_tcode_err		0xb
+#define OHCI1394_evt_reserved_b		0xc
+#define OHCI1394_evt_reserved_c		0xd
+#define OHCI1394_evt_unknown		0xe
+#define OHCI1394_evt_flushed		0xf
+
+#define OHCI1394_phy_tcode		0xe
+
+#endif /* __fw_ohci_h */
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
new file mode 100644
index 000000000000..68300414e5f4
--- /dev/null
+++ b/drivers/firewire/fw-sbp2.c
@@ -0,0 +1,1147 @@
+/*
+ * SBP2 driver (SCSI over IEEE1394)
+ *
+ * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+ * The basic structure of this driver is based on the old storage driver,
+ * drivers/ieee1394/sbp2.c, originally written by
+ *     James Goodwin <jamesg@filanet.com>
+ * with later contributions and ongoing maintenance from
+ *     Ben Collins <bcollins@debian.org>,
+ *     Stefan Richter <stefanr@s5r6.in-berlin.de>
+ * and many others.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/device.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/timer.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+#include "fw-transaction.h"
+#include "fw-topology.h"
+#include "fw-device.h"
+
+/* I don't know why the SCSI stack doesn't define something like this... */
+typedef void (*scsi_done_fn_t)(struct scsi_cmnd *);
+
+static const char sbp2_driver_name[] = "sbp2";
+
+struct sbp2_device {
+	struct kref kref;
+	struct fw_unit *unit;
+	struct fw_address_handler address_handler;
+	struct list_head orb_list;
+	u64 management_agent_address;
+	u64 command_block_agent_address;
+	u32 workarounds;
+	int login_id;
+
+	/*
+	 * We cache these addresses and only update them once we've
+	 * logged in or reconnected to the sbp2 device.  That way, any
+	 * IO to the device will automatically fail and get retried if
+	 * it happens in a window where the device is not ready to
+	 * handle it (e.g. after a bus reset but before we reconnect).
+	 */
+	int node_id;
+	int address_high;
+	int generation;
+
+	int retries;
+	struct delayed_work work;
+};
+
+#define SBP2_MAX_SG_ELEMENT_LENGTH	0xf000
+#define SBP2_MAX_SECTORS		255	/* Max sectors supported */
+#define SBP2_ORB_TIMEOUT		2000	/* Timeout in ms */
+
+#define SBP2_ORB_NULL			0x80000000
+
+#define SBP2_DIRECTION_TO_MEDIA		0x0
+#define SBP2_DIRECTION_FROM_MEDIA	0x1
+
+/* Unit directory keys */
+#define SBP2_COMMAND_SET_SPECIFIER	0x38
+#define SBP2_COMMAND_SET		0x39
+#define SBP2_COMMAND_SET_REVISION	0x3b
+#define SBP2_FIRMWARE_REVISION		0x3c
+
+/* Flags for detected oddities and brokenness */
+#define SBP2_WORKAROUND_128K_MAX_TRANS	0x1
+#define SBP2_WORKAROUND_INQUIRY_36	0x2
+#define SBP2_WORKAROUND_MODE_SENSE_8	0x4
+#define SBP2_WORKAROUND_FIX_CAPACITY	0x8
+#define SBP2_WORKAROUND_OVERRIDE	0x100
+
+/* Management orb opcodes */
+#define SBP2_LOGIN_REQUEST		0x0
+#define SBP2_QUERY_LOGINS_REQUEST	0x1
+#define SBP2_RECONNECT_REQUEST		0x3
+#define SBP2_SET_PASSWORD_REQUEST	0x4
+#define SBP2_LOGOUT_REQUEST		0x7
+#define SBP2_ABORT_TASK_REQUEST		0xb
+#define SBP2_ABORT_TASK_SET		0xc
+#define SBP2_LOGICAL_UNIT_RESET		0xe
+#define SBP2_TARGET_RESET_REQUEST	0xf
+
+/* Offsets for command block agent registers */
+#define SBP2_AGENT_STATE		0x00
+#define SBP2_AGENT_RESET		0x04
+#define SBP2_ORB_POINTER		0x08
+#define SBP2_DOORBELL			0x10
+#define SBP2_UNSOLICITED_STATUS_ENABLE	0x14
+
+/* Status write response codes */
+#define SBP2_STATUS_REQUEST_COMPLETE	0x0
+#define SBP2_STATUS_TRANSPORT_FAILURE	0x1
+#define SBP2_STATUS_ILLEGAL_REQUEST	0x2
+#define SBP2_STATUS_VENDOR_DEPENDENT	0x3
+
+#define STATUS_GET_ORB_HIGH(v)		((v).status & 0xffff)
+#define STATUS_GET_SBP_STATUS(v)	(((v).status >> 16) & 0xff)
+#define STATUS_GET_LEN(v)		(((v).status >> 24) & 0x07)
+#define STATUS_GET_DEAD(v)		(((v).status >> 27) & 0x01)
+#define STATUS_GET_RESPONSE(v)		(((v).status >> 28) & 0x03)
+#define STATUS_GET_SOURCE(v)		(((v).status >> 30) & 0x03)
+#define STATUS_GET_ORB_LOW(v)		((v).orb_low)
+#define STATUS_GET_DATA(v)		((v).data)
+
+struct sbp2_status {
+	u32 status;
+	u32 orb_low;
+	u8 data[24];
+};
+
+struct sbp2_pointer {
+	u32 high;
+	u32 low;
+};
+
+struct sbp2_orb {
+	struct fw_transaction t;
+	dma_addr_t request_bus;
+	int rcode;
+	struct sbp2_pointer pointer;
+	void (*callback)(struct sbp2_orb * orb, struct sbp2_status * status);
+	struct list_head link;
+};
+
+#define MANAGEMENT_ORB_LUN(v)			((v))
+#define MANAGEMENT_ORB_FUNCTION(v)		((v) << 16)
+#define MANAGEMENT_ORB_RECONNECT(v)		((v) << 20)
+#define MANAGEMENT_ORB_EXCLUSIVE		((1) << 28)
+#define MANAGEMENT_ORB_REQUEST_FORMAT(v)	((v) << 29)
+#define MANAGEMENT_ORB_NOTIFY			((1) << 31)
+
+#define MANAGEMENT_ORB_RESPONSE_LENGTH(v)	((v))
+#define MANAGEMENT_ORB_PASSWORD_LENGTH(v)	((v) << 16)
+
+struct sbp2_management_orb {
+	struct sbp2_orb base;
+	struct {
+		struct sbp2_pointer password;
+		struct sbp2_pointer response;
+		u32 misc;
+		u32 length;
+		struct sbp2_pointer status_fifo;
+	} request;
+	__be32 response[4];
+	dma_addr_t response_bus;
+	struct completion done;
+	struct sbp2_status status;
+};
+
+#define LOGIN_RESPONSE_GET_LOGIN_ID(v)	((v).misc & 0xffff)
+#define LOGIN_RESPONSE_GET_LENGTH(v)	(((v).misc >> 16) & 0xffff)
+
+struct sbp2_login_response {
+	u32 misc;
+	struct sbp2_pointer command_block_agent;
+	u32 reconnect_hold;
+};
+
+#define COMMAND_ORB_DATA_SIZE(v)	((v))
+#define COMMAND_ORB_PAGE_SIZE(v)	((v) << 16)
+#define COMMAND_ORB_PAGE_TABLE_PRESENT	((1) << 19)
+#define COMMAND_ORB_MAX_PAYLOAD(v)	((v) << 20)
+#define COMMAND_ORB_SPEED(v)		((v) << 24)
+#define COMMAND_ORB_DIRECTION(v)	((v) << 27)
+#define COMMAND_ORB_REQUEST_FORMAT(v)	((v) << 29)
+#define COMMAND_ORB_NOTIFY		((1) << 31)
+
+struct sbp2_command_orb {
+	struct sbp2_orb base;
+	struct {
+		struct sbp2_pointer next;
+		struct sbp2_pointer data_descriptor;
+		u32 misc;
+		u8 command_block[12];
+	} request;
+	struct scsi_cmnd *cmd;
+	scsi_done_fn_t done;
+	struct fw_unit *unit;
+
+	struct sbp2_pointer page_table[SG_ALL];
+	dma_addr_t page_table_bus;
+	dma_addr_t request_buffer_bus;
+};
+
+/*
+ * List of devices with known bugs.
+ *
+ * The firmware_revision field, masked with 0xffff00, is the best
+ * indicator for the type of bridge chip of a device.  It yields a few
 * false positives but has not broken correctly behaving devices so
 * far.  We use ~0 as a wildcard, since the 24-bit values we get
+ * from the config rom can never match that.
+ */
+static const struct {
+	u32 firmware_revision;
+	u32 model;
+	unsigned workarounds;
+} sbp2_workarounds_table[] = {
+	/* DViCO Momobay CX-1 with TSB42AA9 bridge */ {
+		.firmware_revision	= 0x002800,
+		.model			= 0x001010,
+		.workarounds		= SBP2_WORKAROUND_INQUIRY_36 |
+					  SBP2_WORKAROUND_MODE_SENSE_8,
+	},
+	/* Initio bridges, actually only needed for some older ones */ {
+		.firmware_revision	= 0x000200,
+		.model			= ~0,
+		.workarounds		= SBP2_WORKAROUND_INQUIRY_36,
+	},
+	/* Symbios bridge */ {
+		.firmware_revision	= 0xa0b800,
+		.model			= ~0,
+		.workarounds		= SBP2_WORKAROUND_128K_MAX_TRANS,
+	},
+
+	/*
+	 * There are iPods (2nd gen, 3rd gen) with model_id == 0, but
+	 * these iPods do not feature the read_capacity bug according
+	 * to one report.  Read_capacity behaviour as well as model_id
+	 * could change due to Apple-supplied firmware updates though.
+	 */
+
+	/* iPod 4th generation. */ {
+		.firmware_revision	= 0x0a2700,
+		.model			= 0x000021,
+		.workarounds		= SBP2_WORKAROUND_FIX_CAPACITY,
+	},
+	/* iPod mini */ {
+		.firmware_revision	= 0x0a2700,
+		.model			= 0x000023,
+		.workarounds		= SBP2_WORKAROUND_FIX_CAPACITY,
+	},
+	/* iPod Photo */ {
+		.firmware_revision	= 0x0a2700,
+		.model			= 0x00007e,
+		.workarounds		= SBP2_WORKAROUND_FIX_CAPACITY,
+	}
+};
+
+static void
+sbp2_status_write(struct fw_card *card, struct fw_request *request,
+		  int tcode, int destination, int source,
+		  int generation, int speed,
+		  unsigned long long offset,
+		  void *payload, size_t length, void *callback_data)
+{
+	struct sbp2_device *sd = callback_data;
+	struct sbp2_orb *orb;
+	struct sbp2_status status;
+	size_t header_size;
+	unsigned long flags;
+
+	if (tcode != TCODE_WRITE_BLOCK_REQUEST ||
+	    length == 0 || length > sizeof(status)) {
+		fw_send_response(card, request, RCODE_TYPE_ERROR);
+		return;
+	}
+
+	header_size = min(length, 2 * sizeof(u32));
+	fw_memcpy_from_be32(&status, payload, header_size);
+	if (length > header_size)
+		memcpy(status.data, payload + 8, length - header_size);
+	if (STATUS_GET_SOURCE(status) == 2 || STATUS_GET_SOURCE(status) == 3) {
+		fw_notify("non-orb related status write, not handled\n");
+		fw_send_response(card, request, RCODE_COMPLETE);
+		return;
+	}
+
+	/* Look up the orb corresponding to this status write. */
+	spin_lock_irqsave(&card->lock, flags);
+	list_for_each_entry(orb, &sd->orb_list, link) {
+		if (STATUS_GET_ORB_HIGH(status) == 0 &&
+		    STATUS_GET_ORB_LOW(status) == orb->request_bus &&
+		    orb->rcode == RCODE_COMPLETE) {
+			list_del(&orb->link);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&card->lock, flags);
+
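+	/*
+	 * If no orb matched, the list_for_each_entry() cursor above ran
+	 * off the end of the list and now points back at the list head
+	 * itself, which is what this comparison detects.
+	 */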
+	if (&orb->link != &sd->orb_list)
+		orb->callback(orb, &status);
+	else
+		fw_error("status write for unknown orb\n");
+
+	fw_send_response(card, request, RCODE_COMPLETE);
+}
+
+static void
+complete_transaction(struct fw_card *card, int rcode,
+		     void *payload, size_t length, void *data)
+{
+	struct sbp2_orb *orb = data;
+	unsigned long flags;
+
+	orb->rcode = rcode;
+	if (rcode != RCODE_COMPLETE) {
+		spin_lock_irqsave(&card->lock, flags);
+		list_del(&orb->link);
+		spin_unlock_irqrestore(&card->lock, flags);
+		orb->callback(orb, NULL);
+	}
+}
+
+static void
+sbp2_send_orb(struct sbp2_orb *orb, struct fw_unit *unit,
+	      int node_id, int generation, u64 offset)
+{
+	struct fw_device *device = fw_device(unit->device.parent);
+	struct sbp2_device *sd = unit->device.driver_data;
+	unsigned long flags;
+
+	orb->pointer.high = 0;
+	orb->pointer.low = orb->request_bus;
+	fw_memcpy_to_be32(&orb->pointer, &orb->pointer, sizeof(orb->pointer));
+
+	spin_lock_irqsave(&device->card->lock, flags);
+	list_add_tail(&orb->link, &sd->orb_list);
+	spin_unlock_irqrestore(&device->card->lock, flags);
+
+	fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST,
+			node_id, generation,
+			device->node->max_speed, offset,
+			&orb->pointer, sizeof(orb->pointer),
+			complete_transaction, orb);
+}
+
+static int sbp2_cancel_orbs(struct fw_unit *unit)
+{
+	struct fw_device *device = fw_device(unit->device.parent);
+	struct sbp2_device *sd = unit->device.driver_data;
+	struct sbp2_orb *orb, *next;
+	struct list_head list;
+	unsigned long flags;
+	int retval = -ENOENT;
+
+	INIT_LIST_HEAD(&list);
+	spin_lock_irqsave(&device->card->lock, flags);
+	list_splice_init(&sd->orb_list, &list);
+	spin_unlock_irqrestore(&device->card->lock, flags);
+
+	list_for_each_entry_safe(orb, next, &list, link) {
+		retval = 0;
+		if (fw_cancel_transaction(device->card, &orb->t) == 0)
+			continue;
+
+		orb->rcode = RCODE_CANCELLED;
+		orb->callback(orb, NULL);
+	}
+
+	return retval;
+}
+
+static void
+complete_management_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
+{
+	struct sbp2_management_orb *orb =
+	    (struct sbp2_management_orb *)base_orb;
+
+	if (status)
+		memcpy(&orb->status, status, sizeof(*status));
+	complete(&orb->done);
+}
+
+static int
+sbp2_send_management_orb(struct fw_unit *unit, int node_id, int generation,
+			 int function, int lun, void *response)
+{
+	struct fw_device *device = fw_device(unit->device.parent);
+	struct sbp2_device *sd = unit->device.driver_data;
+	struct sbp2_management_orb *orb;
+	int retval = -ENOMEM;
+
+	orb = kzalloc(sizeof(*orb), GFP_ATOMIC);
+	if (orb == NULL)
+		return -ENOMEM;
+
+	/*
+	 * The sbp2 device is going to send a block read request to
+	 * read out the request from host memory, so map it for DMA.
+	 */
+	orb->base.request_bus =
+		dma_map_single(device->card->device, &orb->request,
+			       sizeof(orb->request), DMA_TO_DEVICE);
+	if (dma_mapping_error(orb->base.request_bus))
+		goto out;
+
+	orb->response_bus =
+		dma_map_single(device->card->device, &orb->response,
+			       sizeof(orb->response), DMA_FROM_DEVICE);
+	if (dma_mapping_error(orb->response_bus))
+		goto out;
+
+	orb->request.response.high    = 0;
+	orb->request.response.low     = orb->response_bus;
+
+	orb->request.misc =
+		MANAGEMENT_ORB_NOTIFY |
+		MANAGEMENT_ORB_FUNCTION(function) |
+		MANAGEMENT_ORB_LUN(lun);
+	orb->request.length =
+		MANAGEMENT_ORB_RESPONSE_LENGTH(sizeof(orb->response));
+
+	orb->request.status_fifo.high = sd->address_handler.offset >> 32;
+	orb->request.status_fifo.low  = sd->address_handler.offset;
+
+	/*
+	 * FIXME: Yeah, ok this isn't elegant, we hardwire exclusive
+	 * login and 1 second reconnect time.  The reconnect setting
+	 * is probably fine, but the exclusive login should be an option.
+	 */
+	if (function == SBP2_LOGIN_REQUEST) {
+		orb->request.misc |=
+			MANAGEMENT_ORB_EXCLUSIVE |
+			MANAGEMENT_ORB_RECONNECT(0);
+	}
+
+	fw_memcpy_to_be32(&orb->request, &orb->request, sizeof(orb->request));
+
+	init_completion(&orb->done);
+	orb->base.callback = complete_management_orb;
+
+	sbp2_send_orb(&orb->base, unit,
+		      node_id, generation, sd->management_agent_address);
+
+	wait_for_completion_timeout(&orb->done,
+				    msecs_to_jiffies(SBP2_ORB_TIMEOUT));
+
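+	/*
+	 * sbp2_cancel_orbs() returns 0 exactly when it found the orb
+	 * still pending on the orb list, i.e. when the wait above
+	 * timed out.
+	 */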
+	retval = -EIO;
+	if (sbp2_cancel_orbs(unit) == 0) {
+		fw_error("orb reply timed out, rcode=0x%02x\n",
+			 orb->base.rcode);
+		goto out;
+	}
+
+	if (orb->base.rcode != RCODE_COMPLETE) {
+		fw_error("management write failed, rcode 0x%02x\n",
+			 orb->base.rcode);
+		goto out;
+	}
+
+	if (STATUS_GET_RESPONSE(orb->status) != 0 ||
+	    STATUS_GET_SBP_STATUS(orb->status) != 0) {
+		fw_error("error status: %d:%d\n",
+			 STATUS_GET_RESPONSE(orb->status),
+			 STATUS_GET_SBP_STATUS(orb->status));
+		goto out;
+	}
+
+	retval = 0;
+ out:
+	dma_unmap_single(device->card->device, orb->base.request_bus,
+			 sizeof(orb->request), DMA_TO_DEVICE);
+	dma_unmap_single(device->card->device, orb->response_bus,
+			 sizeof(orb->response), DMA_FROM_DEVICE);
+
+	if (response)
+		fw_memcpy_from_be32(response,
+				    orb->response, sizeof(orb->response));
+	kfree(orb);
+
+	return retval;
+}
+
+static void
+complete_agent_reset_write(struct fw_card *card, int rcode,
+			   void *payload, size_t length, void *data)
+{
+	struct fw_transaction *t = data;
+
+	kfree(t);
+}
+
+static int sbp2_agent_reset(struct fw_unit *unit)
+{
+	struct fw_device *device = fw_device(unit->device.parent);
+	struct sbp2_device *sd = unit->device.driver_data;
+	struct fw_transaction *t;
+	static u32 zero;
+
+	t = kzalloc(sizeof(*t), GFP_ATOMIC);
+	if (t == NULL)
+		return -ENOMEM;
+
+	fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST,
+			sd->node_id, sd->generation, SCODE_400,
+			sd->command_block_agent_address + SBP2_AGENT_RESET,
+			&zero, sizeof(zero), complete_agent_reset_write, t);
+
+	return 0;
+}
+
+static void sbp2_reconnect(struct work_struct *work);
+static struct scsi_host_template scsi_driver_template;
+
+static void
+release_sbp2_device(struct kref *kref)
+{
+	struct sbp2_device *sd = container_of(kref, struct sbp2_device, kref);
+	struct Scsi_Host *host =
+		container_of((void *)sd, struct Scsi_Host, hostdata[0]);
+
+	sbp2_send_management_orb(sd->unit, sd->node_id, sd->generation,
+				 SBP2_LOGOUT_REQUEST, sd->login_id, NULL);
+
+	scsi_remove_host(host);
+	fw_core_remove_address_handler(&sd->address_handler);
+	fw_notify("removed sbp2 unit %s\n", sd->unit->device.bus_id);
+	put_device(&sd->unit->device);
+	scsi_host_put(host);
+}
+
+static void sbp2_login(struct work_struct *work)
+{
+	struct sbp2_device *sd =
+		container_of(work, struct sbp2_device, work.work);
+	struct Scsi_Host *host =
+		container_of((void *)sd, struct Scsi_Host, hostdata[0]);
+	struct fw_unit *unit = sd->unit;
+	struct fw_device *device = fw_device(unit->device.parent);
+	struct sbp2_login_response response;
+	int generation, node_id, local_node_id, lun, retval;
+
+	/* FIXME: Make this work for multi-lun devices. */
+	lun = 0;
+
+	generation    = device->card->generation;
+	node_id       = device->node->node_id;
+	local_node_id = device->card->local_node->node_id;
+
+	if (sbp2_send_management_orb(unit, node_id, generation,
+				     SBP2_LOGIN_REQUEST, lun, &response) < 0) {
+		if (sd->retries++ < 5) {
+			schedule_delayed_work(&sd->work, DIV_ROUND_UP(HZ, 5));
+		} else {
+			fw_error("failed to login to %s\n",
+				 unit->device.bus_id);
+			kref_put(&sd->kref, release_sbp2_device);
+		}
+		return;
+	}
+
+	sd->generation   = generation;
+	sd->node_id      = node_id;
+	sd->address_high = local_node_id << 16;
+
+	/* Get command block agent offset and login id. */
+	sd->command_block_agent_address =
+		((u64) (response.command_block_agent.high & 0xffff) << 32) |
+		response.command_block_agent.low;
+	sd->login_id = LOGIN_RESPONSE_GET_LOGIN_ID(response);
+
+	fw_notify("logged in to sbp2 unit %s (%d retries)\n",
+		  unit->device.bus_id, sd->retries);
+	fw_notify(" - management_agent_address:    0x%012llx\n",
+		  (unsigned long long) sd->management_agent_address);
+	fw_notify(" - command_block_agent_address: 0x%012llx\n",
+		  (unsigned long long) sd->command_block_agent_address);
+	fw_notify(" - status write address:        0x%012llx\n",
+		  (unsigned long long) sd->address_handler.offset);
+
+#if 0
+	/* FIXME: The linux1394 sbp2 does this last step. */
+	sbp2_set_busy_timeout(scsi_id);
+#endif
+
+	PREPARE_DELAYED_WORK(&sd->work, sbp2_reconnect);
+	sbp2_agent_reset(unit);
+
+	/* FIXME: Loop over luns here. */
+	lun = 0;
+	retval = scsi_add_device(host, 0, 0, lun);
+	if (retval < 0) {
+		sbp2_send_management_orb(unit, sd->node_id, sd->generation,
+					 SBP2_LOGOUT_REQUEST, sd->login_id,
+					 NULL);
+		/*
+		 * Set this back to sbp2_login so we fall back and
+		 * retry login on bus reset.
+		 */
+		PREPARE_DELAYED_WORK(&sd->work, sbp2_login);
+	}
+	kref_put(&sd->kref, release_sbp2_device);
+}
+
+static int sbp2_probe(struct device *dev)
+{
+	struct fw_unit *unit = fw_unit(dev);
+	struct fw_device *device = fw_device(unit->device.parent);
+	struct sbp2_device *sd;
+	struct fw_csr_iterator ci;
+	struct Scsi_Host *host;
+	int i, key, value, err;
+	u32 model, firmware_revision;
+
+	err = -ENOMEM;
+	host = scsi_host_alloc(&scsi_driver_template, sizeof(*sd));
+	if (host == NULL)
+		goto fail;
+
+	sd = (struct sbp2_device *) host->hostdata;
+	unit->device.driver_data = sd;
+	sd->unit = unit;
+	INIT_LIST_HEAD(&sd->orb_list);
+	kref_init(&sd->kref);
+
+	sd->address_handler.length = 0x100;
+	sd->address_handler.address_callback = sbp2_status_write;
+	sd->address_handler.callback_data = sd;
+
+	err = fw_core_add_address_handler(&sd->address_handler,
+					  &fw_high_memory_region);
+	if (err < 0)
+		goto fail_host;
+
+	err = fw_device_enable_phys_dma(device);
+	if (err < 0)
+		goto fail_address_handler;
+
+	err = scsi_add_host(host, &unit->device);
+	if (err < 0)
+		goto fail_address_handler;
+
+	/*
+	 * Scan unit directory to get management agent address,
+	 * firmware revision and model.  Initialize firmware_revision
+	 * and model to values that won't match anything in our table.
+	 */
+	firmware_revision = 0xff000000;
+	model = 0xff000000;
+	fw_csr_iterator_init(&ci, unit->directory);
+	while (fw_csr_iterator_next(&ci, &key, &value)) {
+		switch (key) {
+		case CSR_DEPENDENT_INFO | CSR_OFFSET:
+			sd->management_agent_address =
+				0xfffff0000000ULL + 4 * value;
+			break;
+		case SBP2_FIRMWARE_REVISION:
+			firmware_revision = value;
+			break;
+		case CSR_MODEL:
+			model = value;
+			break;
+		}
+	}
+
+	for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) {
+		if (sbp2_workarounds_table[i].firmware_revision !=
+		    (firmware_revision & 0xffffff00))
+			continue;
+		if (sbp2_workarounds_table[i].model != model &&
+		    sbp2_workarounds_table[i].model != ~0)
+			continue;
+		sd->workarounds |= sbp2_workarounds_table[i].workarounds;
+		break;
+	}
+
+	if (sd->workarounds)
+		fw_notify("Workarounds for node %s: 0x%x "
+			  "(firmware_revision 0x%06x, model_id 0x%06x)\n",
+			  unit->device.bus_id,
+			  sd->workarounds, firmware_revision, model);
+
+	get_device(&unit->device);
+
+	/*
+	 * We schedule work to do the login so we can easily
+	 * reschedule retries.  The work item holds a reference on the
+	 * device, which we take whenever scheduling it succeeds.
+	 */
+	INIT_DELAYED_WORK(&sd->work, sbp2_login);
+	if (schedule_delayed_work(&sd->work, 0))
+		kref_get(&sd->kref);
+
+	return 0;
+
+ fail_address_handler:
+	fw_core_remove_address_handler(&sd->address_handler);
+ fail_host:
+	scsi_host_put(host);
+ fail:
+	return err;
+}
+
+static int sbp2_remove(struct device *dev)
+{
+	struct fw_unit *unit = fw_unit(dev);
+	struct sbp2_device *sd = unit->device.driver_data;
+
+	kref_put(&sd->kref, release_sbp2_device);
+
+	return 0;
+}
+
+static void sbp2_reconnect(struct work_struct *work)
+{
+	struct sbp2_device *sd =
+		container_of(work, struct sbp2_device, work.work);
+	struct fw_unit *unit = sd->unit;
+	struct fw_device *device = fw_device(unit->device.parent);
+	int generation, node_id, local_node_id;
+
+	generation    = device->card->generation;
+	node_id       = device->node->node_id;
+	local_node_id = device->card->local_node->node_id;
+
+	if (sbp2_send_management_orb(unit, node_id, generation,
+				     SBP2_RECONNECT_REQUEST,
+				     sd->login_id, NULL) < 0) {
+		if (sd->retries++ >= 5) {
+			fw_error("failed to reconnect to %s\n",
+				 unit->device.bus_id);
+			/* Fall back and try to log in again. */
+			sd->retries = 0;
+			PREPARE_DELAYED_WORK(&sd->work, sbp2_login);
+		}
+		schedule_delayed_work(&sd->work, DIV_ROUND_UP(HZ, 5));
+		return;
+	}
+
+	sd->generation   = generation;
+	sd->node_id      = node_id;
+	sd->address_high = local_node_id << 16;
+
+	fw_notify("reconnected to unit %s (%d retries)\n",
+		  unit->device.bus_id, sd->retries);
+	sbp2_agent_reset(unit);
+	sbp2_cancel_orbs(unit);
+	kref_put(&sd->kref, release_sbp2_device);
+}
+
+static void sbp2_update(struct fw_unit *unit)
+{
+	struct fw_device *device = fw_device(unit->device.parent);
+	struct sbp2_device *sd = unit->device.driver_data;
+
+	sd->retries = 0;
+	fw_device_enable_phys_dma(device);
+	if (schedule_delayed_work(&sd->work, 0))
+		kref_get(&sd->kref);
+}
+
+#define SBP2_UNIT_SPEC_ID_ENTRY	0x0000609e
+#define SBP2_SW_VERSION_ENTRY	0x00010483
+
+static const struct fw_device_id sbp2_id_table[] = {
+	{
+		.match_flags  = FW_MATCH_SPECIFIER_ID | FW_MATCH_VERSION,
+		.specifier_id = SBP2_UNIT_SPEC_ID_ENTRY,
+		.version      = SBP2_SW_VERSION_ENTRY,
+	},
+	{ }
+};
+
+static struct fw_driver sbp2_driver = {
+	.driver   = {
+		.owner  = THIS_MODULE,
+		.name   = sbp2_driver_name,
+		.bus    = &fw_bus_type,
+		.probe  = sbp2_probe,
+		.remove = sbp2_remove,
+	},
+	.update   = sbp2_update,
+	.id_table = sbp2_id_table,
+};
+
+static unsigned int
+sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
+{
+	int sam_status;
+
+	sense_data[0] = 0x70;
+	sense_data[1] = 0x0;
+	sense_data[2] = sbp2_status[1];
+	sense_data[3] = sbp2_status[4];
+	sense_data[4] = sbp2_status[5];
+	sense_data[5] = sbp2_status[6];
+	sense_data[6] = sbp2_status[7];
+	sense_data[7] = 10;
+	sense_data[8] = sbp2_status[8];
+	sense_data[9] = sbp2_status[9];
+	sense_data[10] = sbp2_status[10];
+	sense_data[11] = sbp2_status[11];
+	sense_data[12] = sbp2_status[2];
+	sense_data[13] = sbp2_status[3];
+	sense_data[14] = sbp2_status[12];
+	sense_data[15] = sbp2_status[13];
+
+	sam_status = sbp2_status[0] & 0x3f;
+
+	switch (sam_status) {
+	case SAM_STAT_GOOD:
+	case SAM_STAT_CHECK_CONDITION:
+	case SAM_STAT_CONDITION_MET:
+	case SAM_STAT_BUSY:
+	case SAM_STAT_RESERVATION_CONFLICT:
+	case SAM_STAT_COMMAND_TERMINATED:
+		return DID_OK << 16 | sam_status;
+
+	default:
+		return DID_ERROR << 16;
+	}
+}
+
+static void
+complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
+{
+	struct sbp2_command_orb *orb = (struct sbp2_command_orb *)base_orb;
+	struct fw_unit *unit = orb->unit;
+	struct fw_device *device = fw_device(unit->device.parent);
+	struct scatterlist *sg;
+	int result;
+
+	if (status != NULL) {
+		if (STATUS_GET_DEAD(*status))
+			sbp2_agent_reset(unit);
+
+		switch (STATUS_GET_RESPONSE(*status)) {
+		case SBP2_STATUS_REQUEST_COMPLETE:
+			result = DID_OK << 16;
+			break;
+		case SBP2_STATUS_TRANSPORT_FAILURE:
+			result = DID_BUS_BUSY << 16;
+			break;
+		case SBP2_STATUS_ILLEGAL_REQUEST:
+		case SBP2_STATUS_VENDOR_DEPENDENT:
+		default:
+			result = DID_ERROR << 16;
+			break;
+		}
+
+		if (result == DID_OK << 16 && STATUS_GET_LEN(*status) > 1)
+			result = sbp2_status_to_sense_data(STATUS_GET_DATA(*status),
+							   orb->cmd->sense_buffer);
+	} else {
+		/*
+		 * If the orb completes with status == NULL, something
+		 * went wrong, typically a bus reset happened mid-orb
+		 * or when sending the write (less likely).
+		 */
+		result = DID_BUS_BUSY << 16;
+	}
+
+	dma_unmap_single(device->card->device, orb->base.request_bus,
+			 sizeof(orb->request), DMA_TO_DEVICE);
+
+	if (orb->cmd->use_sg > 0) {
+		sg = (struct scatterlist *)orb->cmd->request_buffer;
+		dma_unmap_sg(device->card->device, sg, orb->cmd->use_sg,
+			     orb->cmd->sc_data_direction);
+	}
+
+	if (orb->page_table_bus != 0)
+		dma_unmap_single(device->card->device, orb->page_table_bus,
+				 sizeof(orb->page_table), DMA_TO_DEVICE);
+
+	if (orb->request_buffer_bus != 0)
+		dma_unmap_single(device->card->device, orb->request_buffer_bus,
+				 sizeof(orb->request_buffer_bus),
+				 DMA_FROM_DEVICE);
+
+	orb->cmd->result = result;
+	orb->done(orb->cmd);
+	kfree(orb);
+}
+
+static int sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb)
+{
+	struct sbp2_device *sd =
+		(struct sbp2_device *)orb->cmd->device->host->hostdata;
+	struct fw_unit *unit = sd->unit;
+	struct fw_device *device = fw_device(unit->device.parent);
+	struct scatterlist *sg;
+	int sg_len, l, i, j, count;
+	size_t size;
+	dma_addr_t sg_addr;
+
+	sg = (struct scatterlist *)orb->cmd->request_buffer;
+	count = dma_map_sg(device->card->device, sg, orb->cmd->use_sg,
+			   orb->cmd->sc_data_direction);
+	if (count == 0)
+		goto fail;
+
+	/*
+	 * Handle the special case where there is only one element in
+	 * the scatter list by converting it to an immediate block
+	 * request. This is also a workaround for broken devices such
+	 * as the second generation iPod which doesn't support page
+	 * tables.
+	 */
+	if (count == 1 && sg_dma_len(sg) < SBP2_MAX_SG_ELEMENT_LENGTH) {
+		orb->request.data_descriptor.high = sd->address_high;
+		orb->request.data_descriptor.low  = sg_dma_address(sg);
+		orb->request.misc |=
+			COMMAND_ORB_DATA_SIZE(sg_dma_len(sg));
+		return 0;
+	}
+
+	/*
+	 * Convert the scatterlist to an sbp2 page table.  If any
+	 * scatterlist entries are too big for sbp2, we split them as we
+	 * go.  Even if we ask the block I/O layer to not give us sg
+	 * elements larger than 65535 bytes, some IOMMUs may merge sg elements
+	 * during DMA mapping, and Linux currently doesn't prevent this.
+	 */
+	for (i = 0, j = 0; i < count; i++) {
+		sg_len = sg_dma_len(sg + i);
+		sg_addr = sg_dma_address(sg + i);
+		while (sg_len) {
+			l = min(sg_len, SBP2_MAX_SG_ELEMENT_LENGTH);
+			orb->page_table[j].low = sg_addr;
+			orb->page_table[j].high = (l << 16);
+			sg_addr += l;
+			sg_len -= l;
+			j++;
+		}
+	}
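+
+	/*
+	 * For example, a single 0x19000 byte (100 KiB) DMA segment ends
+	 * up as two page table entries here: one of 0xf000 bytes
+	 * followed by one of 0xa000 bytes.
+	 */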
+
+	size = sizeof(orb->page_table[0]) * j;
+
+	/*
+	 * The data_descriptor pointer is the one case where we need
+	 * to fill in the node ID part of the address.  All other
+	 * pointers assume that the data referenced resides on the
+	 * initiator (i.e. us), but data_descriptor can refer to data
+	 * on other nodes so we need to put our ID in descriptor.high.
+	 */
+
+	orb->page_table_bus =
+		dma_map_single(device->card->device, orb->page_table,
+			       sizeof(orb->page_table), DMA_TO_DEVICE);
+	if (dma_mapping_error(orb->page_table_bus))
+		goto fail_page_table;
+	orb->request.data_descriptor.high = sd->address_high;
+	orb->request.data_descriptor.low  = orb->page_table_bus;
+	orb->request.misc |=
+		COMMAND_ORB_PAGE_TABLE_PRESENT |
+		COMMAND_ORB_DATA_SIZE(j);
+
+	fw_memcpy_to_be32(orb->page_table, orb->page_table, size);
+
+	return 0;
+
+ fail_page_table:
+	dma_unmap_sg(device->card->device, sg, orb->cmd->use_sg,
+		     orb->cmd->sc_data_direction);
+ fail:
+	return -ENOMEM;
+}
+
+/* SCSI stack integration */
+
+static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
+{
+	struct sbp2_device *sd =
+		(struct sbp2_device *)cmd->device->host->hostdata;
+	struct fw_unit *unit = sd->unit;
+	struct fw_device *device = fw_device(unit->device.parent);
+	struct sbp2_command_orb *orb;
+
+	/*
+	 * Bidirectional commands are not yet implemented, and an
+	 * unknown transfer direction is not handled.
+	 */
+	if (cmd->sc_data_direction == DMA_BIDIRECTIONAL) {
+		fw_error("Cannot handle DMA_BIDIRECTIONAL - rejecting command\n");
+		cmd->result = DID_ERROR << 16;
+		done(cmd);
+		return 0;
+	}
+
+	orb = kzalloc(sizeof(*orb), GFP_ATOMIC);
+	if (orb == NULL) {
+		fw_notify("failed to alloc orb\n");
+		goto fail_alloc;
+	}
+
+	/* Initialize rcode to something not RCODE_COMPLETE. */
+	orb->base.rcode = -1;
+	orb->base.request_bus =
+		dma_map_single(device->card->device, &orb->request,
+			       sizeof(orb->request), DMA_TO_DEVICE);
+	if (dma_mapping_error(orb->base.request_bus))
+		goto fail_mapping;
+
+	orb->unit = unit;
+	orb->done = done;
+	orb->cmd  = cmd;
+
+	orb->request.next.high   = SBP2_ORB_NULL;
+	orb->request.next.low    = 0x0;
+	/*
+	 * At speed 100 we can do 512 bytes per packet, at speed 200,
+	 * 1024 bytes per packet etc.  The SBP-2 max_payload field
+	 * specifies the max payload size as 2 ^ (max_payload + 2), so
+	 * if we set this to max_speed + 7, we get the right value.
+	 */
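+	/*
+	 * For example, at S400 (max_speed == 2) the field is set to 9,
+	 * allowing payloads of up to 2^(9 + 2) == 2048 bytes.
+	 */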
+	orb->request.misc =
+		COMMAND_ORB_MAX_PAYLOAD(device->node->max_speed + 7) |
+		COMMAND_ORB_SPEED(device->node->max_speed) |
+		COMMAND_ORB_NOTIFY;
+
+	if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+		orb->request.misc |=
+			COMMAND_ORB_DIRECTION(SBP2_DIRECTION_FROM_MEDIA);
+	else if (cmd->sc_data_direction == DMA_TO_DEVICE)
+		orb->request.misc |=
+			COMMAND_ORB_DIRECTION(SBP2_DIRECTION_TO_MEDIA);
+
+	if (cmd->use_sg && sbp2_command_orb_map_scatterlist(orb) < 0)
+		goto fail_map_payload;
+
+	fw_memcpy_to_be32(&orb->request, &orb->request, sizeof(orb->request));
+
+	memset(orb->request.command_block,
+	       0, sizeof(orb->request.command_block));
+	memcpy(orb->request.command_block, cmd->cmnd, COMMAND_SIZE(*cmd->cmnd));
+
+	orb->base.callback = complete_command_orb;
+
+	sbp2_send_orb(&orb->base, unit, sd->node_id, sd->generation,
+		      sd->command_block_agent_address + SBP2_ORB_POINTER);
+
+	return 0;
+
+ fail_map_payload:
+	dma_unmap_single(device->card->device, orb->base.request_bus,
+			 sizeof(orb->request), DMA_TO_DEVICE);
+ fail_mapping:
+	kfree(orb);
+ fail_alloc:
+	return SCSI_MLQUEUE_HOST_BUSY;
+}
+
+static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
+{
+	struct sbp2_device *sd = (struct sbp2_device *)sdev->host->hostdata;
+
+	sdev->allow_restart = 1;
+
+	if (sd->workarounds & SBP2_WORKAROUND_INQUIRY_36)
+		sdev->inquiry_len = 36;
+	return 0;
+}
+
+static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
+{
+	struct sbp2_device *sd = (struct sbp2_device *)sdev->host->hostdata;
+	struct fw_unit *unit = sd->unit;
+
+	sdev->use_10_for_rw = 1;
+
+	if (sdev->type == TYPE_ROM)
+		sdev->use_10_for_ms = 1;
+	if (sdev->type == TYPE_DISK &&
+	    sd->workarounds & SBP2_WORKAROUND_MODE_SENSE_8)
+		sdev->skip_ms_page_8 = 1;
+	if (sd->workarounds & SBP2_WORKAROUND_FIX_CAPACITY) {
+		fw_notify("setting fix_capacity for %s\n", unit->device.bus_id);
+		sdev->fix_capacity = 1;
+	}
+
+	return 0;
+}
+
+/*
 * Called by the SCSI stack when something has really gone wrong.
 * Usually called when a command has timed out for some reason.
+ */
+static int sbp2_scsi_abort(struct scsi_cmnd *cmd)
+{
+	struct sbp2_device *sd =
+		(struct sbp2_device *)cmd->device->host->hostdata;
+	struct fw_unit *unit = sd->unit;
+
+	fw_notify("sbp2_scsi_abort\n");
+	sbp2_agent_reset(unit);
+	sbp2_cancel_orbs(unit);
+
+	return SUCCESS;
+}
+
+static struct scsi_host_template scsi_driver_template = {
+	.module			= THIS_MODULE,
+	.name			= "SBP-2 IEEE-1394",
+	.proc_name		= (char *)sbp2_driver_name,
+	.queuecommand		= sbp2_scsi_queuecommand,
+	.slave_alloc		= sbp2_scsi_slave_alloc,
+	.slave_configure	= sbp2_scsi_slave_configure,
+	.eh_abort_handler	= sbp2_scsi_abort,
+	.this_id		= -1,
+	.sg_tablesize		= SG_ALL,
+	.use_clustering		= ENABLE_CLUSTERING,
+	.cmd_per_lun		= 1,
+	.can_queue		= 1,
+};
+
+MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
+MODULE_DESCRIPTION("SCSI over IEEE1394");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table);
+
+/* Provide a module alias so root-on-sbp2 initrds don't break. */
+#ifndef CONFIG_IEEE1394_SBP2_MODULE
+MODULE_ALIAS("sbp2");
+#endif
+
+static int __init sbp2_init(void)
+{
+	return driver_register(&sbp2_driver.driver);
+}
+
+static void __exit sbp2_cleanup(void)
+{
+	driver_unregister(&sbp2_driver.driver);
+}
+
+module_init(sbp2_init);
+module_exit(sbp2_cleanup);
diff --git a/drivers/firewire/fw-topology.c b/drivers/firewire/fw-topology.c
new file mode 100644
index 000000000000..7aebb8ae0efa
--- /dev/null
+++ b/drivers/firewire/fw-topology.c
@@ -0,0 +1,537 @@
+/*
+ * Incremental bus scan, based on bus topology
+ *
+ * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/wait.h>
+#include <linux/errno.h>
+#include "fw-transaction.h"
+#include "fw-topology.h"
+
+#define SELF_ID_PHY_ID(q)		(((q) >> 24) & 0x3f)
+#define SELF_ID_EXTENDED(q)		(((q) >> 23) & 0x01)
+#define SELF_ID_LINK_ON(q)		(((q) >> 22) & 0x01)
+#define SELF_ID_GAP_COUNT(q)		(((q) >> 16) & 0x3f)
+#define SELF_ID_PHY_SPEED(q)		(((q) >> 14) & 0x03)
+#define SELF_ID_CONTENDER(q)		(((q) >> 11) & 0x01)
+#define SELF_ID_PHY_INITIATOR(q)	(((q) >>  1) & 0x01)
+#define SELF_ID_MORE_PACKETS(q)		(((q) >>  0) & 0x01)
+
+#define SELF_ID_EXT_SEQUENCE(q)		(((q) >> 20) & 0x07)
+
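As an illustration of these macros (the quadlet value below is fabricated): 0x827f08b4 decodes as PHY ID 2, link active, gap count 63, S100, contender bit set, ports p0/p1/p2 = parent/child/not connected:

	SELF_ID_PHY_ID(0x827f08b4)       == 2
	SELF_ID_LINK_ON(0x827f08b4)      == 1
	SELF_ID_GAP_COUNT(0x827f08b4)    == 63
	SELF_ID_PHY_SPEED(0x827f08b4)    == 0	/* S100 */
	SELF_ID_CONTENDER(0x827f08b4)    == 1
	SELF_ID_MORE_PACKETS(0x827f08b4) == 0
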
+static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count)
+{
+	u32 q;
+	int port_type, shift, seq;
+
+	*total_port_count = 0;
+	*child_port_count = 0;
+
+	shift = 6;
+	q = *sid;
+	seq = 0;
+
+	while (1) {
+		port_type = (q >> shift) & 0x03;
+		switch (port_type) {
+		case SELFID_PORT_CHILD:
+			(*child_port_count)++;
+			/* fall through */
+		case SELFID_PORT_PARENT:
+		case SELFID_PORT_NCONN:
+			(*total_port_count)++;
+			/* fall through */
+		case SELFID_PORT_NONE:
+			break;
+		}
+
+		shift -= 2;
+		if (shift == 0) {
+			if (!SELF_ID_MORE_PACKETS(q))
+				return sid + 1;
+
+			shift = 16;
+			sid++;
+			q = *sid;
+
+			/*
+			 * Check that the extra packets actually are
+			 * extended self ID packets and that the
+			 * sequence numbers in the extended self ID
+			 * packets increase as expected.
+			 */
+
+			if (!SELF_ID_EXTENDED(q) ||
+			    seq != SELF_ID_EXT_SEQUENCE(q))
+				return NULL;
+
+			seq++;
+		}
+	}
+}
+
+static int get_port_type(u32 *sid, int port_index)
+{
+	int index, shift;
+
+	index = (port_index + 5) / 8;
+	shift = 16 - ((port_index + 5) & 7) * 2;
+	return (sid[index] >> shift) & 0x03;
+}
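
A worked example of the index/shift arithmetic, matching the quadlet layout count_ports() walks above:

	get_port_type(sid, 2): index = (2 + 5) / 8 = 0,
	                       shift = 16 - ((2 + 5) & 7) * 2 = 2
	                       -> sid[0] bits 3-2 (third port field)
	get_port_type(sid, 3): index = (3 + 5) / 8 = 1,
	                       shift = 16 - ((3 + 5) & 7) * 2 = 16
	                       -> sid[1] bits 17-16 (first extended port field)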
+
+static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
+{
+	struct fw_node *node;
+
+	node = kzalloc(sizeof(*node) + port_count * sizeof(node->ports[0]),
+		       GFP_ATOMIC);
+	if (node == NULL)
+		return NULL;
+
+	node->color = color;
+	node->node_id = LOCAL_BUS | SELF_ID_PHY_ID(sid);
+	node->link_on = SELF_ID_LINK_ON(sid);
+	node->phy_speed = SELF_ID_PHY_SPEED(sid);
+	node->port_count = port_count;
+
+	atomic_set(&node->ref_count, 1);
+	INIT_LIST_HEAD(&node->link);
+
+	return node;
+}
+
+/*
+ * Compute the maximum hop count for this node and its children.  The
+ * maximum hop count is the maximum number of connections between any
+ * two nodes in the subtree rooted at this node.  We need this for
+ * setting the gap count.  As we build the tree bottom up in
+ * build_tree() below, this is fairly easy to do: for each node we
+ * maintain the max hop count and the max depth, i.e. the number of
+ * hops to the furthest leaf.  Computing the max hop count breaks down
+ * into two cases: either the path goes through this node, in which
+ * case the hop count is the sum of the two biggest child depths plus
+ * 2, or the max hop path is entirely contained in a child tree, in
+ * which case the max hop count is just the max hop count of this
+ * child.
+ */
+static void update_hop_count(struct fw_node *node)
+{
+	int depths[2] = { -1, -1 };
+	int max_child_hops = 0;
+	int i;
+
+	for (i = 0; i < node->port_count; i++) {
+		if (node->ports[i].node == NULL)
+			continue;
+
+		if (node->ports[i].node->max_hops > max_child_hops)
+			max_child_hops = node->ports[i].node->max_hops;
+
+		if (node->ports[i].node->max_depth > depths[0]) {
+			depths[1] = depths[0];
+			depths[0] = node->ports[i].node->max_depth;
+		} else if (node->ports[i].node->max_depth > depths[1])
+			depths[1] = node->ports[i].node->max_depth;
+	}
+
+	node->max_depth = depths[0] + 1;
+	node->max_hops = max(max_child_hops, depths[0] + depths[1] + 2);
+}
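
A standalone C sketch of the rule described above (not kernel code; values invented). A node whose children have subtree depths 2 and 0 and a deepest child max_hops of 3 gets max_hops 4, since the path crossing the node wins; a leaf keeps the initial depths of -1 and gets max_hops 0:

	#include <stdio.h>

	/* Combine the two largest child depths with the largest child
	 * max_hops, exactly as update_hop_count() does. */
	static int combined_max_hops(int d0, int d1, int max_child_hops)
	{
		int through = d0 + d1 + 2; /* longest path crossing the node */

		return through > max_child_hops ? through : max_child_hops;
	}

	int main(void)
	{
		printf("%d\n", combined_max_hops(2, 0, 3));   /* 4 */
		printf("%d\n", combined_max_hops(-1, -1, 0)); /* 0: a leaf */
		return 0;
	}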
+
+/**
+ * build_tree - Build the tree representation of the topology
+ * @card: the fw_card the topology is built for
+ * @sid: array of self IDs to create the tree from
+ * @self_id_count: the length of the sid array
+ *
+ * This function builds the tree representation of the topology given
+ * by the self IDs from the latest bus reset.  During the construction
+ * of the tree, the function checks that the self IDs are valid and
+ * internally consistent.  On success this function returns the
+ * fw_node corresponding to the local card, otherwise NULL.
+ */
+static struct fw_node *build_tree(struct fw_card *card,
+				  u32 *sid, int self_id_count)
+{
+	struct fw_node *node, *child, *local_node, *irm_node;
+	struct list_head stack, *h;
+	u32 *next_sid, *end, q;
+	int i, port_count, child_port_count, phy_id, parent_count, stack_depth;
+	int gap_count, topology_type;
+
+	local_node = NULL;
+	node = NULL;
+	INIT_LIST_HEAD(&stack);
+	stack_depth = 0;
+	end = sid + self_id_count;
+	phy_id = 0;
+	irm_node = NULL;
+	gap_count = SELF_ID_GAP_COUNT(*sid);
+	topology_type = 0;
+
+	while (sid < end) {
+		next_sid = count_ports(sid, &port_count, &child_port_count);
+
+		if (next_sid == NULL) {
+			fw_error("Inconsistent extended self IDs.\n");
+			return NULL;
+		}
+
+		q = *sid;
+		if (phy_id != SELF_ID_PHY_ID(q)) {
+			fw_error("PHY ID mismatch in self ID: %d != %d.\n",
+				 phy_id, SELF_ID_PHY_ID(q));
+			return NULL;
+		}
+
+		if (child_port_count > stack_depth) {
+			fw_error("Topology stack underflow\n");
+			return NULL;
+		}
+
+		/*
+		 * Seek back from the top of our stack to find the
+		 * start of the child nodes for this node.
+		 */
+		for (i = 0, h = &stack; i < child_port_count; i++)
+			h = h->prev;
+		child = fw_node(h);
+
+		node = fw_node_create(q, port_count, card->color);
+		if (node == NULL) {
+			fw_error("Out of memory while building topology.");
+			return NULL;
+		}
+
+		if (phy_id == (card->node_id & 0x3f))
+			local_node = node;
+
+		if (SELF_ID_CONTENDER(q))
+			irm_node = node;
+
+		if (node->phy_speed == SCODE_BETA)
+			topology_type |= FW_TOPOLOGY_B;
+		else
+			topology_type |= FW_TOPOLOGY_A;
+
+		parent_count = 0;
+
+		for (i = 0; i < port_count; i++) {
+			switch (get_port_type(sid, i)) {
+			case SELFID_PORT_PARENT:
+				/*
+				 * Who's your daddy?  We don't know the
+				 * parent node at this time, so we
+				 * temporarily abuse node->color for
+				 * remembering the entry in the
+				 * node->ports array where the parent
+				 * node should be.  Later, when we
+				 * handle the parent node, we fix up
+				 * the reference.
+				 */
+				parent_count++;
+				node->color = i;
+				break;
+
+			case SELFID_PORT_CHILD:
+				node->ports[i].node = child;
+				/*
+				 * Fix up parent reference for this
+				 * child node.
+				 */
+				child->ports[child->color].node = node;
+				child->color = card->color;
+				child = fw_node(child->link.next);
+				break;
+			}
+		}
+
+		/*
+		 * Check that the node reports exactly one parent
+		 * port, except for the root, which of course should
+		 * have no parents.
+		 */
+		if ((next_sid == end && parent_count != 0) ||
+		    (next_sid < end && parent_count != 1)) {
+			fw_error("Parent port inconsistency for node %d: "
+				 "parent_count=%d\n", phy_id, parent_count);
+			return NULL;
+		}
+
+		/* Pop the child nodes off the stack and push the new node. */
+		__list_del(h->prev, &stack);
+		list_add_tail(&node->link, &stack);
+		stack_depth += 1 - child_port_count;
+
+		/*
+		 * If not all PHYs report the same gap count setting,
+		 * we fall back to 63, which will force a gap count
+		 * reconfiguration and a reset.
+		 */
+		if (SELF_ID_GAP_COUNT(q) != gap_count)
+			gap_count = 63;
+
+		update_hop_count(node);
+
+		sid = next_sid;
+		phy_id++;
+	}
+
+	card->root_node = node;
+	card->irm_node = irm_node;
+	card->gap_count = gap_count;
+	card->topology_type = topology_type;
+
+	return local_node;
+}
+
+typedef void (*fw_node_callback_t)(struct fw_card * card,
+				   struct fw_node * node,
+				   struct fw_node * parent);
+
+static void
+for_each_fw_node(struct fw_card *card, struct fw_node *root,
+		 fw_node_callback_t callback)
+{
+	struct list_head list;
+	struct fw_node *node, *next, *child, *parent;
+	int i;
+
+	INIT_LIST_HEAD(&list);
+
+	fw_node_get(root);
+	list_add_tail(&root->link, &list);
+	parent = NULL;
+	list_for_each_entry(node, &list, link) {
+		node->color = card->color;
+
+		for (i = 0; i < node->port_count; i++) {
+			child = node->ports[i].node;
+			if (!child)
+				continue;
+			if (child->color == card->color)
+				parent = child;
+			else {
+				fw_node_get(child);
+				list_add_tail(&child->link, &list);
+			}
+		}
+
+		callback(card, node, parent);
+	}
+
+	list_for_each_entry_safe(node, next, &list, link)
+		fw_node_put(node);
+}
+
+static void
+report_lost_node(struct fw_card *card,
+		 struct fw_node *node, struct fw_node *parent)
+{
+	fw_node_event(card, node, FW_NODE_DESTROYED);
+	fw_node_put(node);
+}
+
+static void
+report_found_node(struct fw_card *card,
+		  struct fw_node *node, struct fw_node *parent)
+{
+	int b_path = (node->phy_speed == SCODE_BETA);
+
+	if (parent != NULL) {
+		/* min() macro doesn't work here with gcc 3.4 */
+		node->max_speed = parent->max_speed < node->phy_speed ?
+					parent->max_speed : node->phy_speed;
+		node->b_path = parent->b_path && b_path;
+	} else {
+		node->max_speed = node->phy_speed;
+		node->b_path = b_path;
+	}
+
+	fw_node_event(card, node, FW_NODE_CREATED);
+}
+
+void fw_destroy_nodes(struct fw_card *card)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&card->lock, flags);
+	card->color++;
+	if (card->local_node != NULL)
+		for_each_fw_node(card, card->local_node, report_lost_node);
+	spin_unlock_irqrestore(&card->lock, flags);
+}
+
+static void move_tree(struct fw_node *node0, struct fw_node *node1, int port)
+{
+	struct fw_node *tree;
+	int i;
+
+	tree = node1->ports[port].node;
+	node0->ports[port].node = tree;
+	for (i = 0; i < tree->port_count; i++) {
+		if (tree->ports[i].node == node1) {
+			tree->ports[i].node = node0;
+			break;
+		}
+	}
+}
+
+/**
+ * update_tree - compare the old topology tree for card with the new
+ * one specified by root.  Queue the nodes and mark them as either
+ * found, lost or updated.  Update the nodes in the card topology tree
+ * as we go.
+ */
+static void
+update_tree(struct fw_card *card, struct fw_node *root)
+{
+	struct list_head list0, list1;
+	struct fw_node *node0, *node1;
+	int i, event;
+
+	INIT_LIST_HEAD(&list0);
+	list_add_tail(&card->local_node->link, &list0);
+	INIT_LIST_HEAD(&list1);
+	list_add_tail(&root->link, &list1);
+
+	node0 = fw_node(list0.next);
+	node1 = fw_node(list1.next);
+
+	while (&node0->link != &list0) {
+
+		/* assert(node0->port_count == node1->port_count); */
+		if (node0->link_on && !node1->link_on)
+			event = FW_NODE_LINK_OFF;
+		else if (!node0->link_on && node1->link_on)
+			event = FW_NODE_LINK_ON;
+		else
+			event = FW_NODE_UPDATED;
+
+		node0->node_id = node1->node_id;
+		node0->color = card->color;
+		node0->link_on = node1->link_on;
+		node0->initiated_reset = node1->initiated_reset;
+		node0->max_hops = node1->max_hops;
+		node1->color = card->color;
+		fw_node_event(card, node0, event);
+
+		if (card->root_node == node1)
+			card->root_node = node0;
+		if (card->irm_node == node1)
+			card->irm_node = node0;
+
+		for (i = 0; i < node0->port_count; i++) {
+			if (node0->ports[i].node && node1->ports[i].node) {
+				/*
+				 * This port didn't change, queue the
+				 * connected node for further
+				 * investigation.
+				 */
+				if (node0->ports[i].node->color == card->color)
+					continue;
+				list_add_tail(&node0->ports[i].node->link,
+					      &list0);
+				list_add_tail(&node1->ports[i].node->link,
+					      &list1);
+			} else if (node0->ports[i].node) {
+				/*
+				 * The nodes connected here were
+				 * unplugged; unref the lost nodes and
+				 * queue FW_NODE_LOST callbacks for
+				 * them.
+				 */
+
+				for_each_fw_node(card, node0->ports[i].node,
+						 report_lost_node);
+				node0->ports[i].node = NULL;
+			} else if (node1->ports[i].node) {
+				/*
+				 * One or more nodes were connected to
+				 * this port. Move the new nodes into
+				 * the tree and queue FW_NODE_CREATED
+				 * callbacks for them.
+				 */
+				move_tree(node0, node1, i);
+				for_each_fw_node(card, node0->ports[i].node,
+						 report_found_node);
+			}
+		}
+
+		node0 = fw_node(node0->link.next);
+		node1 = fw_node(node1->link.next);
+	}
+}
+
+static void
+update_topology_map(struct fw_card *card, u32 *self_ids, int self_id_count)
+{
+	int node_count;
+
+	card->topology_map[1]++;
+	node_count = (card->root_node->node_id & 0x3f) + 1;
+	card->topology_map[2] = (node_count << 16) | self_id_count;
+	card->topology_map[0] = (self_id_count + 2) << 16;
+	memcpy(&card->topology_map[3], self_ids, self_id_count * 4);
+	fw_compute_block_crc(card->topology_map);
+}
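
For reference, the TOPOLOGY_MAP layout produced above (field packing as in the code; the CRC in the low half of map[0] is filled in by fw_compute_block_crc()):

	map[0]    = (self_id_count + 2) << 16 | crc  /* length in quadlets */
	map[1]    = generation, incremented on each bus reset
	map[2]    = node_count << 16 | self_id_count
	map[3...] = the raw self-ID quadlets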
+
+void
+fw_core_handle_bus_reset(struct fw_card *card,
+			 int node_id, int generation,
+			 int self_id_count, u32 * self_ids)
+{
+	struct fw_node *local_node;
+	unsigned long flags;
+
+	fw_flush_transactions(card);
+
+	spin_lock_irqsave(&card->lock, flags);
+
+	/*
+	 * If the new topology has a different self_id_count the topology
+	 * changed, either nodes were added or removed.  In that case we
+	 * reset the bus manager retry counter.
+	 */
+	if (card->self_id_count != self_id_count)
+		card->bm_retries = 0;
+
+	card->node_id = node_id;
+	card->generation = generation;
+	card->reset_jiffies = jiffies;
+	schedule_delayed_work(&card->work, 0);
+
+	local_node = build_tree(card, self_ids, self_id_count);
+
+	update_topology_map(card, self_ids, self_id_count);
+
+	card->color++;
+
+	if (local_node == NULL) {
+		fw_error("topology build failed\n");
+		/* FIXME: We need to issue a bus reset in this case. */
+	} else if (card->local_node == NULL) {
+		card->local_node = local_node;
+		for_each_fw_node(card, local_node, report_found_node);
+	} else {
+		update_tree(card, local_node);
+	}
+
+	spin_unlock_irqrestore(&card->lock, flags);
+}
+EXPORT_SYMBOL(fw_core_handle_bus_reset);
diff --git a/drivers/firewire/fw-topology.h b/drivers/firewire/fw-topology.h
new file mode 100644
index 000000000000..363b6cbcd0b3
--- /dev/null
+++ b/drivers/firewire/fw-topology.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __fw_topology_h
+#define __fw_topology_h
+
+enum {
+	FW_TOPOLOGY_A =		0x01,
+	FW_TOPOLOGY_B =		0x02,
+	FW_TOPOLOGY_MIXED =	0x03,
+};
+
+enum {
+	FW_NODE_CREATED =   0x00,
+	FW_NODE_UPDATED =   0x01,
+	FW_NODE_DESTROYED = 0x02,
+	FW_NODE_LINK_ON =   0x03,
+	FW_NODE_LINK_OFF =  0x04,
+};
+
+struct fw_port {
+	struct fw_node *node;
+	unsigned speed : 3; /* S100, S200, ... S3200 */
+};
+
+struct fw_node {
+	u16 node_id;
+	u8 color;
+	u8 port_count;
+	unsigned link_on : 1;
+	unsigned initiated_reset : 1;
+	unsigned b_path : 1;
+	u8 phy_speed : 3; /* As in the self ID packet. */
+	u8 max_speed : 5; /* Minimum of all phy-speeds and port speeds on
+			   * the path from the local node to this node. */
+	u8 max_depth : 4; /* Maximum depth to any leaf node */
+	u8 max_hops : 4;  /* Max hops in this sub tree */
+	atomic_t ref_count;
+
+	/* For serializing node topology into a list. */
+	struct list_head link;
+
+	/* Upper layer specific data. */
+	void *data;
+
+	struct fw_port ports[0];
+};
+
+static inline struct fw_node *
+fw_node(struct list_head *l)
+{
+	return list_entry(l, struct fw_node, link);
+}
+
+static inline struct fw_node *
+fw_node_get(struct fw_node *node)
+{
+	atomic_inc(&node->ref_count);
+
+	return node;
+}
+
+static inline void
+fw_node_put(struct fw_node *node)
+{
+	if (atomic_dec_and_test(&node->ref_count))
+		kfree(node);
+}
+
+void
+fw_destroy_nodes(struct fw_card *card);
+
+int
+fw_compute_block_crc(u32 *block);
+
+
+#endif /* __fw_topology_h */
diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c
new file mode 100644
index 000000000000..80d0121463d0
--- /dev/null
+++ b/drivers/firewire/fw-transaction.c
@@ -0,0 +1,910 @@
+/*
+ * Core IEEE1394 transaction logic
+ *
+ * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/poll.h>
+#include <linux/list.h>
+#include <linux/kthread.h>
+#include <asm/uaccess.h>
+#include <asm/semaphore.h>
+
+#include "fw-transaction.h"
+#include "fw-topology.h"
+#include "fw-device.h"
+
+#define HEADER_PRI(pri)			((pri) << 0)
+#define HEADER_TCODE(tcode)		((tcode) << 4)
+#define HEADER_RETRY(retry)		((retry) << 8)
+#define HEADER_TLABEL(tlabel)		((tlabel) << 10)
+#define HEADER_DESTINATION(destination)	((destination) << 16)
+#define HEADER_SOURCE(source)		((source) << 16)
+#define HEADER_RCODE(rcode)		((rcode) << 12)
+#define HEADER_OFFSET_HIGH(offset_high)	((offset_high) << 0)
+#define HEADER_DATA_LENGTH(length)	((length) << 16)
+#define HEADER_EXTENDED_TCODE(tcode)	((tcode) << 0)
+
+#define HEADER_GET_TCODE(q)		(((q) >> 4) & 0x0f)
+#define HEADER_GET_TLABEL(q)		(((q) >> 10) & 0x3f)
+#define HEADER_GET_RCODE(q)		(((q) >> 12) & 0x0f)
+#define HEADER_GET_DESTINATION(q)	(((q) >> 16) & 0xffff)
+#define HEADER_GET_SOURCE(q)		(((q) >> 16) & 0xffff)
+#define HEADER_GET_OFFSET_HIGH(q)	(((q) >> 0) & 0xffff)
+#define HEADER_GET_DATA_LENGTH(q)	(((q) >> 16) & 0xffff)
+#define HEADER_GET_EXTENDED_TCODE(q)	(((q) >> 0) & 0xffff)
+
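A round-trip illustration of the header macros (values invented; RETRY_X and the tcode constant come from linux/firewire-constants.h):

	u32 h0 = HEADER_RETRY(RETRY_X) |
		 HEADER_TLABEL(31) |
		 HEADER_TCODE(TCODE_READ_QUADLET_REQUEST) |
		 HEADER_DESTINATION(0xffc0);

	/* HEADER_GET_TCODE(h0)       == TCODE_READ_QUADLET_REQUEST
	 * HEADER_GET_TLABEL(h0)      == 31
	 * HEADER_GET_DESTINATION(h0) == 0xffc0 */
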
+#define PHY_CONFIG_GAP_COUNT(gap_count)	(((gap_count) << 16) | (1 << 22))
+#define PHY_CONFIG_ROOT_ID(node_id)	((((node_id) & 0x3f) << 24) | (1 << 23))
+#define PHY_IDENTIFIER(id)		((id) << 30)
+
+static int
+close_transaction(struct fw_transaction *transaction,
+		  struct fw_card *card, int rcode,
+		  u32 *payload, size_t length)
+{
+	struct fw_transaction *t;
+	unsigned long flags;
+
+	spin_lock_irqsave(&card->lock, flags);
+	list_for_each_entry(t, &card->transaction_list, link) {
+		if (t == transaction) {
+			list_del(&t->link);
+			card->tlabel_mask &= ~(1 << t->tlabel);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&card->lock, flags);
+
+	if (&t->link != &card->transaction_list) {
+		t->callback(card, rcode, payload, length, t->callback_data);
+		return 0;
+	}
+
+	return -ENOENT;
+}
+
+/*
+ * Only valid for transactions that are potentially pending (i.e. have
+ * been sent).
+ */
+int
+fw_cancel_transaction(struct fw_card *card,
+		      struct fw_transaction *transaction)
+{
+	/*
+	 * Cancel the packet transmission if it's still queued.  That
+	 * will call the packet transmission callback which cancels
+	 * the transaction.
+	 */
+
+	if (card->driver->cancel_packet(card, &transaction->packet) == 0)
+		return 0;
+
+	/*
+	 * If the request packet has already been sent, we need to see
+	 * if the transaction is still pending and remove it in that case.
+	 */
+
+	return close_transaction(transaction, card, RCODE_CANCELLED, NULL, 0);
+}
+EXPORT_SYMBOL(fw_cancel_transaction);
+
+static void
+transmit_complete_callback(struct fw_packet *packet,
+			   struct fw_card *card, int status)
+{
+	struct fw_transaction *t =
+	    container_of(packet, struct fw_transaction, packet);
+
+	switch (status) {
+	case ACK_COMPLETE:
+		close_transaction(t, card, RCODE_COMPLETE, NULL, 0);
+		break;
+	case ACK_PENDING:
+		t->timestamp = packet->timestamp;
+		break;
+	case ACK_BUSY_X:
+	case ACK_BUSY_A:
+	case ACK_BUSY_B:
+		close_transaction(t, card, RCODE_BUSY, NULL, 0);
+		break;
+	case ACK_DATA_ERROR:
+		close_transaction(t, card, RCODE_DATA_ERROR, NULL, 0);
+		break;
+	case ACK_TYPE_ERROR:
+		close_transaction(t, card, RCODE_TYPE_ERROR, NULL, 0);
+		break;
+	default:
+		/*
+		 * In this case the ack is really a juju specific
+		 * rcode, so just forward that to the callback.
+		 */
+		close_transaction(t, card, status, NULL, 0);
+		break;
+	}
+}
+
+static void
+fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
+		int node_id, int source_id, int generation, int speed,
+		unsigned long long offset, void *payload, size_t length)
+{
+	int ext_tcode;
+
+	if (tcode > 0x10) {
+		ext_tcode = tcode - 0x10;
+		tcode = TCODE_LOCK_REQUEST;
+	} else
+		ext_tcode = 0;
+
+	packet->header[0] =
+		HEADER_RETRY(RETRY_X) |
+		HEADER_TLABEL(tlabel) |
+		HEADER_TCODE(tcode) |
+		HEADER_DESTINATION(node_id);
+	packet->header[1] =
+		HEADER_OFFSET_HIGH(offset >> 32) | HEADER_SOURCE(source_id);
+	packet->header[2] =
+		offset;
+
+	switch (tcode) {
+	case TCODE_WRITE_QUADLET_REQUEST:
+		packet->header[3] = *(u32 *)payload;
+		packet->header_length = 16;
+		packet->payload_length = 0;
+		break;
+
+	case TCODE_LOCK_REQUEST:
+	case TCODE_WRITE_BLOCK_REQUEST:
+		packet->header[3] =
+			HEADER_DATA_LENGTH(length) |
+			HEADER_EXTENDED_TCODE(ext_tcode);
+		packet->header_length = 16;
+		packet->payload = payload;
+		packet->payload_length = length;
+		break;
+
+	case TCODE_READ_QUADLET_REQUEST:
+		packet->header_length = 12;
+		packet->payload_length = 0;
+		break;
+
+	case TCODE_READ_BLOCK_REQUEST:
+		packet->header[3] =
+			HEADER_DATA_LENGTH(length) |
+			HEADER_EXTENDED_TCODE(ext_tcode);
+		packet->header_length = 16;
+		packet->payload_length = 0;
+		break;
+	}
+
+	packet->speed = speed;
+	packet->generation = generation;
+	packet->ack = 0;
+}
+
+/**
+ * This function provides low-level access to the IEEE1394 transaction
+ * logic.  Most C programs would use either fw_read(), fw_write() or
+ * fw_lock() instead - those functions are convenience wrappers for
+ * this function.  The fw_send_request() function is primarily
+ * provided as a flexible, one-stop entry point for language bindings
+ * and protocol bindings.
+ *
+ * FIXME: Document this function further, in particular the possible
+ * values for rcode in the callback.  In short, we map ACK_COMPLETE to
+ * RCODE_COMPLETE, internal errors set errno and set rcode to
+ * RCODE_SEND_ERROR (which is out of range for standard ieee1394
+ * rcodes).  All other rcodes are forwarded unchanged.  For all
+ * errors, payload is NULL, length is 0.
+ *
+ * Callers must not expect the callback to be called before the
+ * function returns, though this does happen in some cases
+ * (ACK_COMPLETE and errors).
+ *
+ * The payload is only used for write requests and must not be freed
+ * until the callback has been called.
+ *
+ * @param card the card from which to send the request
+ * @param tcode the tcode for this transaction.  Do not use
+ *   TCODE_LOCK_REQUEST directly, instead use TCODE_LOCK_MASK_SWAP
+ *   etc. to specify tcode and ext_tcode.
+ * @param node_id the destination node ID (bus ID and PHY ID concatenated)
+ * @param generation the generation for which node_id is valid
+ * @param speed the speed to use for sending the request
+ * @param offset the 48 bit offset on the destination node
+ * @param payload the data payload for the request subaction
+ * @param length the length in bytes of the data to read
+ * @param callback function to be called when the transaction is completed
+ * @param callback_data pointer to arbitrary data, which will be
+ *   passed to the callback
+ */
+void
+fw_send_request(struct fw_card *card, struct fw_transaction *t,
+		int tcode, int node_id, int generation, int speed,
+		unsigned long long offset,
+		void *payload, size_t length,
+		fw_transaction_callback_t callback, void *callback_data)
+{
+	unsigned long flags;
+	int tlabel, source;
+
+	/*
+	 * Bump the flush timer up 100ms first of all so we
+	 * don't race with a flush timer callback.
+	 */
+
+	mod_timer(&card->flush_timer, jiffies + DIV_ROUND_UP(HZ, 10));
+
+	/*
+	 * Allocate tlabel from the bitmap and put the transaction on
+	 * the list while holding the card spinlock.
+	 */
+
+	spin_lock_irqsave(&card->lock, flags);
+
+	source = card->node_id;
+	tlabel = card->current_tlabel;
+	if (card->tlabel_mask & (1 << tlabel)) {
+		spin_unlock_irqrestore(&card->lock, flags);
+		callback(card, RCODE_SEND_ERROR, NULL, 0, callback_data);
+		return;
+	}
+
+	card->current_tlabel = (card->current_tlabel + 1) & 0x1f;
+	card->tlabel_mask |= (1 << tlabel);
+
+	list_add_tail(&t->link, &card->transaction_list);
+
+	spin_unlock_irqrestore(&card->lock, flags);
+
+	/* Initialize rest of transaction, fill out packet and send it. */
+	t->node_id = node_id;
+	t->tlabel = tlabel;
+	t->callback = callback;
+	t->callback_data = callback_data;
+
+	fw_fill_request(&t->packet, tcode, t->tlabel,
+			node_id, source, generation,
+			speed, offset, payload, length);
+	t->packet.callback = transmit_complete_callback;
+
+	card->driver->send_request(card, &t->packet);
+}
+EXPORT_SYMBOL(fw_send_request);
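
A hypothetical caller sketch (my_complete and my_read_rom_header are invented names; the fw_transaction must stay allocated until the callback has run): reading the first config ROM quadlet of a remote node:

	static void my_complete(struct fw_card *card, int rcode,
				void *payload, size_t length, void *data)
	{
		if (rcode == RCODE_COMPLETE)
			fw_notify("config rom quadlet: %08x\n",
				  *(u32 *)payload);
	}

	static void my_read_rom_header(struct fw_card *card, int node_id,
				       int generation,
				       struct fw_transaction *t)
	{
		fw_send_request(card, t, TCODE_READ_QUADLET_REQUEST,
				node_id, generation, SCODE_100,
				CSR_REGISTER_BASE + CSR_CONFIG_ROM,
				NULL, 4, my_complete, NULL);
	}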
+
+static void
+transmit_phy_packet_callback(struct fw_packet *packet,
+			     struct fw_card *card, int status)
+{
+	kfree(packet);
+}
+
+static void send_phy_packet(struct fw_card *card, u32 data, int generation)
+{
+	struct fw_packet *packet;
+
+	packet = kzalloc(sizeof(*packet), GFP_ATOMIC);
+	if (packet == NULL)
+		return;
+
+	packet->header[0] = data;
+	packet->header[1] = ~data;
+	packet->header_length = 8;
+	packet->payload_length = 0;
+	packet->speed = SCODE_100;
+	packet->generation = generation;
+	packet->callback = transmit_phy_packet_callback;
+
+	card->driver->send_request(card, packet);
+}
+
+void fw_send_phy_config(struct fw_card *card,
+			int node_id, int generation, int gap_count)
+{
+	u32 q;
+
+	q = PHY_IDENTIFIER(PHY_PACKET_CONFIG) |
+		PHY_CONFIG_ROOT_ID(node_id) |
+		PHY_CONFIG_GAP_COUNT(gap_count);
+
+	send_phy_packet(card, q, generation);
+}
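
Usage sketch (illustrative values): during bus management a caller might force the local node to become root with a gap count of 5 for the current generation:

	fw_send_phy_config(card, card->node_id, card->generation, 5);

send_phy_packet() above transmits the quadlet followed by its bitwise inverse, as PHY packets require.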
+
+void fw_flush_transactions(struct fw_card *card)
+{
+	struct fw_transaction *t, *next;
+	struct list_head list;
+	unsigned long flags;
+
+	INIT_LIST_HEAD(&list);
+	spin_lock_irqsave(&card->lock, flags);
+	list_splice_init(&card->transaction_list, &list);
+	card->tlabel_mask = 0;
+	spin_unlock_irqrestore(&card->lock, flags);
+
+	list_for_each_entry_safe(t, next, &list, link) {
+		card->driver->cancel_packet(card, &t->packet);
+
+		/*
+		 * At this point cancel_packet will never call the
+		 * transaction callback, since we just took all the
+		 * transactions out of the list.  So do it here.
+		 */
+		t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
+	}
+}
+
+static struct fw_address_handler *
+lookup_overlapping_address_handler(struct list_head *list,
+				   unsigned long long offset, size_t length)
+{
+	struct fw_address_handler *handler;
+
+	list_for_each_entry(handler, list, link) {
+		if (handler->offset < offset + length &&
+		    offset < handler->offset + handler->length)
+			return handler;
+	}
+
+	return NULL;
+}
+
+static struct fw_address_handler *
+lookup_enclosing_address_handler(struct list_head *list,
+				 unsigned long long offset, size_t length)
+{
+	struct fw_address_handler *handler;
+
+	list_for_each_entry(handler, list, link) {
+		if (handler->offset <= offset &&
+		    offset + length <= handler->offset + handler->length)
+			return handler;
+	}
+
+	return NULL;
+}
+
+static DEFINE_SPINLOCK(address_handler_lock);
+static LIST_HEAD(address_handler_list);
+
+const struct fw_address_region fw_low_memory_region =
+	{ .start = 0x000000000000ULL, .end = 0x000100000000ULL,  };
+const struct fw_address_region fw_high_memory_region =
+	{ .start = 0x000100000000ULL, .end = 0xffffe0000000ULL,  };
+const struct fw_address_region fw_private_region =
+	{ .start = 0xffffe0000000ULL, .end = 0xfffff0000000ULL,  };
+const struct fw_address_region fw_csr_region =
+	{ .start = 0xfffff0000000ULL, .end = 0xfffff0000800ULL,  };
+const struct fw_address_region fw_unit_space_region =
+	{ .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, };
+EXPORT_SYMBOL(fw_low_memory_region);
+EXPORT_SYMBOL(fw_high_memory_region);
+EXPORT_SYMBOL(fw_private_region);
+EXPORT_SYMBOL(fw_csr_region);
+EXPORT_SYMBOL(fw_unit_space_region);
+
+/**
+ * Allocate a range of addresses in the node space of the OHCI
+ * controller.  When a request is received that falls within the
+ * specified address range, the specified callback is invoked.  The
+ * parameters passed to the callback give the details of the
+ * particular request.
+ */
+int
+fw_core_add_address_handler(struct fw_address_handler *handler,
+			    const struct fw_address_region *region)
+{
+	struct fw_address_handler *other;
+	unsigned long flags;
+	int ret = -EBUSY;
+
+	spin_lock_irqsave(&address_handler_lock, flags);
+
+	handler->offset = region->start;
+	while (handler->offset + handler->length <= region->end) {
+		other =
+		    lookup_overlapping_address_handler(&address_handler_list,
+						       handler->offset,
+						       handler->length);
+		if (other != NULL) {
+			handler->offset += other->length;
+		} else {
+			list_add_tail(&handler->link, &address_handler_list);
+			ret = 0;
+			break;
+		}
+	}
+
+	spin_unlock_irqrestore(&address_handler_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(fw_core_add_address_handler);
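
A hypothetical registration sketch (my_handler and my_callback are invented names): claim 0x100 bytes in the private region and acknowledge every request:

	static void my_callback(struct fw_card *card,
				struct fw_request *request,
				int tcode, int destination, int source,
				int generation, int speed,
				unsigned long long offset,
				void *data, size_t length,
				void *callback_data)
	{
		fw_send_response(card, request, RCODE_COMPLETE);
	}

	static struct fw_address_handler my_handler = {
		.length           = 0x100,
		.address_callback = my_callback,
	};

	/* err = fw_core_add_address_handler(&my_handler,
	 *				     &fw_private_region); */

On success, my_handler.offset holds the address actually assigned within the region.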
+
+/**
+ * Deallocate a range of addresses allocated with
+ * fw_core_add_address_handler.  This will call the associated callback
+ * one last time with the special tcode TCODE_DEALLOCATE, to let the
+ * client destroy the registered callback data.  For convenience, the
+ * callback parameters offset and length are set to the start and the
+ * length respectively of the deallocated region, payload is set to
+ * NULL.
+ */
+void fw_core_remove_address_handler(struct fw_address_handler *handler)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&address_handler_lock, flags);
+	list_del(&handler->link);
+	spin_unlock_irqrestore(&address_handler_lock, flags);
+}
+EXPORT_SYMBOL(fw_core_remove_address_handler);
+
+struct fw_request {
+	struct fw_packet response;
+	u32 request_header[4];
+	int ack;
+	u32 length;
+	u32 data[0];
+};
+
+static void
+free_response_callback(struct fw_packet *packet,
+		       struct fw_card *card, int status)
+{
+	struct fw_request *request;
+
+	request = container_of(packet, struct fw_request, response);
+	kfree(request);
+}
+
+void
+fw_fill_response(struct fw_packet *response, u32 *request_header,
+		 int rcode, void *payload, size_t length)
+{
+	int tcode, tlabel, extended_tcode, source, destination;
+
+	tcode          = HEADER_GET_TCODE(request_header[0]);
+	tlabel         = HEADER_GET_TLABEL(request_header[0]);
+	source         = HEADER_GET_DESTINATION(request_header[0]);
+	destination    = HEADER_GET_SOURCE(request_header[1]);
+	extended_tcode = HEADER_GET_EXTENDED_TCODE(request_header[3]);
+
+	response->header[0] =
+		HEADER_RETRY(RETRY_1) |
+		HEADER_TLABEL(tlabel) |
+		HEADER_DESTINATION(destination);
+	response->header[1] =
+		HEADER_SOURCE(source) |
+		HEADER_RCODE(rcode);
+	response->header[2] = 0;
+
+	switch (tcode) {
+	case TCODE_WRITE_QUADLET_REQUEST:
+	case TCODE_WRITE_BLOCK_REQUEST:
+		response->header[0] |= HEADER_TCODE(TCODE_WRITE_RESPONSE);
+		response->header_length = 12;
+		response->payload_length = 0;
+		break;
+
+	case TCODE_READ_QUADLET_REQUEST:
+		response->header[0] |=
+			HEADER_TCODE(TCODE_READ_QUADLET_RESPONSE);
+		if (payload != NULL)
+			response->header[3] = *(u32 *)payload;
+		else
+			response->header[3] = 0;
+		response->header_length = 16;
+		response->payload_length = 0;
+		break;
+
+	case TCODE_READ_BLOCK_REQUEST:
+	case TCODE_LOCK_REQUEST:
+		response->header[0] |= HEADER_TCODE(tcode + 2);
+		response->header[3] =
+			HEADER_DATA_LENGTH(length) |
+			HEADER_EXTENDED_TCODE(extended_tcode);
+		response->header_length = 16;
+		response->payload = payload;
+		response->payload_length = length;
+		break;
+
+	default:
+		BUG();
+		return;
+	}
+}
+EXPORT_SYMBOL(fw_fill_response);
+
+static struct fw_request *
+allocate_request(struct fw_packet *p)
+{
+	struct fw_request *request;
+	u32 *data, length;
+	int request_tcode, t;
+
+	request_tcode = HEADER_GET_TCODE(p->header[0]);
+	switch (request_tcode) {
+	case TCODE_WRITE_QUADLET_REQUEST:
+		data = &p->header[3];
+		length = 4;
+		break;
+
+	case TCODE_WRITE_BLOCK_REQUEST:
+	case TCODE_LOCK_REQUEST:
+		data = p->payload;
+		length = HEADER_GET_DATA_LENGTH(p->header[3]);
+		break;
+
+	case TCODE_READ_QUADLET_REQUEST:
+		data = NULL;
+		length = 4;
+		break;
+
+	case TCODE_READ_BLOCK_REQUEST:
+		data = NULL;
+		length = HEADER_GET_DATA_LENGTH(p->header[3]);
+		break;
+
+	default:
+		BUG();
+		return NULL;
+	}
+
+	request = kmalloc(sizeof(*request) + length, GFP_ATOMIC);
+	if (request == NULL)
+		return NULL;
+
+	t = (p->timestamp & 0x1fff) + 4000;
+	if (t >= 8000)
+		t = (p->timestamp & ~0x1fff) + 0x2000 + t - 8000;
+	else
+		t = (p->timestamp & ~0x1fff) + t;
+
+	request->response.speed = p->speed;
+	request->response.timestamp = t;
+	request->response.generation = p->generation;
+	request->response.ack = 0;
+	request->response.callback = free_response_callback;
+	request->ack = p->ack;
+	request->length = length;
+	if (data)
+		memcpy(request->data, data, length);
+
+	memcpy(request->request_header, p->header, sizeof(p->header));
+
+	return request;
+}
+
+void
+fw_send_response(struct fw_card *card, struct fw_request *request, int rcode)
+{
+	/*
+	 * Broadcast packets are reported as ACK_COMPLETE, so this
+	 * check is sufficient to ensure we don't send response to
+	 * broadcast packets or posted writes.
+	 */
+	if (request->ack != ACK_PENDING)
+		return;
+
+	if (rcode == RCODE_COMPLETE)
+		fw_fill_response(&request->response, request->request_header,
+				 rcode, request->data, request->length);
+	else
+		fw_fill_response(&request->response, request->request_header,
+				 rcode, NULL, 0);
+
+	card->driver->send_response(card, &request->response);
+}
+EXPORT_SYMBOL(fw_send_response);
+
+void
+fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
+{
+	struct fw_address_handler *handler;
+	struct fw_request *request;
+	unsigned long long offset;
+	unsigned long flags;
+	int tcode, destination, source;
+
+	if (p->payload_length > 2048) {
+		/* FIXME: send error response. */
+		return;
+	}
+
+	if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE)
+		return;
+
+	request = allocate_request(p);
+	if (request == NULL) {
+		/* FIXME: send statically allocated busy packet. */
+		return;
+	}
+
+	offset      =
+		((unsigned long long)
+		 HEADER_GET_OFFSET_HIGH(p->header[1]) << 32) | p->header[2];
+	tcode       = HEADER_GET_TCODE(p->header[0]);
+	destination = HEADER_GET_DESTINATION(p->header[0]);
+	source      = HEADER_GET_SOURCE(p->header[0]);
+
+	spin_lock_irqsave(&address_handler_lock, flags);
+	handler = lookup_enclosing_address_handler(&address_handler_list,
+						   offset, request->length);
+	spin_unlock_irqrestore(&address_handler_lock, flags);
+
+	/*
+	 * FIXME: lookup the fw_node corresponding to the sender of
+	 * this request and pass that to the address handler instead
+	 * of the node ID.  We may also want to move the address
+	 * allocations to fw_node so we only do this callback if the
+	 * upper layers registered it for this node.
+	 */
+
+	if (handler == NULL)
+		fw_send_response(card, request, RCODE_ADDRESS_ERROR);
+	else
+		handler->address_callback(card, request,
+					  tcode, destination, source,
+					  p->generation, p->speed, offset,
+					  request->data, request->length,
+					  handler->callback_data);
+}
+EXPORT_SYMBOL(fw_core_handle_request);
+
+void
+fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
+{
+	struct fw_transaction *t;
+	unsigned long flags;
+	u32 *data;
+	size_t data_length;
+	int tcode, tlabel, destination, source, rcode;
+
+	tcode       = HEADER_GET_TCODE(p->header[0]);
+	tlabel      = HEADER_GET_TLABEL(p->header[0]);
+	destination = HEADER_GET_DESTINATION(p->header[0]);
+	source      = HEADER_GET_SOURCE(p->header[1]);
+	rcode       = HEADER_GET_RCODE(p->header[1]);
+
+	spin_lock_irqsave(&card->lock, flags);
+	list_for_each_entry(t, &card->transaction_list, link) {
+		if (t->node_id == source && t->tlabel == tlabel) {
+			list_del(&t->link);
+			card->tlabel_mask &= ~(1 << t->tlabel);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&card->lock, flags);
+
+	if (&t->link == &card->transaction_list) {
+		fw_notify("Unsolicited response (source %x, tlabel %x)\n",
+			  source, tlabel);
+		return;
+	}
+
+	/*
+	 * FIXME: sanity check packet: is the length correct, do the
+	 * tcodes and addresses match?
+	 */
+
+	switch (tcode) {
+	case TCODE_READ_QUADLET_RESPONSE:
+		data = (u32 *) &p->header[3];
+		data_length = 4;
+		break;
+
+	case TCODE_WRITE_RESPONSE:
+		data = NULL;
+		data_length = 0;
+		break;
+
+	case TCODE_READ_BLOCK_RESPONSE:
+	case TCODE_LOCK_RESPONSE:
+		data = p->payload;
+		data_length = HEADER_GET_DATA_LENGTH(p->header[3]);
+		break;
+
+	default:
+		/* Should never happen, this is just to shut up gcc. */
+		data = NULL;
+		data_length = 0;
+		break;
+	}
+
+	t->callback(card, rcode, data, data_length, t->callback_data);
+}
+EXPORT_SYMBOL(fw_core_handle_response);
+
+const struct fw_address_region topology_map_region =
+	{ .start = 0xfffff0001000ull, .end = 0xfffff0001400ull, };
+
+static void
+handle_topology_map(struct fw_card *card, struct fw_request *request,
+		    int tcode, int destination, int source,
+		    int generation, int speed,
+		    unsigned long long offset,
+		    void *payload, size_t length, void *callback_data)
+{
+	int i, start, end;
+	u32 *map;
+
+	if (!TCODE_IS_READ_REQUEST(tcode)) {
+		fw_send_response(card, request, RCODE_TYPE_ERROR);
+		return;
+	}
+
+	if ((offset & 3) > 0 || (length & 3) > 0) {
+		fw_send_response(card, request, RCODE_ADDRESS_ERROR);
+		return;
+	}
+
+	start = (offset - topology_map_region.start) / 4;
+	end = start + length / 4;
+	map = payload;
+
+	for (i = 0; i < length / 4; i++)
+		map[i] = cpu_to_be32(card->topology_map[start + i]);
+
+	fw_send_response(card, request, RCODE_COMPLETE);
+}
+
+static struct fw_address_handler topology_map = {
+	.length			= 0x200,
+	.address_callback	= handle_topology_map,
+};
+
+const struct fw_address_region registers_region =
+	{ .start = 0xfffff0000000ull, .end = 0xfffff0000400ull, };
+
+static void
+handle_registers(struct fw_card *card, struct fw_request *request,
+		 int tcode, int destination, int source,
+		 int generation, int speed,
+		 unsigned long long offset,
+		 void *payload, size_t length, void *callback_data)
+{
+	int reg = offset - CSR_REGISTER_BASE;
+	unsigned long long bus_time;
+	__be32 *data = payload;
+
+	switch (reg) {
+	case CSR_CYCLE_TIME:
+	case CSR_BUS_TIME:
+		if (!TCODE_IS_READ_REQUEST(tcode) || length != 4) {
+			fw_send_response(card, request, RCODE_TYPE_ERROR);
+			break;
+		}
+
+		bus_time = card->driver->get_bus_time(card);
+		if (reg == CSR_CYCLE_TIME)
+			*data = cpu_to_be32(bus_time);
+		else
+			*data = cpu_to_be32(bus_time >> 25);
+		fw_send_response(card, request, RCODE_COMPLETE);
+		break;
+
+	case CSR_BUS_MANAGER_ID:
+	case CSR_BANDWIDTH_AVAILABLE:
+	case CSR_CHANNELS_AVAILABLE_HI:
+	case CSR_CHANNELS_AVAILABLE_LO:
+		/*
+		 * FIXME: these are handled by the OHCI hardware and
+		 * the stack never sees these requests.  If we add
+		 * support for a new type of controller that doesn't
+		 * handle this in hardware we need to deal with these
+		 * transactions.
+		 */
+		BUG();
+		break;
+
+	case CSR_BUSY_TIMEOUT:
+		/* FIXME: Implement this. */
+	default:
+		fw_send_response(card, request, RCODE_ADDRESS_ERROR);
+		break;
+	}
+}
+
+static struct fw_address_handler registers = {
+	.length			= 0x400,
+	.address_callback	= handle_registers,
+};
+
+MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
+MODULE_DESCRIPTION("Core IEEE1394 transaction logic");
+MODULE_LICENSE("GPL");
+
+static const u32 vendor_textual_descriptor[] = {
+	/* textual descriptor leaf () */
+	0x00060000,
+	0x00000000,
+	0x00000000,
+	0x4c696e75,		/* L i n u */
+	0x78204669,		/* x   F i */
+	0x72657769,		/* r e w i */
+	0x72650000,		/* r e     */
+};
+
+static const u32 model_textual_descriptor[] = {
+	/* model descriptor leaf () */
+	0x00030000,
+	0x00000000,
+	0x00000000,
+	0x4a756a75,		/* J u j u */
+};
+
+static struct fw_descriptor vendor_id_descriptor = {
+	.length = ARRAY_SIZE(vendor_textual_descriptor),
+	.immediate = 0x03d00d1e,
+	.key = 0x81000000,
+	.data = vendor_textual_descriptor,
+};
+
+static struct fw_descriptor model_id_descriptor = {
+	.length = ARRAY_SIZE(model_textual_descriptor),
+	.immediate = 0x17000001,
+	.key = 0x81000000,
+	.data = model_textual_descriptor,
+};
+
+static int __init fw_core_init(void)
+{
+	int retval;
+
+	retval = bus_register(&fw_bus_type);
+	if (retval < 0)
+		return retval;
+
+	fw_cdev_major = register_chrdev(0, "firewire", &fw_device_ops);
+	if (fw_cdev_major < 0) {
+		bus_unregister(&fw_bus_type);
+		return fw_cdev_major;
+	}
+
+	retval = fw_core_add_address_handler(&topology_map,
+					     &topology_map_region);
+	BUG_ON(retval < 0);
+
+	retval = fw_core_add_address_handler(&registers,
+					     &registers_region);
+	BUG_ON(retval < 0);
+
+	/* Add the vendor textual descriptor. */
+	retval = fw_core_add_descriptor(&vendor_id_descriptor);
+	BUG_ON(retval < 0);
+	retval = fw_core_add_descriptor(&model_id_descriptor);
+	BUG_ON(retval < 0);
+
+	return 0;
+}
+
+static void __exit fw_core_cleanup(void)
+{
+	unregister_chrdev(fw_cdev_major, "firewire");
+	bus_unregister(&fw_bus_type);
+}
+
+module_init(fw_core_init);
+module_exit(fw_core_cleanup);
diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h
new file mode 100644
index 000000000000..acdc3be38c61
--- /dev/null
+++ b/drivers/firewire/fw-transaction.h
@@ -0,0 +1,458 @@
+/*
+ * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __fw_transaction_h
+#define __fw_transaction_h
+
+#include <linux/device.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/fs.h>
+#include <linux/dma-mapping.h>
+#include <linux/firewire-constants.h>
+
+#define TCODE_IS_READ_REQUEST(tcode)	(((tcode) & ~1) == 4)
+#define TCODE_IS_BLOCK_PACKET(tcode)	(((tcode) &  1) != 0)
+#define TCODE_IS_REQUEST(tcode)		(((tcode) &  2) == 0)
+#define TCODE_IS_RESPONSE(tcode)	(((tcode) &  2) != 0)
+#define TCODE_HAS_REQUEST_DATA(tcode)	(((tcode) & 12) != 4)
+#define TCODE_HAS_RESPONSE_DATA(tcode)	(((tcode) & 12) != 0)
+
+#define LOCAL_BUS 0xffc0
+
+#define SELFID_PORT_CHILD	0x3
+#define SELFID_PORT_PARENT	0x2
+#define SELFID_PORT_NCONN	0x1
+#define SELFID_PORT_NONE	0x0
+
+#define PHY_PACKET_CONFIG	0x0
+#define PHY_PACKET_LINK_ON	0x1
+#define PHY_PACKET_SELF_ID	0x2
+
+/* Bit fields _within_ the PHY registers. */
+#define PHY_LINK_ACTIVE		0x80
+#define PHY_CONTENDER		0x40
+#define PHY_BUS_RESET		0x40
+#define PHY_BUS_SHORT_RESET	0x40
+
+#define CSR_REGISTER_BASE		0xfffff0000000ULL
+
+/* register offsets relative to CSR_REGISTER_BASE */
+#define CSR_STATE_CLEAR			0x0
+#define CSR_STATE_SET			0x4
+#define CSR_NODE_IDS			0x8
+#define CSR_RESET_START			0xc
+#define CSR_SPLIT_TIMEOUT_HI		0x18
+#define CSR_SPLIT_TIMEOUT_LO		0x1c
+#define CSR_CYCLE_TIME			0x200
+#define CSR_BUS_TIME			0x204
+#define CSR_BUSY_TIMEOUT		0x210
+#define CSR_BUS_MANAGER_ID		0x21c
+#define CSR_BANDWIDTH_AVAILABLE		0x220
+#define CSR_CHANNELS_AVAILABLE		0x224
+#define CSR_CHANNELS_AVAILABLE_HI	0x224
+#define CSR_CHANNELS_AVAILABLE_LO	0x228
+#define CSR_BROADCAST_CHANNEL		0x234
+#define CSR_CONFIG_ROM			0x400
+#define CSR_CONFIG_ROM_END		0x800
+#define CSR_FCP_COMMAND			0xB00
+#define CSR_FCP_RESPONSE		0xD00
+#define CSR_FCP_END			0xF00
+#define CSR_TOPOLOGY_MAP		0x1000
+#define CSR_TOPOLOGY_MAP_END		0x1400
+#define CSR_SPEED_MAP			0x2000
+#define CSR_SPEED_MAP_END		0x3000
+
+#define fw_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, ## args)
+#define fw_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args)
+#define fw_debug(s, args...) printk(KERN_DEBUG KBUILD_MODNAME ": " s, ## args)
+
+static inline void
+fw_memcpy_from_be32(void *_dst, void *_src, size_t size)
+{
+	u32 *dst = _dst;
+	u32 *src = _src;
+	int i;
+
+	for (i = 0; i < size / 4; i++)
+		dst[i] = cpu_to_be32(src[i]);
+}
+
+static inline void
+fw_memcpy_to_be32(void *_dst, void *_src, size_t size)
+{
+	fw_memcpy_from_be32(_dst, _src, size);
+}
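
Both helpers byte-swap an array of quadlets in 4-byte steps; since the swap is its own inverse, the to_be32 variant simply aliases the from_be32 one. They may also be used in place, as fw-sbp2.c does when byte-swapping the ORB before sending it:

	fw_memcpy_to_be32(&orb->request, &orb->request,
			  sizeof(orb->request));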
+
+struct fw_card;
+struct fw_packet;
+struct fw_node;
+struct fw_request;
+
+struct fw_descriptor {
+	struct list_head link;
+	size_t length;
+	u32 immediate;
+	u32 key;
+	const u32 *data;
+};
+
+int fw_core_add_descriptor(struct fw_descriptor *desc);
+void fw_core_remove_descriptor(struct fw_descriptor *desc);
+
+typedef void (*fw_packet_callback_t)(struct fw_packet *packet,
+				     struct fw_card *card, int status);
+
+typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
+					  void *data,
+					  size_t length,
+					  void *callback_data);
+
+typedef void (*fw_address_callback_t)(struct fw_card *card,
+				      struct fw_request *request,
+				      int tcode, int destination, int source,
+				      int generation, int speed,
+				      unsigned long long offset,
+				      void *data, size_t length,
+				      void *callback_data);
+
+typedef void (*fw_bus_reset_callback_t)(struct fw_card *handle,
+					int node_id, int generation,
+					u32 *self_ids,
+					int self_id_count,
+					void *callback_data);
+
+struct fw_packet {
+	int speed;
+	int generation;
+	u32 header[4];
+	size_t header_length;
+	void *payload;
+	size_t payload_length;
+	u32 timestamp;
+
+	/*
+	 * This callback is called when the packet transmission has
+	 * completed; for successful transmission, the status code is
+	 * the ack received from the destination, otherwise it's a
+	 * negative errno: ENOMEM, ESTALE, ETIMEDOUT, ENODEV, EIO.
+	 * The callback can be called from tasklet context and thus
+	 * must never block.
+	 */
+	fw_packet_callback_t callback;
+	int ack;
+	struct list_head link;
+	void *driver_data;
+};
+
+struct fw_transaction {
+	int node_id; /* The generation is implied; it is always the current. */
+	int tlabel;
+	int timestamp;
+	struct list_head link;
+
+	struct fw_packet packet;
+
+	/*
+	 * The data passed to the callback is valid only during the
+	 * callback.
+	 */
+	fw_transaction_callback_t callback;
+	void *callback_data;
+};
+
+static inline struct fw_packet *
+fw_packet(struct list_head *l)
+{
+	return list_entry(l, struct fw_packet, link);
+}
+
+struct fw_address_handler {
+	u64 offset;
+	size_t length;
+	fw_address_callback_t address_callback;
+	void *callback_data;
+	struct list_head link;
+};
+
+
+struct fw_address_region {
+	u64 start;
+	u64 end;
+};
+
+extern const struct fw_address_region fw_low_memory_region;
+extern const struct fw_address_region fw_high_memory_region;
+extern const struct fw_address_region fw_private_region;
+extern const struct fw_address_region fw_csr_region;
+extern const struct fw_address_region fw_unit_space_region;
+
+int fw_core_add_address_handler(struct fw_address_handler *handler,
+				const struct fw_address_region *region);
+void fw_core_remove_address_handler(struct fw_address_handler *handler);
+void fw_fill_response(struct fw_packet *response, u32 *request_header,
+		      int rcode, void *payload, size_t length);
+void fw_send_response(struct fw_card *card,
+		      struct fw_request *request, int rcode);
+
+extern struct bus_type fw_bus_type;
+
+struct fw_card {
+	const struct fw_card_driver *driver;
+	struct device *device;
+	struct kref kref;
+
+	int node_id;
+	int generation;
+	/* This is the generation used for timestamping incoming requests. */
+	int request_generation;
+	int current_tlabel, tlabel_mask;
+	struct list_head transaction_list;
+	struct timer_list flush_timer;
+	unsigned long reset_jiffies;
+
+	unsigned long long guid;
+	int max_receive;
+	int link_speed;
+	int config_rom_generation;
+
+	/*
+	 * We need to store up to 4 self IDs for each of a maximum of
+	 * 63 devices, plus 3 words for the topology map header.
+	 */
+	int self_id_count;
+	u32 topology_map[252 + 3];
+
+	spinlock_t lock; /* Take this lock when handling the lists in
+			  * this struct. */
+	struct fw_node *local_node;
+	struct fw_node *root_node;
+	struct fw_node *irm_node;
+	int color;
+	int gap_count;
+	int topology_type;
+
+	int index;
+
+	struct list_head link;
+
+	/* Work struct for BM duties. */
+	struct delayed_work work;
+	int bm_retries;
+	int bm_generation;
+};
+
+struct fw_card *fw_card_get(struct fw_card *card);
+void fw_card_put(struct fw_card *card);
+
+/*
+ * The iso packet format allows for an immediate header/payload part
+ * stored in 'header' immediately after the packet info plus an
+ * indirect payload part that is pointed to by the 'payload' field.
+ * Applications can use one or the other or both to implement simple
+ * low-bandwidth streaming (e.g. audio) or more advanced
+ * scatter-gather streaming (e.g. assembling video frames
+ * automatically).
+ */
+
+struct fw_iso_packet {
+	u16 payload_length;	/* Length of indirect payload. */
+	u32 interrupt : 1;	/* Generate interrupt on this packet */
+	u32 skip : 1;		/* Set to not send packet at all. */
+	u32 tag : 2;
+	u32 sy : 4;
+	u32 header_length : 8;	/* Length of immediate header. */
+	u32 header[0];
+};
+
+#define FW_ISO_CONTEXT_TRANSMIT	0
+#define FW_ISO_CONTEXT_RECEIVE	1
+
+#define FW_ISO_CONTEXT_MATCH_TAG0	 1
+#define FW_ISO_CONTEXT_MATCH_TAG1	 2
+#define FW_ISO_CONTEXT_MATCH_TAG2	 4
+#define FW_ISO_CONTEXT_MATCH_TAG3	 8
+#define FW_ISO_CONTEXT_MATCH_ALL_TAGS	15
+
+struct fw_iso_context;
+
+typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
+				  u32 cycle,
+				  size_t header_length,
+				  void *header,
+				  void *data);
+
+/*
+ * An iso buffer is just a set of pages mapped for DMA in the
+ * specified direction.  Since the pages are to be used for DMA, they
+ * are not mapped into the kernel virtual address space.  We store the
+ * DMA address in each page's private field.  The helper function
+ * fw_iso_buffer_map() will map the pages into a given vma.
+ */
+
+struct fw_iso_buffer {
+	enum dma_data_direction direction;
+	struct page **pages;
+	int page_count;
+};
+
+struct fw_iso_context {
+	struct fw_card *card;
+	int type;
+	int channel;
+	int speed;
+	size_t header_size;
+	fw_iso_callback_t callback;
+	void *callback_data;
+};
+
+int
+fw_iso_buffer_init(struct fw_iso_buffer *buffer,
+		   struct fw_card *card,
+		   int page_count,
+		   enum dma_data_direction direction);
+int
+fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
+void
+fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
+
+struct fw_iso_context *
+fw_iso_context_create(struct fw_card *card, int type,
+		      int channel, int speed, size_t header_size,
+		      fw_iso_callback_t callback, void *callback_data);
+
+void
+fw_iso_context_destroy(struct fw_iso_context *ctx);
+
+int
+fw_iso_context_queue(struct fw_iso_context *ctx,
+		     struct fw_iso_packet *packet,
+		     struct fw_iso_buffer *buffer,
+		     unsigned long payload);
+
+int
+fw_iso_context_start(struct fw_iso_context *ctx,
+		     int cycle, int sync, int tags);
+
+int
+fw_iso_context_stop(struct fw_iso_context *ctx);
+
+struct fw_card_driver {
+	const char *name;
+
+	/*
+	 * Enable the given card with the given initial config rom.
+	 * This function is expected to activate the card, and either
+	 * enable the PHY or set the link_on bit and initiate a bus
+	 * reset.
+	 */
+	int (*enable)(struct fw_card *card, u32 *config_rom, size_t length);
+
+	int (*update_phy_reg)(struct fw_card *card, int address,
+			      int clear_bits, int set_bits);
+
+	/*
+	 * Update the config rom for an enabled card.  This function
+	 * should change the config rom that is presented on the bus
+	 * and initiate a bus reset.
+	 */
+	int (*set_config_rom)(struct fw_card *card,
+			      u32 *config_rom, size_t length);
+
+	void (*send_request)(struct fw_card *card, struct fw_packet *packet);
+	void (*send_response)(struct fw_card *card, struct fw_packet *packet);
+	/* Calling cancel is valid once a packet has been submitted. */
+	int (*cancel_packet)(struct fw_card *card, struct fw_packet *packet);
+
+	/*
+	 * Allow the specified node ID to do direct DMA out and in of
+	 * host memory.  The card will disable this for all nodes when
+	 * a bus reset happens, so the driver needs to re-enable it
+	 * after a bus reset.  Returns 0 on success, -ENODEV if the
+	 * card doesn't support this, -ESTALE if the generation doesn't
+	 * match.
+	 */
+	int (*enable_phys_dma)(struct fw_card *card,
+			       int node_id, int generation);
+
+	u64 (*get_bus_time)(struct fw_card *card);
+
+	struct fw_iso_context *
+	(*allocate_iso_context)(struct fw_card *card,
+				int type, size_t header_size);
+	void (*free_iso_context)(struct fw_iso_context *ctx);
+
+	int (*start_iso)(struct fw_iso_context *ctx,
+			 s32 cycle, u32 sync, u32 tags);
+
+	int (*queue_iso)(struct fw_iso_context *ctx,
+			 struct fw_iso_packet *packet,
+			 struct fw_iso_buffer *buffer,
+			 unsigned long payload);
+
+	int (*stop_iso)(struct fw_iso_context *ctx);
+};
+
+int
+fw_core_initiate_bus_reset(struct fw_card *card, int short_reset);
+
+void
+fw_send_request(struct fw_card *card, struct fw_transaction *t,
+		int tcode, int node_id, int generation, int speed,
+		unsigned long long offset,
+		void *data, size_t length,
+		fw_transaction_callback_t callback, void *callback_data);
+
+int fw_cancel_transaction(struct fw_card *card,
+			  struct fw_transaction *transaction);
+
+void fw_flush_transactions(struct fw_card *card);
+
+void fw_send_phy_config(struct fw_card *card,
+			int node_id, int generation, int gap_count);
+
+/*
+ * Called by the topology code to inform the device code of node
+ * activity; found, lost, or updated nodes.
+ */
+void
+fw_node_event(struct fw_card *card, struct fw_node *node, int event);
+
+/* API used by card level drivers */
+
+void
+fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver,
+		   struct device *device);
+int
+fw_card_add(struct fw_card *card,
+	    u32 max_receive, u32 link_speed, u64 guid);
+
+void
+fw_core_remove_card(struct fw_card *card);
+
+void
+fw_core_handle_bus_reset(struct fw_card *card,
+			 int node_id, int generation,
+			 int self_id_count, u32 *self_ids);
+void
+fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
+
+void
+fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
+
+#endif /* __fw_transaction_h */
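
For orientation, here is a hedged sketch of issuing an asynchronous quadlet
read through the transaction API declared above.  It is illustrative only,
not part of this patch: the fw_transaction_callback_t signature is assumed
from earlier in fw-transaction.h, and the CSR offset and SCODE_100 speed are
arbitrary example choices.

/* Hedged sketch, not part of this patch: read one quadlet from a node's
 * config rom area with fw_send_request().  Assumes the callback typedef
 * earlier in this header passes (card, rcode, payload, length, data). */
static void example_read_done(struct fw_card *card, int rcode,
			      void *payload, size_t length,
			      void *callback_data)
{
	if (rcode == RCODE_COMPLETE)
		;	/* payload/length hold the response data */
	else if (rcode == RCODE_GENERATION)
		;	/* a bus reset invalidated the request; retry */
}

static void example_read(struct fw_card *card, struct fw_transaction *t,
			 int node_id, int generation)
{
	fw_send_request(card, t, TCODE_READ_QUADLET_REQUEST,
			node_id, generation, SCODE_100,
			0xfffff0000400ULL,	/* CSR config rom base */
			NULL, 4, example_read_done, NULL);
}

The RCODE_COMPLETE and RCODE_GENERATION values used here come from
firewire-constants.h further down in this patch; RCODE_GENERATION is how the
core reports that a bus reset made the cached (node_id, generation) pair
stale.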
diff --git a/drivers/ieee1394/Kconfig b/drivers/ieee1394/Kconfig
index f21426ad2faa..8012b3b0ce75 100644
--- a/drivers/ieee1394/Kconfig
+++ b/drivers/ieee1394/Kconfig
@@ -1,6 +1,8 @@
 menu "IEEE 1394 (FireWire) support"
 	depends on PCI || BROKEN
 
+source "drivers/firewire/Kconfig"
+
 config IEEE1394
 	tristate "IEEE 1394 (FireWire) support"
 	depends on PCI || BROKEN
diff --git a/include/linux/crc-itu-t.h b/include/linux/crc-itu-t.h
new file mode 100644
index 000000000000..84920f3cc83e
--- /dev/null
+++ b/include/linux/crc-itu-t.h
@@ -0,0 +1,28 @@
+/*
+ *	crc-itu-t.h - CRC ITU-T V.41 routine
+ *
+ * Implements the standard CRC ITU-T V.41:
+ *   Width 16
+ *   Poly  0x1021 (x^16 + x^12 + x^5 + 1)
+ *   Init  0
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#ifndef CRC_ITU_T_H
+#define CRC_ITU_T_H
+
+#include <linux/types.h>
+
+extern u16 const crc_itu_t_table[256];
+
+extern u16 crc_itu_t(u16 crc, const u8 *buffer, size_t len);
+
+static inline u16 crc_itu_t_byte(u16 crc, const u8 data)
+{
+	return (crc << 8) ^ crc_itu_t_table[((crc >> 8) ^ data) & 0xff];
+}
+
+#endif /* CRC_ITU_T_H */
+
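
For reference, crc_itu_t_byte() above is the table-driven form of the usual
MSB-first update for polynomial 0x1021: each table entry is what eight
shift-and-conditional-XOR steps produce for that index byte.  A hedged
bitwise equivalent, with an illustrative helper name that is not part of
this patch:

/* Bitwise reference for one byte of CRC ITU-T V.41 (poly 0x1021, MSB
 * first).  Must agree with the table-driven crc_itu_t_byte() above. */
static unsigned short crc_itu_t_byte_bitwise(unsigned short crc,
					     unsigned char data)
{
	int i;

	crc ^= (unsigned short)data << 8;
	for (i = 0; i < 8; i++)
		crc = (crc & 0x8000) ? (crc << 1) ^ 0x1021 : crc << 1;
	return crc;
}

Folding the data byte into the top of the CRC before shifting is what makes
the table index ((crc >> 8) ^ data) & 0xff in the inline helper.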
diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h
new file mode 100644
index 000000000000..d4455eb2ae35
--- /dev/null
+++ b/include/linux/firewire-cdev.h
@@ -0,0 +1,229 @@
+/*
+ * Char device interface.
+ *
+ * Copyright (C) 2005-2006  Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _LINUX_FIREWIRE_CDEV_H
+#define _LINUX_FIREWIRE_CDEV_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+#include <linux/firewire-constants.h>
+
+#define FW_CDEV_EVENT_BUS_RESET		0x00
+#define FW_CDEV_EVENT_RESPONSE		0x01
+#define FW_CDEV_EVENT_REQUEST		0x02
+#define FW_CDEV_EVENT_ISO_INTERRUPT	0x03
+
+/*
+ * The 'closure' fields are for user space to use.  Data passed in the
+ * 'closure' field of a request is returned in the corresponding event.
+ * It is a fixed-size 64-bit type, big enough to hold a pointer on all
+ * platforms.
+ */
+
+struct fw_cdev_event_common {
+	__u64 closure;
+	__u32 type;
+};
+
+struct fw_cdev_event_bus_reset {
+	__u64 closure;
+	__u32 type;
+	__u32 node_id;
+	__u32 local_node_id;
+	__u32 bm_node_id;
+	__u32 irm_node_id;
+	__u32 root_node_id;
+	__u32 generation;
+};
+
+struct fw_cdev_event_response {
+	__u64 closure;
+	__u32 type;
+	__u32 rcode;
+	__u32 length;
+	__u32 data[0];
+};
+
+struct fw_cdev_event_request {
+	__u64 closure;
+	__u32 type;
+	__u32 tcode;
+	__u64 offset;
+	__u32 handle;
+	__u32 length;
+	__u32 data[0];
+};
+
+struct fw_cdev_event_iso_interrupt {
+	__u64 closure;
+	__u32 type;
+	__u32 cycle;
+	__u32 header_length;	/* Length in bytes of following headers. */
+	__u32 header[0];
+};
+
+union fw_cdev_event {
+	struct fw_cdev_event_common common;
+	struct fw_cdev_event_bus_reset bus_reset;
+	struct fw_cdev_event_response response;
+	struct fw_cdev_event_request request;
+	struct fw_cdev_event_iso_interrupt iso_interrupt;
+};
+
+#define FW_CDEV_IOC_GET_INFO		_IOWR('#', 0x00, struct fw_cdev_get_info)
+#define FW_CDEV_IOC_SEND_REQUEST	_IOW('#', 0x01, struct fw_cdev_send_request)
+#define FW_CDEV_IOC_ALLOCATE		_IOWR('#', 0x02, struct fw_cdev_allocate)
+#define FW_CDEV_IOC_DEALLOCATE		_IOW('#', 0x03, struct fw_cdev_deallocate)
+#define FW_CDEV_IOC_SEND_RESPONSE	_IOW('#', 0x04, struct fw_cdev_send_response)
+#define FW_CDEV_IOC_INITIATE_BUS_RESET	_IOW('#', 0x05, struct fw_cdev_initiate_bus_reset)
+#define FW_CDEV_IOC_ADD_DESCRIPTOR	_IOWR('#', 0x06, struct fw_cdev_add_descriptor)
+#define FW_CDEV_IOC_REMOVE_DESCRIPTOR	_IOW('#', 0x07, struct fw_cdev_remove_descriptor)
+
+#define FW_CDEV_IOC_CREATE_ISO_CONTEXT	_IOWR('#', 0x08, struct fw_cdev_create_iso_context)
+#define FW_CDEV_IOC_QUEUE_ISO		_IOWR('#', 0x09, struct fw_cdev_queue_iso)
+#define FW_CDEV_IOC_START_ISO		_IOW('#', 0x0a, struct fw_cdev_start_iso)
+#define FW_CDEV_IOC_STOP_ISO		_IOW('#', 0x0b, struct fw_cdev_stop_iso)
+
+/* FW_CDEV_VERSION History
+ *
+ * 1	Feb 18, 2007:  Initial version.
+ */
+#define FW_CDEV_VERSION		1
+
+struct fw_cdev_get_info {
+	/*
+	 * The version field is just a running serial number; we never
+	 * break backwards compatibility.  Userspace passes in the
+	 * version it expects and the kernel passes back the highest
+	 * version it can provide.  Even if the structs in this
+	 * interface are extended in a later version, the kernel will
+	 * not copy back more data than what was present in the
+	 * interface version userspace expects.
+	 */
+	__u32 version;
+
+	/*
+	 * If the 'rom' pointer is non-zero, at most 'rom_length' bytes
+	 * of the config rom are copied into that user space address.
+	 * In either case, 'rom_length' is updated with the actual
+	 * length of the config rom.
+	 */
+	__u32 rom_length;
+	__u64 rom;
+
+	/*
+	 * If non-zero, a fw_cdev_event_bus_reset struct is copied here
+	 * with the current state of the bus.  This does not cause a
+	 * bus reset to happen.  The value of 'closure' in this and
+	 * subsequent bus reset events is set to 'bus_reset_closure'.
+	 */
+	__u64 bus_reset;
+	__u64 bus_reset_closure;
+
+	/* The index of the card this device belongs to. */
+	__u32 card;
+};
+
+struct fw_cdev_send_request {
+	__u32 tcode;
+	__u32 length;
+	__u64 offset;
+	__u64 closure;
+	__u64 data;
+	__u32 generation;
+};
+
+struct fw_cdev_send_response {
+	__u32 rcode;
+	__u32 length;
+	__u64 data;
+	__u32 handle;
+};
+
+struct fw_cdev_allocate {
+	__u64 offset;
+	__u64 closure;
+	__u32 length;
+	__u32 handle;
+};
+
+struct fw_cdev_deallocate {
+	__u32 handle;
+};
+
+#define FW_CDEV_LONG_RESET	0
+#define FW_CDEV_SHORT_RESET	1
+
+struct fw_cdev_initiate_bus_reset {
+	__u32 type;
+};
+
+struct fw_cdev_add_descriptor {
+	__u32 immediate;
+	__u32 key;
+	__u64 data;
+	__u32 length;
+	__u32 handle;
+};
+
+struct fw_cdev_remove_descriptor {
+	__u32 handle;
+};
+
+#define FW_CDEV_ISO_CONTEXT_TRANSMIT	0
+#define FW_CDEV_ISO_CONTEXT_RECEIVE	1
+
+#define FW_CDEV_ISO_CONTEXT_MATCH_TAG0		 1
+#define FW_CDEV_ISO_CONTEXT_MATCH_TAG1		 2
+#define FW_CDEV_ISO_CONTEXT_MATCH_TAG2		 4
+#define FW_CDEV_ISO_CONTEXT_MATCH_TAG3		 8
+#define FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS	15
+
+struct fw_cdev_create_iso_context {
+	__u32 type;
+	__u32 header_size;
+	__u32 channel;
+	__u32 speed;
+	__u64 closure;
+	__u32 handle;
+};
+
+struct fw_cdev_iso_packet {
+	__u16 payload_length;	/* Length of indirect payload. */
+	__u32 interrupt : 1;	/* Generate an interrupt for this packet. */
+	__u32 skip : 1;		/* Set to skip sending this packet entirely. */
+	__u32 tag : 2;
+	__u32 sy : 4;
+	__u32 header_length : 8;	/* Length of immediate header. */
+	__u32 header[0];
+};
+
+struct fw_cdev_queue_iso {
+	__u64 packets;
+	__u64 data;
+	__u32 size;
+	__u32 handle;
+};
+
+struct fw_cdev_start_iso {
+	__s32 cycle;
+	__u32 sync;
+	__u32 tags;
+	__u32 handle;
+};
+
+struct fw_cdev_stop_iso {
+	__u32 handle;
+};
+
+#endif /* _LINUX_FIREWIRE_CDEV_H */
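
To illustrate the ABI above, a hedged userspace sketch that negotiates the
interface version and reads the current bus state via FW_CDEV_IOC_GET_INFO.
The /dev/fw0 device path is an assumption for illustration; this header does
not define how the character device nodes are named.

/* Userspace sketch: query ABI version and bus state from one card.
 * Assumes the char device node is /dev/fw0 (not defined by this header). */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

int main(void)
{
	struct fw_cdev_get_info info;
	struct fw_cdev_event_bus_reset reset;
	int fd = open("/dev/fw0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&info, 0, sizeof(info));
	info.version = FW_CDEV_VERSION;			/* version we expect */
	info.bus_reset = (__u64)(unsigned long)&reset;	/* fill in bus state */

	if (ioctl(fd, FW_CDEV_IOC_GET_INFO, &info) < 0)
		return 1;

	printf("ABI version %u, node 0x%x, generation %u\n",
	       info.version, reset.node_id, reset.generation);
	return 0;
}

Because 'rom' is left zero, no config rom is copied; because 'bus_reset' is
non-zero, the kernel fills the caller's fw_cdev_event_bus_reset without
actually resetting the bus.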
diff --git a/include/linux/firewire-constants.h b/include/linux/firewire-constants.h
new file mode 100644
index 000000000000..b316770a43fd
--- /dev/null
+++ b/include/linux/firewire-constants.h
@@ -0,0 +1,67 @@
+#ifndef _LINUX_FIREWIRE_CONSTANTS_H
+#define _LINUX_FIREWIRE_CONSTANTS_H
+
+#define TCODE_WRITE_QUADLET_REQUEST	0x0
+#define TCODE_WRITE_BLOCK_REQUEST	0x1
+#define TCODE_WRITE_RESPONSE		0x2
+#define TCODE_READ_QUADLET_REQUEST	0x4
+#define TCODE_READ_BLOCK_REQUEST	0x5
+#define TCODE_READ_QUADLET_RESPONSE	0x6
+#define TCODE_READ_BLOCK_RESPONSE	0x7
+#define TCODE_CYCLE_START		0x8
+#define TCODE_LOCK_REQUEST		0x9
+#define TCODE_STREAM_DATA		0xa
+#define TCODE_LOCK_RESPONSE		0xb
+
+#define EXTCODE_MASK_SWAP		0x1
+#define EXTCODE_COMPARE_SWAP		0x2
+#define EXTCODE_FETCH_ADD		0x3
+#define EXTCODE_LITTLE_ADD		0x4
+#define EXTCODE_BOUNDED_ADD		0x5
+#define EXTCODE_WRAP_ADD		0x6
+#define EXTCODE_VENDOR_DEPENDENT	0x7
+
+/* Juju-specific tcodes */
+#define TCODE_LOCK_MASK_SWAP		(0x10 | EXTCODE_MASK_SWAP)
+#define TCODE_LOCK_COMPARE_SWAP		(0x10 | EXTCODE_COMPARE_SWAP)
+#define TCODE_LOCK_FETCH_ADD		(0x10 | EXTCODE_FETCH_ADD)
+#define TCODE_LOCK_LITTLE_ADD		(0x10 | EXTCODE_LITTLE_ADD)
+#define TCODE_LOCK_BOUNDED_ADD		(0x10 | EXTCODE_BOUNDED_ADD)
+#define TCODE_LOCK_WRAP_ADD		(0x10 | EXTCODE_WRAP_ADD)
+#define TCODE_LOCK_VENDOR_DEPENDENT	(0x10 | EXTCODE_VENDOR_DEPENDENT)
+
+#define RCODE_COMPLETE			0x0
+#define RCODE_CONFLICT_ERROR		0x4
+#define RCODE_DATA_ERROR		0x5
+#define RCODE_TYPE_ERROR		0x6
+#define RCODE_ADDRESS_ERROR		0x7
+
+/* Juju-specific rcodes */
+#define RCODE_SEND_ERROR		0x10
+#define RCODE_CANCELLED			0x11
+#define RCODE_BUSY			0x12
+#define RCODE_GENERATION		0x13
+#define RCODE_NO_ACK			0x14
+
+#define SCODE_100			0x0
+#define SCODE_200			0x1
+#define SCODE_400			0x2
+#define SCODE_800			0x3
+#define SCODE_1600			0x4
+#define SCODE_3200			0x5
+#define SCODE_BETA			0x3
+
+#define ACK_COMPLETE			0x1
+#define ACK_PENDING			0x2
+#define ACK_BUSY_X			0x4
+#define ACK_BUSY_A			0x5
+#define ACK_BUSY_B			0x6
+#define ACK_DATA_ERROR			0xd
+#define ACK_TYPE_ERROR			0xe
+
+#define RETRY_1				0x00
+#define RETRY_X				0x01
+#define RETRY_A				0x02
+#define RETRY_B				0x03
+
+#endif /* _LINUX_FIREWIRE_CONSTANTS_H */
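
The SCODE_* values encode the IEEE 1394 speed ladder: each step doubles the
data rate, so the nominal rate in megabits per second is 100 << scode.  A
hedged one-liner making that explicit (illustrative helper, not part of the
ABI):

/* Nominal bit rate for a speed code: SCODE_100 -> 100, SCODE_200 -> 200,
 * ..., SCODE_3200 -> 3200 (Mb/s). */
static inline unsigned int scode_to_mbps(unsigned int scode)
{
	return 100U << scode;
}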
diff --git a/lib/Kconfig b/lib/Kconfig
index 96d6e8ca8b70..2e7ae6b9215b 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -23,6 +23,14 @@ config CRC16
 	  the kernel tree does. Such modules that use library CRC16
 	  functions require M here.
 
+config CRC_ITU_T
+	tristate "CRC ITU-T V.41 functions"
+	help
+	  This option is provided for the case where no in-kernel-tree
+	  modules require CRC ITU-T V.41 functions, but a module built outside
+	  the kernel tree does. Such modules that use library CRC ITU-T V.41
+	  functions require M here.
+
 config CRC32
 	tristate "CRC32 functions"
 	default y
diff --git a/lib/Makefile b/lib/Makefile
index ae57f357fec0..1f65b4613e09 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -41,6 +41,7 @@ endif
 obj-$(CONFIG_BITREVERSE) += bitrev.o
 obj-$(CONFIG_CRC_CCITT)	+= crc-ccitt.o
 obj-$(CONFIG_CRC16)	+= crc16.o
+obj-$(CONFIG_CRC_ITU_T)	+= crc-itu-t.o
 obj-$(CONFIG_CRC32)	+= crc32.o
 obj-$(CONFIG_LIBCRC32C)	+= libcrc32c.o
 obj-$(CONFIG_GENERIC_ALLOCATOR) += genalloc.o
diff --git a/lib/crc-itu-t.c b/lib/crc-itu-t.c
new file mode 100644
index 000000000000..a63472b82416
--- /dev/null
+++ b/lib/crc-itu-t.c
@@ -0,0 +1,69 @@
+/*
+ *      crc-itu-t.c
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/crc-itu-t.h>
+
+/* CRC table for the CRC ITU-T V.41 polynomial 0x1021 (x^16 + x^12 + x^5 + 1) */
+const u16 crc_itu_t_table[256] = {
+	0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
+	0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
+	0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
+	0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
+	0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
+	0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
+	0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
+	0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
+	0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
+	0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
+	0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
+	0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
+	0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
+	0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
+	0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
+	0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
+	0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
+	0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
+	0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
+	0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
+	0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
+	0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
+	0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
+	0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
+	0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
+	0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
+	0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
+	0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
+	0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
+	0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
+	0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
+	0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0
+};
+
+EXPORT_SYMBOL(crc_itu_t_table);
+
+/**
+ * crc_itu_t - Compute the CRC-ITU-T for the data buffer
+ *
+ * @crc:     previous CRC value
+ * @buffer:  data pointer
+ * @len:     number of bytes in the buffer
+ *
+ * Returns the updated CRC value
+ */
+u16 crc_itu_t(u16 crc, const u8 *buffer, size_t len)
+{
+	while (len--)
+		crc = crc_itu_t_byte(crc, *buffer++);
+	return crc;
+}
+EXPORT_SYMBOL(crc_itu_t);
+
+MODULE_DESCRIPTION("CRC ITU-T V.41 calculations");
+MODULE_LICENSE("GPL");
+
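
A quick way to validate this implementation: with an initial CRC of 0, the
conventional check input "123456789" must yield 0x31c3, the standard check
value for this variant (known elsewhere as CRC-16/XMODEM).  A hedged
self-test sketch, assuming the includes already at the top of crc-itu-t.c;
it is not part of this patch:

/* Self-test sketch: CRC ITU-T V.41 over "123456789" with init 0 is the
 * standard check value 0x31c3.  Returns 0 on success. */
static int __init crc_itu_t_selftest(void)
{
	static const u8 check[] = "123456789";

	return crc_itu_t(0, check, sizeof(check) - 1) == 0x31c3 ?
		0 : -EINVAL;
}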