firewire: Uppercase most macro names.
Signed-off-by: Kristian Hoegsberg <krh@redhat.com>
Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
parent a98e271987
commit a77754a75d
drivers/firewire/fw-card.c
@@ -44,20 +44,20 @@ static LIST_HEAD(card_list);
 static LIST_HEAD(descriptor_list);
 static int descriptor_count;
 
-#define bib_crc(v) ((v) << 0)
-#define bib_crc_length(v) ((v) << 16)
-#define bib_info_length(v) ((v) << 24)
+#define BIB_CRC(v) ((v) << 0)
+#define BIB_CRC_LENGTH(v) ((v) << 16)
+#define BIB_INFO_LENGTH(v) ((v) << 24)
 
-#define bib_link_speed(v) ((v) << 0)
-#define bib_generation(v) ((v) << 4)
-#define bib_max_rom(v) ((v) << 8)
-#define bib_max_receive(v) ((v) << 12)
-#define bib_cyc_clk_acc(v) ((v) << 16)
-#define bib_pmc ((1) << 27)
-#define bib_bmc ((1) << 28)
-#define bib_isc ((1) << 29)
-#define bib_cmc ((1) << 30)
-#define bib_imc ((1) << 31)
+#define BIB_LINK_SPEED(v) ((v) << 0)
+#define BIB_GENERATION(v) ((v) << 4)
+#define BIB_MAX_ROM(v) ((v) << 8)
+#define BIB_MAX_RECEIVE(v) ((v) << 12)
+#define BIB_CYC_CLK_ACC(v) ((v) << 16)
+#define BIB_PMC ((1) << 27)
+#define BIB_BMC ((1) << 28)
+#define BIB_ISC ((1) << 29)
+#define BIB_CMC ((1) << 30)
+#define BIB_IMC ((1) << 31)
 
 static u32 *
 generate_config_rom(struct fw_card *card, size_t *config_rom_length)
@@ -76,15 +76,15 @@ generate_config_rom(struct fw_card *card, size_t *config_rom_length)
          */
 
         memset(config_rom, 0, sizeof config_rom);
-        config_rom[0] = bib_crc_length(4) | bib_info_length(4) | bib_crc(0);
+        config_rom[0] = BIB_CRC_LENGTH(4) | BIB_INFO_LENGTH(4) | BIB_CRC(0);
         config_rom[1] = 0x31333934;
 
         config_rom[2] =
-                bib_link_speed(card->link_speed) |
-                bib_generation(card->config_rom_generation++ % 14 + 2) |
-                bib_max_rom(2) |
-                bib_max_receive(card->max_receive) |
-                bib_bmc | bib_isc | bib_cmc | bib_imc;
+                BIB_LINK_SPEED(card->link_speed) |
+                BIB_GENERATION(card->config_rom_generation++ % 14 + 2) |
+                BIB_MAX_ROM(2) |
+                BIB_MAX_RECEIVE(card->max_receive) |
+                BIB_BMC | BIB_ISC | BIB_CMC | BIB_IMC;
         config_rom[3] = card->guid >> 32;
         config_rom[4] = card->guid;
 
@@ -318,7 +318,7 @@ fw_card_bm_work(struct work_struct *work)
                  */
                 spin_unlock_irqrestore(&card->lock, flags);
                 return;
-        } else if (root->config_rom[2] & bib_cmc) {
+        } else if (root->config_rom[2] & BIB_CMC) {
                 /*
                  * FIXME: I suppose we should set the cmstr bit in the
                  * STATE_CLEAR register of this node, as described in
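
Not part of the patch, but as a quick orientation for the rename above: the BIB_* helpers only shift a value into its bit position within a bus-info-block quadlet, so generate_config_rom() can OR them together. A minimal standalone sketch, using the same shift values as the definitions shown in this hunk:

#include <stdint.h>
#include <stdio.h>

/* Same shifts as the BIB_* macros in the hunk above. */
#define BIB_CRC(v)         ((v) << 0)
#define BIB_CRC_LENGTH(v)  ((v) << 16)
#define BIB_INFO_LENGTH(v) ((v) << 24)

int main(void)
{
        /* First config ROM quadlet: info_length=4, crc_length=4, crc=0. */
        uint32_t q = BIB_CRC_LENGTH(4) | BIB_INFO_LENGTH(4) | BIB_CRC(0);

        printf("config_rom[0] = 0x%08x\n", q);   /* prints 0x04040000 */
        return 0;
}
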
drivers/firewire/fw-ohci.c
@@ -33,19 +33,19 @@
 #include "fw-transaction.h"
 #include "fw-ohci.h"
 
-#define descriptor_output_more 0
-#define descriptor_output_last (1 << 12)
-#define descriptor_input_more (2 << 12)
-#define descriptor_input_last (3 << 12)
-#define descriptor_status (1 << 11)
-#define descriptor_key_immediate (2 << 8)
-#define descriptor_ping (1 << 7)
-#define descriptor_yy (1 << 6)
-#define descriptor_no_irq (0 << 4)
-#define descriptor_irq_error (1 << 4)
-#define descriptor_irq_always (3 << 4)
-#define descriptor_branch_always (3 << 2)
-#define descriptor_wait (3 << 0)
+#define DESCRIPTOR_OUTPUT_MORE 0
+#define DESCRIPTOR_OUTPUT_LAST (1 << 12)
+#define DESCRIPTOR_INPUT_MORE (2 << 12)
+#define DESCRIPTOR_INPUT_LAST (3 << 12)
+#define DESCRIPTOR_STATUS (1 << 11)
+#define DESCRIPTOR_KEY_IMMEDIATE (2 << 8)
+#define DESCRIPTOR_PING (1 << 7)
+#define DESCRIPTOR_YY (1 << 6)
+#define DESCRIPTOR_NO_IRQ (0 << 4)
+#define DESCRIPTOR_IRQ_ERROR (1 << 4)
+#define DESCRIPTOR_IRQ_ALWAYS (3 << 4)
+#define DESCRIPTOR_BRANCH_ALWAYS (3 << 2)
+#define DESCRIPTOR_WAIT (3 << 0)
 
 struct descriptor {
         __le16 req_count;
@@ -70,10 +70,10 @@ struct db_descriptor {
         __le32 reserved1;
 } __attribute__((aligned(16)));
 
-#define control_set(regs) (regs)
-#define control_clear(regs) ((regs) + 4)
-#define command_ptr(regs) ((regs) + 12)
-#define context_match(regs) ((regs) + 16)
+#define CONTROL_SET(regs) (regs)
+#define CONTROL_CLEAR(regs) ((regs) + 4)
+#define COMMAND_PTR(regs) ((regs) + 12)
+#define CONTEXT_MATCH(regs) ((regs) + 16)
 
 struct ar_buffer {
         struct descriptor descriptor;
@@ -112,12 +112,12 @@ struct context {
         struct tasklet_struct tasklet;
 };
 
-#define it_header_sy(v) ((v) << 0)
-#define it_header_tcode(v) ((v) << 4)
-#define it_header_channel(v) ((v) << 8)
-#define it_header_tag(v) ((v) << 14)
-#define it_header_speed(v) ((v) << 16)
-#define it_header_data_length(v) ((v) << 16)
+#define IT_HEADER_SY(v) ((v) << 0)
+#define IT_HEADER_TCODE(v) ((v) << 4)
+#define IT_HEADER_CHANNEL(v) ((v) << 8)
+#define IT_HEADER_TAG(v) ((v) << 14)
+#define IT_HEADER_SPEED(v) ((v) << 16)
+#define IT_HEADER_DATA_LENGTH(v) ((v) << 16)
 
 struct iso_context {
         struct fw_iso_context base;
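
Again as a hedged aside rather than part of the patch: the DESCRIPTOR_* values OR together into the 16-bit control word of an OHCI DMA descriptor. A small standalone sketch of the combination that ar_context_add_page() uses below:

#include <stdint.h>
#include <stdio.h>

/* Shift values copied from the DESCRIPTOR_* definitions above. */
#define DESCRIPTOR_INPUT_MORE    (2 << 12)
#define DESCRIPTOR_STATUS        (1 << 11)
#define DESCRIPTOR_BRANCH_ALWAYS (3 << 2)

int main(void)
{
        /* Control word built the same way ar_context_add_page() does. */
        uint16_t control = DESCRIPTOR_INPUT_MORE |
                           DESCRIPTOR_STATUS |
                           DESCRIPTOR_BRANCH_ALWAYS;

        printf("descriptor control = 0x%04x\n", control);   /* 0x280c */
        return 0;
}
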
@@ -256,9 +256,9 @@ static int ar_context_add_page(struct ar_context *ctx)
         }
 
         memset(&ab->descriptor, 0, sizeof ab->descriptor);
-        ab->descriptor.control = cpu_to_le16(descriptor_input_more |
-                                             descriptor_status |
-                                             descriptor_branch_always);
+        ab->descriptor.control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
+                                             DESCRIPTOR_STATUS |
+                                             DESCRIPTOR_BRANCH_ALWAYS);
         offset = offsetof(struct ar_buffer, data);
         ab->descriptor.req_count = cpu_to_le16(PAGE_SIZE - offset);
         ab->descriptor.data_address = cpu_to_le32(ab_bus + offset);
@@ -271,7 +271,7 @@ static int ar_context_add_page(struct ar_context *ctx)
         ctx->last_buffer->next = ab;
         ctx->last_buffer = ab;
 
-        reg_write(ctx->ohci, control_set(ctx->regs), CONTEXT_WAKE);
+        reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
         flush_writes(ctx->ohci);
 
         return 0;
@@ -416,8 +416,8 @@ ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, u32 regs)
         ctx->current_buffer = ab.next;
         ctx->pointer = ctx->current_buffer->data;
 
-        reg_write(ctx->ohci, command_ptr(ctx->regs), ab.descriptor.branch_address);
-        reg_write(ctx->ohci, control_set(ctx->regs), CONTEXT_RUN);
+        reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ab.descriptor.branch_address);
+        reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
         flush_writes(ctx->ohci);
 
         return 0;
@@ -488,7 +488,7 @@ context_init(struct context *ctx, struct fw_ohci *ohci,
          */
 
         memset(ctx->head_descriptor, 0, sizeof *ctx->head_descriptor);
-        ctx->head_descriptor->control = cpu_to_le16(descriptor_output_last);
+        ctx->head_descriptor->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
         ctx->head_descriptor->transfer_status = cpu_to_le16(0x8011);
         ctx->head_descriptor++;
 
@@ -536,10 +536,10 @@ static void context_run(struct context *ctx, u32 extra)
 {
         struct fw_ohci *ohci = ctx->ohci;
 
-        reg_write(ohci, command_ptr(ctx->regs),
+        reg_write(ohci, COMMAND_PTR(ctx->regs),
                   le32_to_cpu(ctx->tail_descriptor_last->branch_address));
-        reg_write(ohci, control_clear(ctx->regs), ~0);
-        reg_write(ohci, control_set(ctx->regs), CONTEXT_RUN | extra);
+        reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
+        reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
         flush_writes(ohci);
 }
 
@@ -557,7 +557,7 @@ static void context_append(struct context *ctx,
         dma_sync_single_for_device(ctx->ohci->card.device, ctx->buffer_bus,
                                    ctx->buffer_size, DMA_TO_DEVICE);
 
-        reg_write(ctx->ohci, control_set(ctx->regs), CONTEXT_WAKE);
+        reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
         flush_writes(ctx->ohci);
 }
 
@@ -566,11 +566,11 @@ static void context_stop(struct context *ctx)
         u32 reg;
         int i;
 
-        reg_write(ctx->ohci, control_clear(ctx->regs), CONTEXT_RUN);
+        reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
         flush_writes(ctx->ohci);
 
         for (i = 0; i < 10; i++) {
-                reg = reg_read(ctx->ohci, control_set(ctx->regs));
+                reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
                 if ((reg & CONTEXT_ACTIVE) == 0)
                         break;
 
@@ -605,7 +605,7 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
                 return -1;
         }
 
-        d[0].control = cpu_to_le16(descriptor_key_immediate);
+        d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
         d[0].res_count = cpu_to_le16(packet->timestamp);
 
         /*
@@ -660,9 +660,9 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
                 z = 2;
         }
 
-        last->control |= cpu_to_le16(descriptor_output_last |
-                                     descriptor_irq_always |
-                                     descriptor_branch_always);
+        last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
+                                     DESCRIPTOR_IRQ_ALWAYS |
+                                     DESCRIPTOR_BRANCH_ALWAYS);
 
         /* FIXME: Document how the locking works. */
         if (ohci->generation != packet->generation) {
@@ -673,7 +673,7 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
         context_append(ctx, d, z, 4 - z);
 
         /* If the context isn't already running, start it up. */
-        reg = reg_read(ctx->ohci, control_set(ctx->regs));
+        reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
         if ((reg & CONTEXT_RUN) == 0)
                 context_run(ctx, 0);
 
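
A brief sketch, assuming only what the definitions earlier in this file show: CONTROL_SET, CONTROL_CLEAR, COMMAND_PTR and CONTEXT_MATCH are plain byte offsets from a context's register base, matching the set/clear register pairs that context_run() and context_stop() write above. The base value below is invented for illustration.

#include <stdint.h>
#include <stdio.h>

/* Offsets copied from the CONTROL_* / COMMAND_PTR definitions above. */
#define CONTROL_SET(regs)   (regs)
#define CONTROL_CLEAR(regs) ((regs) + 4)
#define COMMAND_PTR(regs)   ((regs) + 12)
#define CONTEXT_MATCH(regs) ((regs) + 16)

int main(void)
{
        uint32_t regs = 0x180;   /* hypothetical context register base */

        printf("set=0x%x clear=0x%x cmd_ptr=0x%x match=0x%x\n",
               CONTROL_SET(regs), CONTROL_CLEAR(regs),
               COMMAND_PTR(regs), CONTEXT_MATCH(regs));
        return 0;
}
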
@@ -750,11 +750,11 @@ static int handle_at_packet(struct context *context,
         return 1;
 }
 
-#define header_get_destination(q) (((q) >> 16) & 0xffff)
-#define header_get_tcode(q) (((q) >> 4) & 0x0f)
-#define header_get_offset_high(q) (((q) >> 0) & 0xffff)
-#define header_get_data_length(q) (((q) >> 16) & 0xffff)
-#define header_get_extended_tcode(q) (((q) >> 0) & 0xffff)
+#define HEADER_GET_DESTINATION(q) (((q) >> 16) & 0xffff)
+#define HEADER_GET_TCODE(q) (((q) >> 4) & 0x0f)
+#define HEADER_GET_OFFSET_HIGH(q) (((q) >> 0) & 0xffff)
+#define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff)
+#define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff)
 
 static void
 handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
@@ -762,9 +762,9 @@ handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
         struct fw_packet response;
         int tcode, length, i;
 
-        tcode = header_get_tcode(packet->header[0]);
+        tcode = HEADER_GET_TCODE(packet->header[0]);
         if (TCODE_IS_BLOCK_PACKET(tcode))
-                length = header_get_data_length(packet->header[3]);
+                length = HEADER_GET_DATA_LENGTH(packet->header[3]);
         else
                 length = 4;
 
@@ -791,10 +791,10 @@ handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
         __be32 *payload, lock_old;
         u32 lock_arg, lock_data;
 
-        tcode = header_get_tcode(packet->header[0]);
-        length = header_get_data_length(packet->header[3]);
+        tcode = HEADER_GET_TCODE(packet->header[0]);
+        length = HEADER_GET_DATA_LENGTH(packet->header[3]);
         payload = packet->payload;
-        ext_tcode = header_get_extended_tcode(packet->header[3]);
+        ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]);
 
         if (tcode == TCODE_LOCK_REQUEST &&
             ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
@@ -838,7 +838,7 @@ handle_local_request(struct context *ctx, struct fw_packet *packet)
 
         offset =
                 ((unsigned long long)
-                 header_get_offset_high(packet->header[1]) << 32) |
+                 HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) |
                 packet->header[2];
         csr = offset - CSR_REGISTER_BASE;
 
@@ -874,7 +874,7 @@ at_context_transmit(struct context *ctx, struct fw_packet *packet)
 
         spin_lock_irqsave(&ctx->ohci->lock, flags);
 
-        if (header_get_destination(packet->header[0]) == ctx->ohci->node_id &&
+        if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id &&
             ctx->ohci->generation == packet->generation) {
                 spin_unlock_irqrestore(&ctx->ohci->lock, flags);
                 handle_local_request(ctx, packet);
@@ -1306,7 +1306,7 @@ static int handle_ir_dualbuffer_packet(struct context *context,
 
         ctx->header_length = i;
 
-        if (le16_to_cpu(db->control) & descriptor_irq_always) {
+        if (le16_to_cpu(db->control) & DESCRIPTOR_IRQ_ALWAYS) {
                 ir_header = (__le32 *) (db + 1);
                 ctx->base.callback(&ctx->base,
                                    le32_to_cpu(ir_header[0]) & 0xffff,
@@ -1329,7 +1329,7 @@ static int handle_it_packet(struct context *context,
                 /* This descriptor isn't done yet, stop iteration. */
                 return 0;
 
-        if (le16_to_cpu(last->control) & descriptor_irq_always)
+        if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS)
                 ctx->base.callback(&ctx->base, le16_to_cpu(last->res_count),
                                    0, NULL, ctx->base.callback_data);
 
@@ -1428,7 +1428,7 @@ static int ohci_start_iso(struct fw_iso_context *base,
 
         reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
         reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
-        reg_write(ohci, context_match(ctx->context.regs), match);
+        reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
         context_run(&ctx->context, control);
 }
 
@@ -1525,17 +1525,17 @@ ohci_queue_iso_transmit(struct fw_iso_context *base,
                 return -ENOMEM;
 
         if (!p->skip) {
-                d[0].control = cpu_to_le16(descriptor_key_immediate);
+                d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
                 d[0].req_count = cpu_to_le16(8);
 
                 header = (__le32 *) &d[1];
-                header[0] = cpu_to_le32(it_header_sy(p->sy) |
-                                        it_header_tag(p->tag) |
-                                        it_header_tcode(TCODE_STREAM_DATA) |
-                                        it_header_channel(ctx->base.channel) |
-                                        it_header_speed(ctx->base.speed));
+                header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
+                                        IT_HEADER_TAG(p->tag) |
+                                        IT_HEADER_TCODE(TCODE_STREAM_DATA) |
+                                        IT_HEADER_CHANNEL(ctx->base.channel) |
+                                        IT_HEADER_SPEED(ctx->base.speed));
                 header[1] =
-                        cpu_to_le32(it_header_data_length(p->header_length +
+                        cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
                                                           p->payload_length));
         }
 
@@ -1562,14 +1562,14 @@ ohci_queue_iso_transmit(struct fw_iso_context *base,
         }
 
         if (p->interrupt)
-                irq = descriptor_irq_always;
+                irq = DESCRIPTOR_IRQ_ALWAYS;
         else
-                irq = descriptor_no_irq;
+                irq = DESCRIPTOR_NO_IRQ;
 
         last = z == 2 ? d : d + z - 1;
-        last->control |= cpu_to_le16(descriptor_output_last |
-                                     descriptor_status |
-                                     descriptor_branch_always |
+        last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
+                                     DESCRIPTOR_STATUS |
+                                     DESCRIPTOR_BRANCH_ALWAYS |
                                      irq);
 
         context_append(&ctx->context, d, z, header_z);
@@ -1602,9 +1602,9 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
                         return -ENOMEM;
 
                 db = (struct db_descriptor *) d;
-                db->control = cpu_to_le16(descriptor_status |
-                                          descriptor_branch_always |
-                                          descriptor_wait);
+                db->control = cpu_to_le16(DESCRIPTOR_STATUS |
+                                          DESCRIPTOR_BRANCH_ALWAYS |
+                                          DESCRIPTOR_WAIT);
                 db->first_size = cpu_to_le16(ctx->base.header_size + 4);
                 context_append(&ctx->context, d, 2, 0);
         }
@@ -1634,8 +1634,8 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
                         return -ENOMEM;
 
                 db = (struct db_descriptor *) d;
-                db->control = cpu_to_le16(descriptor_status |
-                                          descriptor_branch_always);
+                db->control = cpu_to_le16(DESCRIPTOR_STATUS |
+                                          DESCRIPTOR_BRANCH_ALWAYS);
                 db->first_size = cpu_to_le16(ctx->base.header_size + 4);
                 db->first_req_count = cpu_to_le16(header_size);
                 db->first_res_count = db->first_req_count;
@@ -1652,7 +1652,7 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
                 db->second_buffer = cpu_to_le32(page_bus + offset);
 
                 if (p->interrupt && length == rest)
-                        db->control |= cpu_to_le16(descriptor_irq_always);
+                        db->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
 
                 context_append(&ctx->context, d, z, header_z);
                 offset = (offset + length) & ~PAGE_MASK;
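
One more illustrative sketch, not from the patch: the IT_HEADER_* helpers build the two header quadlets of an isochronous transmit packet the same way ohci_queue_iso_transmit() does above. All field values below are made up; TCODE_STREAM_DATA is the IEEE 1394 stream-data transaction code.

#include <stdint.h>
#include <stdio.h>

/* Shifts copied from the IT_HEADER_* definitions earlier in the diff. */
#define IT_HEADER_SY(v)          ((v) << 0)
#define IT_HEADER_TCODE(v)       ((v) << 4)
#define IT_HEADER_CHANNEL(v)     ((v) << 8)
#define IT_HEADER_TAG(v)         ((v) << 14)
#define IT_HEADER_SPEED(v)       ((v) << 16)
#define IT_HEADER_DATA_LENGTH(v) ((v) << 16)

#define TCODE_STREAM_DATA 0xa   /* IEEE 1394 stream data tcode */

int main(void)
{
        /* Example values: channel 5, tag 1, sy 0, speed code 2, 64-byte payload. */
        uint32_t header0 = IT_HEADER_SY(0) |
                           IT_HEADER_TAG(1) |
                           IT_HEADER_TCODE(TCODE_STREAM_DATA) |
                           IT_HEADER_CHANNEL(5) |
                           IT_HEADER_SPEED(2);
        uint32_t header1 = IT_HEADER_DATA_LENGTH(64);

        printf("header[0]=0x%08x header[1]=0x%08x\n", header0, header1);
        return 0;
}
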
drivers/firewire/fw-sbp2.c
@@ -123,14 +123,14 @@ struct sbp2_device {
 #define SBP2_STATUS_ILLEGAL_REQUEST 0x2
 #define SBP2_STATUS_VENDOR_DEPENDENT 0x3
 
-#define status_get_orb_high(v) ((v).status & 0xffff)
-#define status_get_sbp_status(v) (((v).status >> 16) & 0xff)
-#define status_get_len(v) (((v).status >> 24) & 0x07)
-#define status_get_dead(v) (((v).status >> 27) & 0x01)
-#define status_get_response(v) (((v).status >> 28) & 0x03)
-#define status_get_source(v) (((v).status >> 30) & 0x03)
-#define status_get_orb_low(v) ((v).orb_low)
-#define status_get_data(v) ((v).data)
+#define STATUS_GET_ORB_HIGH(v) ((v).status & 0xffff)
+#define STATUS_GET_SBP_STATUS(v) (((v).status >> 16) & 0xff)
+#define STATUS_GET_LEN(v) (((v).status >> 24) & 0x07)
+#define STATUS_GET_DEAD(v) (((v).status >> 27) & 0x01)
+#define STATUS_GET_RESPONSE(v) (((v).status >> 28) & 0x03)
+#define STATUS_GET_SOURCE(v) (((v).status >> 30) & 0x03)
+#define STATUS_GET_ORB_LOW(v) ((v).orb_low)
+#define STATUS_GET_DATA(v) ((v).data)
 
 struct sbp2_status {
         u32 status;
@@ -152,15 +152,15 @@ struct sbp2_orb {
         struct list_head link;
 };
 
-#define management_orb_lun(v) ((v))
-#define management_orb_function(v) ((v) << 16)
-#define management_orb_reconnect(v) ((v) << 20)
-#define management_orb_exclusive ((1) << 28)
-#define management_orb_request_format(v) ((v) << 29)
-#define management_orb_notify ((1) << 31)
+#define MANAGEMENT_ORB_LUN(v) ((v))
+#define MANAGEMENT_ORB_FUNCTION(v) ((v) << 16)
+#define MANAGEMENT_ORB_RECONNECT(v) ((v) << 20)
+#define MANAGEMENT_ORB_EXCLUSIVE ((1) << 28)
+#define MANAGEMENT_ORB_REQUEST_FORMAT(v) ((v) << 29)
+#define MANAGEMENT_ORB_NOTIFY ((1) << 31)
 
-#define management_orb_response_length(v) ((v))
-#define management_orb_password_length(v) ((v) << 16)
+#define MANAGEMENT_ORB_RESPONSE_LENGTH(v) ((v))
+#define MANAGEMENT_ORB_PASSWORD_LENGTH(v) ((v) << 16)
 
 struct sbp2_management_orb {
         struct sbp2_orb base;
@@ -177,23 +177,22 @@ struct sbp2_management_orb {
         struct sbp2_status status;
 };
 
-#define login_response_get_login_id(v) ((v).misc & 0xffff)
-#define login_response_get_length(v) (((v).misc >> 16) & 0xffff)
+#define LOGIN_RESPONSE_GET_LOGIN_ID(v) ((v).misc & 0xffff)
+#define LOGIN_RESPONSE_GET_LENGTH(v) (((v).misc >> 16) & 0xffff)
 
 struct sbp2_login_response {
         u32 misc;
         struct sbp2_pointer command_block_agent;
         u32 reconnect_hold;
 };
 
-#define command_orb_data_size(v) ((v))
-#define command_orb_page_size(v) ((v) << 16)
-#define command_orb_page_table_present ((1) << 19)
-#define command_orb_max_payload(v) ((v) << 20)
-#define command_orb_speed(v) ((v) << 24)
-#define command_orb_direction(v) ((v) << 27)
-#define command_orb_request_format(v) ((v) << 29)
-#define command_orb_notify ((1) << 31)
+#define COMMAND_ORB_DATA_SIZE(v) ((v))
+#define COMMAND_ORB_PAGE_SIZE(v) ((v) << 16)
+#define COMMAND_ORB_PAGE_TABLE_PRESENT ((1) << 19)
+#define COMMAND_ORB_MAX_PAYLOAD(v) ((v) << 20)
+#define COMMAND_ORB_SPEED(v) ((v) << 24)
+#define COMMAND_ORB_DIRECTION(v) ((v) << 27)
+#define COMMAND_ORB_REQUEST_FORMAT(v) ((v) << 29)
+#define COMMAND_ORB_NOTIFY ((1) << 31)
 
 struct sbp2_command_orb {
         struct sbp2_orb base;
@@ -290,7 +289,7 @@ sbp2_status_write(struct fw_card *card, struct fw_request *request,
         fw_memcpy_from_be32(&status, payload, header_size);
         if (length > header_size)
                 memcpy(status.data, payload + 8, length - header_size);
-        if (status_get_source(status) == 2 || status_get_source(status) == 3) {
+        if (STATUS_GET_SOURCE(status) == 2 || STATUS_GET_SOURCE(status) == 3) {
                 fw_notify("non-orb related status write, not handled\n");
                 fw_send_response(card, request, RCODE_COMPLETE);
                 return;
@@ -299,8 +298,8 @@ sbp2_status_write(struct fw_card *card, struct fw_request *request,
         /* Lookup the orb corresponding to this status write. */
         spin_lock_irqsave(&card->lock, flags);
         list_for_each_entry(orb, &sd->orb_list, link) {
-                if (status_get_orb_high(status) == 0 &&
-                    status_get_orb_low(status) == orb->request_bus &&
+                if (STATUS_GET_ORB_HIGH(status) == 0 &&
+                    STATUS_GET_ORB_LOW(status) == orb->request_bus &&
                     orb->rcode == RCODE_COMPLETE) {
                         list_del(&orb->link);
                         break;
@@ -425,11 +424,11 @@ sbp2_send_management_orb(struct fw_unit *unit, int node_id, int generation,
         orb->request.response.low = orb->response_bus;
 
         orb->request.misc =
-                management_orb_notify |
-                management_orb_function(function) |
-                management_orb_lun(lun);
+                MANAGEMENT_ORB_NOTIFY |
+                MANAGEMENT_ORB_FUNCTION(function) |
+                MANAGEMENT_ORB_LUN(lun);
         orb->request.length =
-                management_orb_response_length(sizeof orb->response);
+                MANAGEMENT_ORB_RESPONSE_LENGTH(sizeof orb->response);
 
         orb->request.status_fifo.high = sd->address_handler.offset >> 32;
         orb->request.status_fifo.low = sd->address_handler.offset;
@@ -441,8 +440,8 @@ sbp2_send_management_orb(struct fw_unit *unit, int node_id, int generation,
          */
         if (function == SBP2_LOGIN_REQUEST) {
                 orb->request.misc |=
-                        management_orb_exclusive |
-                        management_orb_reconnect(0);
+                        MANAGEMENT_ORB_EXCLUSIVE |
+                        MANAGEMENT_ORB_RECONNECT(0);
         }
 
         fw_memcpy_to_be32(&orb->request, &orb->request, sizeof orb->request);
@@ -469,11 +468,11 @@ sbp2_send_management_orb(struct fw_unit *unit, int node_id, int generation,
                 goto out;
         }
 
-        if (status_get_response(orb->status) != 0 ||
-            status_get_sbp_status(orb->status) != 0) {
+        if (STATUS_GET_RESPONSE(orb->status) != 0 ||
+            STATUS_GET_SBP_STATUS(orb->status) != 0) {
                 fw_error("error status: %d:%d\n",
-                         status_get_response(orb->status),
-                         status_get_sbp_status(orb->status));
+                         STATUS_GET_RESPONSE(orb->status),
+                         STATUS_GET_SBP_STATUS(orb->status));
                 goto out;
         }
 
@@ -577,7 +576,7 @@ static void sbp2_login(struct work_struct *work)
         sd->command_block_agent_address =
                 ((u64) (response.command_block_agent.high & 0xffff) << 32) |
                 response.command_block_agent.low;
-        sd->login_id = login_response_get_login_id(response);
+        sd->login_id = LOGIN_RESPONSE_GET_LOGIN_ID(response);
 
         fw_notify("logged in to sbp2 unit %s (%d retries)\n",
                   unit->device.bus_id, sd->retries);
@@ -828,10 +827,10 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
         int result;
 
         if (status != NULL) {
-                if (status_get_dead(*status))
+                if (STATUS_GET_DEAD(*status))
                         sbp2_agent_reset(unit);
 
-                switch (status_get_response(*status)) {
+                switch (STATUS_GET_RESPONSE(*status)) {
                 case SBP2_STATUS_REQUEST_COMPLETE:
                         result = DID_OK << 16;
                         break;
@@ -845,8 +844,8 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
                         break;
                 }
 
-                if (result == DID_OK << 16 && status_get_len(*status) > 1)
-                        result = sbp2_status_to_sense_data(status_get_data(*status),
+                if (result == DID_OK << 16 && STATUS_GET_LEN(*status) > 1)
+                        result = sbp2_status_to_sense_data(STATUS_GET_DATA(*status),
                                                            orb->cmd->sense_buffer);
         } else {
                 /*
@@ -906,7 +905,7 @@ static void sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb)
                 orb->request.data_descriptor.high = sd->address_high;
                 orb->request.data_descriptor.low = sg_dma_address(sg);
                 orb->request.misc |=
-                        command_orb_data_size(sg_dma_len(sg));
+                        COMMAND_ORB_DATA_SIZE(sg_dma_len(sg));
                 return;
         }
 
@@ -943,8 +942,8 @@ static void sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb)
         orb->request.data_descriptor.high = sd->address_high;
         orb->request.data_descriptor.low = orb->page_table_bus;
         orb->request.misc |=
-                command_orb_page_table_present |
-                command_orb_data_size(j);
+                COMMAND_ORB_PAGE_TABLE_PRESENT |
+                COMMAND_ORB_DATA_SIZE(j);
 
         fw_memcpy_to_be32(orb->page_table, orb->page_table, size);
 }
@@ -969,7 +968,7 @@ static void sbp2_command_orb_map_buffer(struct sbp2_command_orb *orb)
         orb->request.data_descriptor.high = sd->address_high;
         orb->request.data_descriptor.low = orb->request_buffer_bus;
         orb->request.misc |=
-                command_orb_data_size(orb->cmd->request_bufflen);
+                COMMAND_ORB_DATA_SIZE(orb->cmd->request_bufflen);
 }
 
 /* SCSI stack integration */
@@ -1017,16 +1016,16 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
          * if we set this to max_speed + 7, we get the right value.
          */
         orb->request.misc =
-                command_orb_max_payload(device->node->max_speed + 7) |
-                command_orb_speed(device->node->max_speed) |
-                command_orb_notify;
+                COMMAND_ORB_MAX_PAYLOAD(device->node->max_speed + 7) |
+                COMMAND_ORB_SPEED(device->node->max_speed) |
+                COMMAND_ORB_NOTIFY;
 
         if (cmd->sc_data_direction == DMA_FROM_DEVICE)
                 orb->request.misc |=
-                        command_orb_direction(SBP2_DIRECTION_FROM_MEDIA);
+                        COMMAND_ORB_DIRECTION(SBP2_DIRECTION_FROM_MEDIA);
         else if (cmd->sc_data_direction == DMA_TO_DEVICE)
                 orb->request.misc |=
-                        command_orb_direction(SBP2_DIRECTION_TO_MEDIA);
+                        COMMAND_ORB_DIRECTION(SBP2_DIRECTION_TO_MEDIA);
 
         if (cmd->use_sg) {
                 sbp2_command_orb_map_scatterlist(orb);
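
As a hedged illustration of the SBP-2 helpers just renamed: the COMMAND_ORB_* macros pack fields into an ORB word, and the STATUS_GET_* macros unpack a status write. The struct name sbp2_status_fields and all values below are invented stand-ins for this sketch, and the unsigned suffix on the NOTIFY bit is added only to keep the standalone example well-defined.

#include <stdint.h>
#include <stdio.h>

/* Packing macros, shifts copied from the diff above. */
#define COMMAND_ORB_DATA_SIZE(v)   ((v))
#define COMMAND_ORB_MAX_PAYLOAD(v) ((v) << 20)
#define COMMAND_ORB_SPEED(v)       ((v) << 24)
#define COMMAND_ORB_NOTIFY         (1u << 31)   /* 'u' added for this sketch */

/* Minimal stand-in for struct sbp2_status, just enough for the macros. */
struct sbp2_status_fields { uint32_t status; };
#define STATUS_GET_RESPONSE(v) (((v).status >> 28) & 0x03)
#define STATUS_GET_DEAD(v)     (((v).status >> 27) & 0x01)
#define STATUS_GET_LEN(v)      (((v).status >> 24) & 0x07)

int main(void)
{
        uint32_t misc = COMMAND_ORB_NOTIFY |
                        COMMAND_ORB_MAX_PAYLOAD(9) |   /* max_speed + 7 for S400 */
                        COMMAND_ORB_SPEED(2) |
                        COMMAND_ORB_DATA_SIZE(4096);

        struct sbp2_status_fields st = { .status = 0x08000000 };   /* dead bit set */

        printf("orb misc=0x%08x dead=%u response=%u len=%u\n",
               misc, STATUS_GET_DEAD(st), STATUS_GET_RESPONSE(st),
               STATUS_GET_LEN(st));
        return 0;
}
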
drivers/firewire/fw-topology.c
@@ -24,16 +24,16 @@
 #include "fw-transaction.h"
 #include "fw-topology.h"
 
-#define self_id_phy_id(q) (((q) >> 24) & 0x3f)
-#define self_id_extended(q) (((q) >> 23) & 0x01)
-#define self_id_link_on(q) (((q) >> 22) & 0x01)
-#define self_id_gap_count(q) (((q) >> 16) & 0x3f)
-#define self_id_phy_speed(q) (((q) >> 14) & 0x03)
-#define self_id_contender(q) (((q) >> 11) & 0x01)
-#define self_id_phy_initiator(q) (((q) >> 1) & 0x01)
-#define self_id_more_packets(q) (((q) >> 0) & 0x01)
+#define SELF_ID_PHY_ID(q) (((q) >> 24) & 0x3f)
+#define SELF_ID_EXTENDED(q) (((q) >> 23) & 0x01)
+#define SELF_ID_LINK_ON(q) (((q) >> 22) & 0x01)
+#define SELF_ID_GAP_COUNT(q) (((q) >> 16) & 0x3f)
+#define SELF_ID_PHY_SPEED(q) (((q) >> 14) & 0x03)
+#define SELF_ID_CONTENDER(q) (((q) >> 11) & 0x01)
+#define SELF_ID_PHY_INITIATOR(q) (((q) >> 1) & 0x01)
+#define SELF_ID_MORE_PACKETS(q) (((q) >> 0) & 0x01)
 
-#define self_id_ext_sequence(q) (((q) >> 20) & 0x07)
+#define SELF_ID_EXT_SEQUENCE(q) (((q) >> 20) & 0x07)
 
 static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count)
 {
@@ -61,7 +61,7 @@ static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count)
 
                 shift -= 2;
                 if (shift == 0) {
-                        if (!self_id_more_packets(q))
+                        if (!SELF_ID_MORE_PACKETS(q))
                                 return sid + 1;
 
                         shift = 16;
@@ -75,8 +75,8 @@ static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count)
                          * packets increase as expected.
                          */
 
-                        if (!self_id_extended(q) ||
-                            seq != self_id_ext_sequence(q))
+                        if (!SELF_ID_EXTENDED(q) ||
+                            seq != SELF_ID_EXT_SEQUENCE(q))
                                 return NULL;
 
                         seq++;
@@ -103,9 +103,9 @@ static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
                 return NULL;
 
         node->color = color;
-        node->node_id = LOCAL_BUS | self_id_phy_id(sid);
-        node->link_on = self_id_link_on(sid);
-        node->phy_speed = self_id_phy_speed(sid);
+        node->node_id = LOCAL_BUS | SELF_ID_PHY_ID(sid);
+        node->link_on = SELF_ID_LINK_ON(sid);
+        node->phy_speed = SELF_ID_PHY_SPEED(sid);
         node->port_count = port_count;
 
         atomic_set(&node->ref_count, 1);
@@ -181,7 +181,7 @@ static struct fw_node *build_tree(struct fw_card *card,
         end = sid + self_id_count;
         phy_id = 0;
         irm_node = NULL;
-        gap_count = self_id_gap_count(*sid);
+        gap_count = SELF_ID_GAP_COUNT(*sid);
         topology_type = 0;
 
         while (sid < end) {
@@ -193,9 +193,9 @@ static struct fw_node *build_tree(struct fw_card *card,
                 }
 
                 q = *sid;
-                if (phy_id != self_id_phy_id(q)) {
+                if (phy_id != SELF_ID_PHY_ID(q)) {
                         fw_error("PHY ID mismatch in self ID: %d != %d.\n",
-                                 phy_id, self_id_phy_id(q));
+                                 phy_id, SELF_ID_PHY_ID(q));
                         return NULL;
                 }
 
@@ -221,7 +221,7 @@ static struct fw_node *build_tree(struct fw_card *card,
                 if (phy_id == (card->node_id & 0x3f))
                         local_node = node;
 
-                if (self_id_contender(q))
+                if (SELF_ID_CONTENDER(q))
                         irm_node = node;
 
                 if (node->phy_speed == SCODE_BETA)
@@ -283,7 +283,7 @@ static struct fw_node *build_tree(struct fw_card *card,
          * setting, we fall back to 63 which will force a gap
          * count reconfiguration and a reset.
          */
-        if (self_id_gap_count(q) != gap_count)
+        if (SELF_ID_GAP_COUNT(q) != gap_count)
                 gap_count = 63;
 
         update_hop_count(node);
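
For orientation only (not part of the patch): the SELF_ID_* helpers mask fields out of a self-ID quadlet collected during bus reset, as build_tree() does above. The quadlet value below is invented.

#include <stdint.h>
#include <stdio.h>

/* Extraction macros copied from the hunk above. */
#define SELF_ID_PHY_ID(q)    (((q) >> 24) & 0x3f)
#define SELF_ID_LINK_ON(q)   (((q) >> 22) & 0x01)
#define SELF_ID_GAP_COUNT(q) (((q) >> 16) & 0x3f)
#define SELF_ID_PHY_SPEED(q) (((q) >> 14) & 0x03)
#define SELF_ID_CONTENDER(q) (((q) >> 11) & 0x01)

int main(void)
{
        uint32_t q = 0x807f8800;   /* invented self-ID quadlet */

        printf("phy_id=%u link_on=%u gap_count=%u speed=%u contender=%u\n",
               SELF_ID_PHY_ID(q), SELF_ID_LINK_ON(q), SELF_ID_GAP_COUNT(q),
               SELF_ID_PHY_SPEED(q), SELF_ID_CONTENDER(q));
        return 0;
}
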
drivers/firewire/fw-transaction.c
@@ -34,29 +34,29 @@
 #include "fw-topology.h"
 #include "fw-device.h"
 
-#define header_pri(pri) ((pri) << 0)
-#define header_tcode(tcode) ((tcode) << 4)
-#define header_retry(retry) ((retry) << 8)
-#define header_tlabel(tlabel) ((tlabel) << 10)
-#define header_destination(destination) ((destination) << 16)
-#define header_source(source) ((source) << 16)
-#define header_rcode(rcode) ((rcode) << 12)
-#define header_offset_high(offset_high) ((offset_high) << 0)
-#define header_data_length(length) ((length) << 16)
-#define header_extended_tcode(tcode) ((tcode) << 0)
+#define HEADER_PRI(pri) ((pri) << 0)
+#define HEADER_TCODE(tcode) ((tcode) << 4)
+#define HEADER_RETRY(retry) ((retry) << 8)
+#define HEADER_TLABEL(tlabel) ((tlabel) << 10)
+#define HEADER_DESTINATION(destination) ((destination) << 16)
+#define HEADER_SOURCE(source) ((source) << 16)
+#define HEADER_RCODE(rcode) ((rcode) << 12)
+#define HEADER_OFFSET_HIGH(offset_high) ((offset_high) << 0)
+#define HEADER_DATA_LENGTH(length) ((length) << 16)
+#define HEADER_EXTENDED_TCODE(tcode) ((tcode) << 0)
 
-#define header_get_tcode(q) (((q) >> 4) & 0x0f)
-#define header_get_tlabel(q) (((q) >> 10) & 0x3f)
-#define header_get_rcode(q) (((q) >> 12) & 0x0f)
-#define header_get_destination(q) (((q) >> 16) & 0xffff)
-#define header_get_source(q) (((q) >> 16) & 0xffff)
-#define header_get_offset_high(q) (((q) >> 0) & 0xffff)
-#define header_get_data_length(q) (((q) >> 16) & 0xffff)
-#define header_get_extended_tcode(q) (((q) >> 0) & 0xffff)
+#define HEADER_GET_TCODE(q) (((q) >> 4) & 0x0f)
+#define HEADER_GET_TLABEL(q) (((q) >> 10) & 0x3f)
+#define HEADER_GET_RCODE(q) (((q) >> 12) & 0x0f)
+#define HEADER_GET_DESTINATION(q) (((q) >> 16) & 0xffff)
+#define HEADER_GET_SOURCE(q) (((q) >> 16) & 0xffff)
+#define HEADER_GET_OFFSET_HIGH(q) (((q) >> 0) & 0xffff)
+#define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff)
+#define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff)
 
-#define phy_config_gap_count(gap_count) (((gap_count) << 16) | (1 << 22))
-#define phy_config_root_id(node_id) ((((node_id) & 0x3f) << 24) | (1 << 23))
-#define phy_identifier(id) ((id) << 30)
+#define PHY_CONFIG_GAP_COUNT(gap_count) (((gap_count) << 16) | (1 << 22))
+#define PHY_CONFIG_ROOT_ID(node_id) ((((node_id) & 0x3f) << 24) | (1 << 23))
+#define PHY_IDENTIFIER(id) ((id) << 30)
 
 static int
 close_transaction(struct fw_transaction *transaction,
@@ -159,12 +159,12 @@ fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
                 ext_tcode = 0;
 
         packet->header[0] =
-                header_retry(RETRY_X) |
-                header_tlabel(tlabel) |
-                header_tcode(tcode) |
-                header_destination(node_id);
+                HEADER_RETRY(RETRY_X) |
+                HEADER_TLABEL(tlabel) |
+                HEADER_TCODE(tcode) |
+                HEADER_DESTINATION(node_id);
         packet->header[1] =
-                header_offset_high(offset >> 32) | header_source(source_id);
+                HEADER_OFFSET_HIGH(offset >> 32) | HEADER_SOURCE(source_id);
         packet->header[2] =
                 offset;
 
@@ -178,8 +178,8 @@ fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
         case TCODE_LOCK_REQUEST:
         case TCODE_WRITE_BLOCK_REQUEST:
                 packet->header[3] =
-                        header_data_length(length) |
-                        header_extended_tcode(ext_tcode);
+                        HEADER_DATA_LENGTH(length) |
+                        HEADER_EXTENDED_TCODE(ext_tcode);
                 packet->header_length = 16;
                 packet->payload = payload;
                 packet->payload_length = length;
@@ -192,8 +192,8 @@ fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
 
         case TCODE_READ_BLOCK_REQUEST:
                 packet->header[3] =
-                        header_data_length(length) |
-                        header_extended_tcode(ext_tcode);
+                        HEADER_DATA_LENGTH(length) |
+                        HEADER_EXTENDED_TCODE(ext_tcode);
                 packet->header_length = 16;
                 packet->payload_length = 0;
                 break;
@@ -325,9 +325,9 @@ void fw_send_phy_config(struct fw_card *card,
 {
         u32 q;
 
-        q = phy_identifier(PHY_PACKET_CONFIG) |
-                phy_config_root_id(node_id) |
-                phy_config_gap_count(gap_count);
+        q = PHY_IDENTIFIER(PHY_PACKET_CONFIG) |
+                PHY_CONFIG_ROOT_ID(node_id) |
+                PHY_CONFIG_GAP_COUNT(gap_count);
 
         send_phy_packet(card, q, generation);
 }
@@ -485,32 +485,32 @@ fw_fill_response(struct fw_packet *response, u32 *request_header,
 {
         int tcode, tlabel, extended_tcode, source, destination;
 
-        tcode = header_get_tcode(request_header[0]);
-        tlabel = header_get_tlabel(request_header[0]);
-        source = header_get_destination(request_header[0]);
-        destination = header_get_source(request_header[1]);
-        extended_tcode = header_get_extended_tcode(request_header[3]);
+        tcode = HEADER_GET_TCODE(request_header[0]);
+        tlabel = HEADER_GET_TLABEL(request_header[0]);
+        source = HEADER_GET_DESTINATION(request_header[0]);
+        destination = HEADER_GET_SOURCE(request_header[1]);
+        extended_tcode = HEADER_GET_EXTENDED_TCODE(request_header[3]);
 
         response->header[0] =
-                header_retry(RETRY_1) |
-                header_tlabel(tlabel) |
-                header_destination(destination);
+                HEADER_RETRY(RETRY_1) |
+                HEADER_TLABEL(tlabel) |
+                HEADER_DESTINATION(destination);
         response->header[1] =
-                header_source(source) |
-                header_rcode(rcode);
+                HEADER_SOURCE(source) |
+                HEADER_RCODE(rcode);
         response->header[2] = 0;
 
         switch (tcode) {
         case TCODE_WRITE_QUADLET_REQUEST:
         case TCODE_WRITE_BLOCK_REQUEST:
-                response->header[0] |= header_tcode(TCODE_WRITE_RESPONSE);
+                response->header[0] |= HEADER_TCODE(TCODE_WRITE_RESPONSE);
                 response->header_length = 12;
                 response->payload_length = 0;
                 break;
 
         case TCODE_READ_QUADLET_REQUEST:
                 response->header[0] |=
-                        header_tcode(TCODE_READ_QUADLET_RESPONSE);
+                        HEADER_TCODE(TCODE_READ_QUADLET_RESPONSE);
                 if (payload != NULL)
                         response->header[3] = *(u32 *)payload;
                 else
@@ -521,10 +521,10 @@ fw_fill_response(struct fw_packet *response, u32 *request_header,
 
         case TCODE_READ_BLOCK_REQUEST:
         case TCODE_LOCK_REQUEST:
-                response->header[0] |= header_tcode(tcode + 2);
+                response->header[0] |= HEADER_TCODE(tcode + 2);
                 response->header[3] =
-                        header_data_length(length) |
-                        header_extended_tcode(extended_tcode);
+                        HEADER_DATA_LENGTH(length) |
+                        HEADER_EXTENDED_TCODE(extended_tcode);
                 response->header_length = 16;
                 response->payload = payload;
                 response->payload_length = length;
@@ -544,7 +544,7 @@ allocate_request(struct fw_packet *p)
         u32 *data, length;
         int request_tcode, t;
 
-        request_tcode = header_get_tcode(p->header[0]);
+        request_tcode = HEADER_GET_TCODE(p->header[0]);
         switch (request_tcode) {
         case TCODE_WRITE_QUADLET_REQUEST:
                 data = &p->header[3];
@@ -554,7 +554,7 @@ allocate_request(struct fw_packet *p)
         case TCODE_WRITE_BLOCK_REQUEST:
         case TCODE_LOCK_REQUEST:
                 data = p->payload;
-                length = header_get_data_length(p->header[3]);
+                length = HEADER_GET_DATA_LENGTH(p->header[3]);
                 break;
 
         case TCODE_READ_QUADLET_REQUEST:
@@ -564,7 +564,7 @@ allocate_request(struct fw_packet *p)
 
         case TCODE_READ_BLOCK_REQUEST:
                 data = NULL;
-                length = header_get_data_length(p->header[3]);
+                length = HEADER_GET_DATA_LENGTH(p->header[3]);
                 break;
 
         default:
@@ -644,10 +644,10 @@ fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
 
         offset =
                 ((unsigned long long)
-                 header_get_offset_high(p->header[1]) << 32) | p->header[2];
-        tcode = header_get_tcode(p->header[0]);
-        destination = header_get_destination(p->header[0]);
-        source = header_get_source(p->header[0]);
+                 HEADER_GET_OFFSET_HIGH(p->header[1]) << 32) | p->header[2];
+        tcode = HEADER_GET_TCODE(p->header[0]);
+        destination = HEADER_GET_DESTINATION(p->header[0]);
+        source = HEADER_GET_SOURCE(p->header[0]);
 
         spin_lock_irqsave(&address_handler_lock, flags);
         handler = lookup_enclosing_address_handler(&address_handler_list,
@@ -682,11 +682,11 @@ fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
         size_t data_length;
         int tcode, tlabel, destination, source, rcode;
 
-        tcode = header_get_tcode(p->header[0]);
-        tlabel = header_get_tlabel(p->header[0]);
-        destination = header_get_destination(p->header[0]);
-        source = header_get_source(p->header[1]);
-        rcode = header_get_rcode(p->header[1]);
+        tcode = HEADER_GET_TCODE(p->header[0]);
+        tlabel = HEADER_GET_TLABEL(p->header[0]);
+        destination = HEADER_GET_DESTINATION(p->header[0]);
+        source = HEADER_GET_SOURCE(p->header[1]);
+        rcode = HEADER_GET_RCODE(p->header[1]);
 
         spin_lock_irqsave(&card->lock, flags);
         list_for_each_entry(t, &card->transaction_list, link) {
@@ -723,7 +723,7 @@ fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
         case TCODE_READ_BLOCK_RESPONSE:
         case TCODE_LOCK_RESPONSE:
                 data = p->payload;
-                data_length = header_get_data_length(p->header[3]);
+                data_length = HEADER_GET_DATA_LENGTH(p->header[3]);
                 break;
 
         default: