ALSA: firewire-lib: add replay target to cache sequence of packet

In design of audio and music unit in IEEE 1394 bus, feedback of
effective sampling transfer frequency (STF) is delivered by packets
transferred from device. The devices supported by ALSA firewire stack
are categorized into three groups with regard to it.

 * Group 1:
   * Echo Audio Fireworks board module
   * Oxford Semiconductor OXFW971 ASIC
   * Digidesign Digi00x family
   * Tascam FireWire series
   * RME Fireface series

 * Group 2:
   * BridgeCo. DM1000/DM1100/DM1500 ASICs for BeBoB solution
   * TC Applied Technologies DICE ASICs

 * Group 3:
   * Mark of the Unicorn FireWire series

In group 1, the effective STF is determined by the sequence of the number
of events per packet. In group 2, the sequence of presentation timestamp
expressed in syt field of CIP header is interpreted as well. In group 3,
the presentation timestamp is expressed in source packet header (SPH) of
each data block.

I note that some models don't take care of the effective STF, thanks to a
large internal buffer. It's reasonable to name them as group 0:

 * Group 0
   * Oxford Semiconductor OXFW970 ASIC

The effective STF is known to be slightly different from nominal STF for
all of the devices, and to differ between devices. Furthermore, the
effective STF is known to be shifted for long-period transmission. This
makes it hard for software to satisfy the effective STF when processing
packets to the device.

The effective STF is deterministic as a result of analyzing the batch of
packet transferred from the device. For the analysis, caching the sequence
of parameters in the packet is required.

This commit adds an option so that AMDTP domain structure takes AMDTP
stream structure to cache the sequence of parameters in packet transferred
from the device. The parameters are offset ticks of syt field against the
cycle to receive the packet and the number of data blocks per packet.

Signed-off-by: Takashi Sakamoto <o-takashi@sakamocchi.jp>
Link: https://lore.kernel.org/r/20210527122611.173711-2-o-takashi@sakamocchi.jp
Signed-off-by: Takashi Iwai <tiwai@suse.de>
This commit is contained in:
Takashi Sakamoto 2021-05-27 21:26:09 +09:00 committed by Takashi Iwai
parent d955782da2
commit f9e5ecdfc2
10 changed files with 98 additions and 12 deletions

View File

@ -52,6 +52,7 @@
#define CIP_FDF_NO_DATA 0xff
#define CIP_SYT_MASK 0x0000ffff
#define CIP_SYT_NO_INFO 0xffff
#define CIP_SYT_CYCLE_MODULUS 16
#define CIP_NO_DATA ((CIP_FDF_NO_DATA << CIP_FDF_SHIFT) | CIP_SYT_NO_INFO)
#define CIP_HEADER_SIZE (sizeof(__be32) * CIP_HEADER_QUADLETS)
@ -473,6 +474,52 @@ static void pool_ideal_syt_offsets(struct amdtp_stream *s, struct seq_desc *desc
s->ctx_data.rx.syt_offset_state = state;
}
// Compute the offset in ticks from the start of the cycle in which a packet
// was received to the presentation time carried in its syt field, after
// compensating for the transfer delay.
static unsigned int compute_syt_offset(unsigned int syt, unsigned int cycle,
				       unsigned int transfer_delay)
{
	// Low 4 bits of the cycle count within the current second.
	unsigned int curr_cycle_lo = (cycle % CYCLES_PER_SECOND) & 0x0f;
	// The 4 bit cycle count carried in bits 15..12 of the syt field.
	unsigned int target_cycle_lo = (syt & 0xf000) >> 12;
	unsigned int offset;

	// Round up: wrap within the 16 cycle modulus when the target cycle
	// value is behind the current one.
	if (target_cycle_lo < curr_cycle_lo)
		target_cycle_lo += CIP_SYT_CYCLE_MODULUS;

	// Whole cycles of difference, plus the intra-cycle tick offset held
	// in the low 12 bits of the syt field.
	offset = (target_cycle_lo - curr_cycle_lo) * TICKS_PER_CYCLE + (syt & 0x0fff);

	// Subtract transfer delay so that the synchronization offset is not so
	// large at transmission; borrow one full modulus window if required.
	if (offset < transfer_delay)
		offset += CIP_SYT_CYCLE_MODULUS * TICKS_PER_CYCLE;

	return offset - transfer_delay;
}
static void cache_seq(struct amdtp_stream *s, const struct pkt_desc *descs, unsigned int desc_count)
{
const unsigned int transfer_delay = s->transfer_delay;
const unsigned int cache_size = s->ctx_data.tx.cache.size;
struct seq_desc *cache = s->ctx_data.tx.cache.descs;
unsigned int cache_tail = s->ctx_data.tx.cache.tail;
bool aware_syt = !(s->flags & CIP_UNAWARE_SYT);
int i;
for (i = 0; i < desc_count; ++i) {
struct seq_desc *dst = cache + cache_tail;
const struct pkt_desc *src = descs + i;
if (aware_syt && src->syt != CIP_SYT_NO_INFO)
dst->syt_offset = compute_syt_offset(src->syt, src->cycle, transfer_delay);
else
dst->syt_offset = CIP_SYT_NO_INFO;
dst->data_blocks = src->data_blocks;
cache_tail = (cache_tail + 1) % cache_size;
}
s->ctx_data.tx.cache.tail = cache_tail;
}
static void pool_ideal_seq_descs(struct amdtp_stream *s, unsigned int count)
{
struct seq_desc *descs = s->ctx_data.rx.seq.descs;
@ -1107,7 +1154,12 @@ static void process_tx_packets(struct fw_iso_context *context, u32 tstamp, size_
return;
}
} else {
struct amdtp_domain *d = s->domain;
process_ctx_payloads(s, s->pkt_descs, desc_count);
if (d->replay.enable)
cache_seq(s, s->pkt_descs, desc_count);
}
for (i = 0; i < packets; ++i) {
@ -1463,6 +1515,18 @@ static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
s->ctx_data.tx.max_ctx_payload_length = max_ctx_payload_size;
s->ctx_data.tx.ctx_header_size = ctx_header_size;
s->ctx_data.tx.event_starts = false;
if (s->domain->replay.enable) {
// struct fw_iso_context.drop_overflow_headers is false therefore it's
// possible to cache much unexpectedly.
s->ctx_data.tx.cache.size = max_t(unsigned int, s->syt_interval * 2,
queue_size * 3 / 2);
s->ctx_data.tx.cache.tail = 0;
s->ctx_data.tx.cache.descs = kcalloc(s->ctx_data.tx.cache.size,
sizeof(*s->ctx_data.tx.cache.descs), GFP_KERNEL);
if (!s->ctx_data.tx.cache.descs)
goto err_context;
}
} else {
static const struct {
unsigned int data_block;
@ -1543,8 +1607,12 @@ static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
err_pkt_descs:
kfree(s->pkt_descs);
err_context:
if (s->direction == AMDTP_OUT_STREAM)
if (s->direction == AMDTP_OUT_STREAM) {
kfree(s->ctx_data.rx.seq.descs);
} else {
if (s->domain->replay.enable)
kfree(s->ctx_data.tx.cache.descs);
}
fw_iso_context_destroy(s->context);
s->context = ERR_PTR(-1);
err_buffer:
@ -1655,8 +1723,12 @@ static void amdtp_stream_stop(struct amdtp_stream *s)
iso_packets_buffer_destroy(&s->buffer, s->unit);
kfree(s->pkt_descs);
if (s->direction == AMDTP_OUT_STREAM)
if (s->direction == AMDTP_OUT_STREAM) {
kfree(s->ctx_data.rx.seq.descs);
} else {
if (s->domain->replay.enable)
kfree(s->ctx_data.tx.cache.descs);
}
mutex_unlock(&s->mutex);
}
@ -1735,8 +1807,10 @@ EXPORT_SYMBOL_GPL(amdtp_domain_add_stream);
* @d: the AMDTP domain.
* @tx_init_skip_cycles: the number of cycles to skip processing packets at initial stage of IR
* contexts.
* @replay_seq: whether to replay the sequence of packet in IR context for the sequence of packet in
* IT context.
*/
int amdtp_domain_start(struct amdtp_domain *d, unsigned int tx_init_skip_cycles)
int amdtp_domain_start(struct amdtp_domain *d, unsigned int tx_init_skip_cycles, bool replay_seq)
{
unsigned int events_per_buffer = d->events_per_buffer;
unsigned int events_per_period = d->events_per_period;
@ -1744,6 +1818,8 @@ int amdtp_domain_start(struct amdtp_domain *d, unsigned int tx_init_skip_cycles)
struct amdtp_stream *s;
int err;
d->replay.enable = replay_seq;
// Select an IT context as IRQ target.
list_for_each_entry(s, &d->streams, list) {
if (s->direction == AMDTP_OUT_STREAM)

View File

@ -141,6 +141,12 @@ struct amdtp_stream {
// The device starts multiplexing events to the packet.
bool event_starts;
struct {
struct seq_desc *descs;
unsigned int size;
unsigned int tail;
} cache;
} tx;
struct {
// To generate CIP header.
@ -292,6 +298,10 @@ struct amdtp_domain {
unsigned int tx_start;
unsigned int rx_start;
} processing_cycle;
struct {
bool enable;
} replay;
};
int amdtp_domain_init(struct amdtp_domain *d);
@ -300,7 +310,7 @@ void amdtp_domain_destroy(struct amdtp_domain *d);
int amdtp_domain_add_stream(struct amdtp_domain *d, struct amdtp_stream *s,
int channel, int speed);
int amdtp_domain_start(struct amdtp_domain *d, unsigned int tx_init_skip_cycles);
int amdtp_domain_start(struct amdtp_domain *d, unsigned int tx_init_skip_cycles, bool replay_seq);
void amdtp_domain_stop(struct amdtp_domain *d);
static inline int amdtp_domain_set_events_per_period(struct amdtp_domain *d,

View File

@ -652,7 +652,7 @@ int snd_bebob_stream_start_duplex(struct snd_bebob *bebob)
// MEMO: In the early stage of packet streaming, the device transfers NODATA packets.
// After several hundred cycles, it begins to multiplex event into the packet with
// syt information.
err = amdtp_domain_start(&bebob->domain, tx_init_skip_cycles);
err = amdtp_domain_start(&bebob->domain, tx_init_skip_cycles, false);
if (err < 0)
goto error;

View File

@ -459,7 +459,7 @@ int snd_dice_stream_start_duplex(struct snd_dice *dice)
goto error;
}
err = amdtp_domain_start(&dice->domain, 0);
err = amdtp_domain_start(&dice->domain, 0, false);
if (err < 0)
goto error;

View File

@ -375,7 +375,7 @@ int snd_dg00x_stream_start_duplex(struct snd_dg00x *dg00x)
if (err < 0)
goto error;
err = amdtp_domain_start(&dg00x->domain, 0);
err = amdtp_domain_start(&dg00x->domain, 0, false);
if (err < 0)
goto error;

View File

@ -199,7 +199,7 @@ int snd_ff_stream_start_duplex(struct snd_ff *ff, unsigned int rate)
if (err < 0)
goto error;
err = amdtp_domain_start(&ff->domain, 0);
err = amdtp_domain_start(&ff->domain, 0, false);
if (err < 0)
goto error;

View File

@ -272,7 +272,7 @@ int snd_efw_stream_start_duplex(struct snd_efw *efw)
if (err < 0)
goto error;
err = amdtp_domain_start(&efw->domain, 0);
err = amdtp_domain_start(&efw->domain, 0, false);
if (err < 0)
goto error;

View File

@ -260,7 +260,7 @@ int snd_motu_stream_start_duplex(struct snd_motu *motu)
if (err < 0)
goto stop_streams;
err = amdtp_domain_start(&motu->domain, 0);
err = amdtp_domain_start(&motu->domain, 0, false);
if (err < 0)
goto stop_streams;

View File

@ -354,7 +354,7 @@ int snd_oxfw_stream_start_duplex(struct snd_oxfw *oxfw)
}
}
err = amdtp_domain_start(&oxfw->domain, 0);
err = amdtp_domain_start(&oxfw->domain, 0, false);
if (err < 0)
goto error;

View File

@ -473,7 +473,7 @@ int snd_tscm_stream_start_duplex(struct snd_tscm *tscm, unsigned int rate)
if (err < 0)
goto error;
err = amdtp_domain_start(&tscm->domain, 0);
err = amdtp_domain_start(&tscm->domain, 0, false);
if (err < 0)
return err;