@@ -260,11 +260,18 @@ int amdtp_stream_set_parameters(struct amdtp_stream *s, unsigned int rate,
 	s->data_block_quadlets = data_block_quadlets;
 	s->syt_interval = amdtp_syt_intervals[sfc];
 
-	/* default buffering in the device */
-	s->transfer_delay = TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE;
-	if (s->flags & CIP_BLOCKING)
-		/* additional buffering needed to adjust for no-data packets */
-		s->transfer_delay += TICKS_PER_SECOND * s->syt_interval / rate;
+	// default buffering in the device.
+	if (s->direction == AMDTP_OUT_STREAM) {
+		s->ctx_data.rx.transfer_delay =
+					TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE;
+
+		if (s->flags & CIP_BLOCKING) {
+			// additional buffering needed to adjust for no-data
+			// packets.
+			s->ctx_data.rx.transfer_delay +=
+				TICKS_PER_SECOND * s->syt_interval / rate;
+		}
+	}
 
 	return 0;
 }
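
For orientation: every hunk in this patch swaps a flat per-stream field for a member of a direction-specific union. Below is a minimal sketch of that union, reconstructed only from the member accesses visible in this diff; the authoritative definition lives in sound/firewire/amdtp-stream.h, and the exact field types may differ.

struct amdtp_stream {
	/* ... other members unchanged ... */
	union {
		struct {
			/* valid while s->direction == AMDTP_IN_STREAM */
			unsigned int ctx_header_size;
			unsigned int max_payload_length;
			unsigned int first_dbc;
			unsigned int dbc_interval;
		} tx;
		struct {
			/* valid while s->direction == AMDTP_OUT_STREAM */
			unsigned int transfer_delay;
			unsigned int data_block_state;
			unsigned int syt_offset_state;
			unsigned int last_syt_offset;
			unsigned int fdf;
		} rx;
	} ctx_data;
};
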
@@ -321,10 +328,10 @@ static unsigned int calculate_data_blocks(struct amdtp_stream *s,
 	/* Non-blocking mode. */
 	} else {
 		if (!cip_sfc_is_base_44100(s->sfc)) {
-			/* Sample_rate / 8000 is an integer, and precomputed. */
-			data_blocks = s->data_block_state;
+			// Sample_rate / 8000 is an integer, and precomputed.
+			data_blocks = s->ctx_data.rx.data_block_state;
 		} else {
-			phase = s->data_block_state;
+			phase = s->ctx_data.rx.data_block_state;
 
 			/*
 			 * This calculates the number of data blocks per packet so that
@@ -343,7 +350,7 @@ static unsigned int calculate_data_blocks(struct amdtp_stream *s,
 			data_blocks = 11 * (s->sfc >> 1) + (phase == 0);
 			if (++phase >= (80 >> (s->sfc >> 1)))
 				phase = 0;
-			s->data_block_state = phase;
+			s->ctx_data.rx.data_block_state = phase;
 		}
 	}
 
@@ -355,9 +362,10 @@ static unsigned int calculate_syt(struct amdtp_stream *s,
 {
 	unsigned int syt_offset, phase, index, syt;
 
-	if (s->last_syt_offset < TICKS_PER_CYCLE) {
+	if (s->ctx_data.rx.last_syt_offset < TICKS_PER_CYCLE) {
 		if (!cip_sfc_is_base_44100(s->sfc))
-			syt_offset = s->last_syt_offset + s->syt_offset_state;
+			syt_offset = s->ctx_data.rx.last_syt_offset +
+				     s->ctx_data.rx.syt_offset_state;
 		else {
 			/*
 			 * The time, in ticks, of the n'th SYT_INTERVAL sample is:
@@ -369,21 +377,21 @@ static unsigned int calculate_syt(struct amdtp_stream *s,
 			 * 1386 1386 1387 1386 1386 1386 1387 1386 1386 1386 1387 ...
 			 * This code generates _exactly_ the same sequence.
 			 */
-			phase = s->syt_offset_state;
+			phase = s->ctx_data.rx.syt_offset_state;
 			index = phase % 13;
-			syt_offset = s->last_syt_offset;
+			syt_offset = s->ctx_data.rx.last_syt_offset;
 			syt_offset += 1386 + ((index && !(index & 3)) ||
 					      phase == 146);
 			if (++phase >= 147)
 				phase = 0;
-			s->syt_offset_state = phase;
+			s->ctx_data.rx.syt_offset_state = phase;
 		}
 	} else
-		syt_offset = s->last_syt_offset - TICKS_PER_CYCLE;
-	s->last_syt_offset = syt_offset;
+		syt_offset = s->ctx_data.rx.last_syt_offset - TICKS_PER_CYCLE;
+	s->ctx_data.rx.last_syt_offset = syt_offset;
 
 	if (syt_offset < TICKS_PER_CYCLE) {
-		syt_offset += s->transfer_delay;
+		syt_offset += s->ctx_data.rx.transfer_delay;
 		syt = (cycle + syt_offset / TICKS_PER_CYCLE) << 12;
 		syt += syt_offset % TICKS_PER_CYCLE;
 
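
The 1386/1387 sequence in the hunk above can be sanity-checked in isolation. With the IEEE 1394 constants TICKS_PER_SECOND = 24576000 and TICKS_PER_CYCLE = 3072, and SYT_INTERVAL = 8 at 44.1 kHz, eight samples span 24576000 * 8 / 44100 = 4458 + 34/147 ticks, i.e. 1386 + 34/147 ticks beyond one cycle, so exactly 34 of every 147 packets must carry one extra tick. A standalone userspace check of the phase/index expression (not part of the patch):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int phase, total = 0;

	for (phase = 0; phase < 147; ++phase) {
		unsigned int index = phase % 13;

		/* Same expression as calculate_syt() above. */
		total += 1386 + ((index && !(index & 3)) || phase == 146);
	}
	/* 147 * (1386 + 34/147) = 203776 ticks, exactly. */
	assert(total == 147 * 1386 + 34);
	printf("ticks per 147-packet cycle: %u\n", total);
	return 0;
}
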
@@ -457,7 +465,8 @@ static inline int queue_out_packet(struct amdtp_stream *s,
 
 static inline int queue_in_packet(struct amdtp_stream *s)
 {
-	return queue_packet(s, IR_HEADER_SIZE, s->max_payload_length);
+	return queue_packet(s, s->ctx_data.tx.ctx_header_size,
+			    s->ctx_data.tx.max_payload_length);
 }
 
 static int handle_out_packet(struct amdtp_stream *s,
@@ -484,9 +493,9 @@ static int handle_out_packet(struct amdtp_stream *s,
 			((s->sph << CIP_SPH_SHIFT) & CIP_SPH_MASK) |
 			s->data_block_counter);
 	buffer[1] = cpu_to_be32(CIP_EOH |
-				((s->fmt << CIP_FMT_SHIFT) & CIP_FMT_MASK) |
-				((s->fdf << CIP_FDF_SHIFT) & CIP_FDF_MASK) |
-				(syt & CIP_SYT_MASK));
+				((s->fmt << CIP_FMT_SHIFT) & CIP_FMT_MASK) |
+				((s->ctx_data.rx.fdf << CIP_FDF_SHIFT) & CIP_FDF_MASK) |
+				(syt & CIP_SYT_MASK));
 
 	if (!(s->flags & CIP_DBC_IS_END_EVENT))
 		s->data_block_counter =
@@ -610,14 +619,14 @@ static int handle_in_packet(struct amdtp_stream *s,
 		data_block_counter = s->data_block_counter;
 
 		if (((s->flags & CIP_SKIP_DBC_ZERO_CHECK) &&
-		     data_block_counter == s->tx_first_dbc) ||
+		     data_block_counter == s->ctx_data.tx.first_dbc) ||
 		    s->data_block_counter == UINT_MAX) {
 			lost = false;
 		} else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
 			lost = data_block_counter != s->data_block_counter;
 		} else {
-			if (data_blocks > 0 && s->tx_dbc_interval > 0)
-				dbc_interval = s->tx_dbc_interval;
+			if (data_blocks > 0 && s->ctx_data.tx.dbc_interval > 0)
+				dbc_interval = s->ctx_data.tx.dbc_interval;
 			else
 				dbc_interval = data_blocks;
 
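
In this branch dbc_interval feeds a continuity check on the CIP data block counter, an 8-bit field that wraps at 256. The remainder of the check sits outside this hunk; under CIP_DBC_IS_END_EVENT the expected relation is presumably the modular step sketched below (hypothetical helper, written here only to make the rule explicit, not part of the patch):

#include <stdbool.h>

/* Hypothetical: the CIP DBC field is 8 bits wide, so continuity is
 * evaluated modulo 256.
 */
static bool dbc_is_continuous(unsigned int prev_dbc, unsigned int dbc,
			      unsigned int dbc_interval)
{
	return dbc == ((prev_dbc + dbc_interval) & 0xff);
}
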
@@ -740,11 +749,11 @@ static void in_stream_callback(struct fw_iso_context *context, u32 tstamp,
 	if (s->packet_index < 0)
 		return;
 
-	/* The number of packets in buffer */
-	packets = header_length / IR_HEADER_SIZE;
+	// The number of packets in buffer.
+	packets = header_length / s->ctx_data.tx.ctx_header_size;
 
 	/* For buffer-over-run prevention. */
-	max_payload_length = s->max_payload_length;
+	max_payload_length = s->ctx_data.tx.max_payload_length;
 
 	for (i = 0; i < packets; i++) {
 		u32 iso_header = be32_to_cpu(ctx_header[0]);
@@ -765,7 +774,7 @@ static void in_stream_callback(struct fw_iso_context *context, u32 tstamp,
 		if (s->handle_packet(s, payload_length, cycle, i) < 0)
 			break;
 
-		ctx_header += IR_HEADER_SIZE / sizeof(__be32);
+		ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
 	}
 
 	/* Queueing error or detecting invalid payload. */
@@ -837,7 +846,7 @@ int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed)
 	static const struct {
 		unsigned int data_block;
 		unsigned int syt_offset;
-	} initial_state[] = {
+	} *entry, initial_state[] = {
 		[CIP_SFC_32000]  = {  4, 3072 },
 		[CIP_SFC_48000]  = {  6, 1024 },
 		[CIP_SFC_96000]  = { 12, 1024 },
@@ -846,7 +855,7 @@ int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed)
 		[CIP_SFC_88200]  = {  0,   67 },
 		[CIP_SFC_176400] = {  0,   67 },
 	};
-	unsigned int header_size;
+	unsigned int ctx_header_size;
 	enum dma_data_direction dir;
 	int type, tag, err;
 
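
The initial_state table spread across the two hunks above encodes what the earlier comment calls precomputed: for the 48 kHz family, data_block is simply the rate divided by the 8000 isochronous cycles per second, and syt_offset is the per-SYT_INTERVAL tick increment beyond one 3072-tick cycle, while the 44.1 kHz entries shown start at data_block 0 with syt_offset serving as phase 67 into the 147-step sequence. A quick standalone derivation (the SYT_INTERVAL values 8, 8 and 16 are assumptions taken from amdtp_syt_intervals[]):

#include <assert.h>

#define TICKS_PER_SECOND	24576000
#define TICKS_PER_CYCLE		3072

int main(void)
{
	/* data_block = rate / 8000; syt_offset = ticks per SYT_INTERVAL
	 * minus one cycle, matching { 4, 3072 }, { 6, 1024 }, { 12, 1024 }.
	 */
	assert(32000 / 8000 == 4 &&
	       TICKS_PER_SECOND / 32000 * 8 - TICKS_PER_CYCLE == 3072);
	assert(48000 / 8000 == 6 &&
	       TICKS_PER_SECOND / 48000 * 8 - TICKS_PER_CYCLE == 1024);
	assert(96000 / 8000 == 12 &&
	       TICKS_PER_SECOND / 96000 * 16 - TICKS_PER_CYCLE == 1024);
	return 0;
}
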
@@ -858,23 +867,26 @@ int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed)
 		goto err_unlock;
 	}
 
-	if (s->direction == AMDTP_IN_STREAM)
+	if (s->direction == AMDTP_IN_STREAM) {
 		s->data_block_counter = UINT_MAX;
-	else
+	} else {
+		entry = &initial_state[s->sfc];
+
 		s->data_block_counter = 0;
-	s->data_block_state = initial_state[s->sfc].data_block;
-	s->syt_offset_state = initial_state[s->sfc].syt_offset;
-	s->last_syt_offset = TICKS_PER_CYCLE;
+		s->ctx_data.rx.data_block_state = entry->data_block;
+		s->ctx_data.rx.syt_offset_state = entry->syt_offset;
+		s->ctx_data.rx.last_syt_offset = TICKS_PER_CYCLE;
+	}
 
 	/* initialize packet buffer */
 	if (s->direction == AMDTP_IN_STREAM) {
 		dir = DMA_FROM_DEVICE;
 		type = FW_ISO_CONTEXT_RECEIVE;
-		header_size = IR_HEADER_SIZE;
+		ctx_header_size = IR_HEADER_SIZE;
 	} else {
 		dir = DMA_TO_DEVICE;
 		type = FW_ISO_CONTEXT_TRANSMIT;
-		header_size = OUT_PACKET_HEADER_SIZE;
+		ctx_header_size = OUT_PACKET_HEADER_SIZE;
 	}
 	err = iso_packets_buffer_init(&s->buffer, s->unit, QUEUE_LENGTH,
 				      amdtp_stream_get_max_payload(s), dir);
@@ -882,8 +894,8 @@ int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed)
 		goto err_unlock;
 
 	s->context = fw_iso_context_create(fw_parent_device(s->unit)->card,
-					   type, channel, speed, header_size,
-					   amdtp_stream_first_callback, s);
+					  type, channel, speed, ctx_header_size,
+					  amdtp_stream_first_callback, s);
 	if (IS_ERR(s->context)) {
 		err = PTR_ERR(s->context);
 		if (err == -EBUSY)
@@ -894,8 +906,11 @@ int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed)
 
 	amdtp_stream_update(s);
 
-	if (s->direction == AMDTP_IN_STREAM)
-		s->max_payload_length = amdtp_stream_get_max_payload(s);
+	if (s->direction == AMDTP_IN_STREAM) {
+		s->ctx_data.tx.max_payload_length =
+					amdtp_stream_get_max_payload(s);
+		s->ctx_data.tx.ctx_header_size = ctx_header_size;
+	}
 
 	if (s->flags & CIP_NO_HEADER)
 		s->tag = TAG_NO_CIP_HEADER;