bcwc_pcie: simplify ringbuffer handling

This commit is contained in:
Sven Schnelle
2015-11-23 15:45:04 +01:00
parent c29ff9e9bf
commit 58a71ab52b
4 changed files with 95 additions and 53 deletions

View File

@@ -178,13 +178,9 @@ static void io_t2h_handler(struct bcwc_private *dev_priv,
static void bcwc_handle_irq(struct bcwc_private *dev_priv, struct fw_channel *chan)
{
struct bcwc_ringbuf_entry *entry;
int i = 0;
pr_debug("Interrupt from channel source %d, type %d [%s]\n", chan->source, chan->type, chan->name);
while(bcwc_channel_ringbuf_entry_available(dev_priv, chan) && i++ < 500) {
entry = bcwc_channel_ringbuf_get_entry(dev_priv, chan);
while((entry = bcwc_channel_ringbuf_receive(dev_priv, chan))) {
pr_debug("channel %s: message available, address %08x\n", chan->name, entry->address_flags);
if (chan == dev_priv->channel_shared_malloc) {
sharedmalloc_handler(dev_priv, chan, entry);
} else if (chan == dev_priv->channel_terminal) {
@@ -338,7 +334,6 @@ static int bcwc_pci_probe(struct pci_dev *pdev,
}
spin_lock_init(&dev_priv->io_lock);
spin_lock_init(&dev_priv->rb_lock);
mutex_init(&dev_priv->vb2_queue_lock);
mutex_init(&dev_priv->ioctl_lock);

View File

@@ -65,14 +65,14 @@ void bcwc_channel_ringbuf_init(struct bcwc_private *dev_priv, struct fw_channel
entry = (struct bcwc_ringbuf_entry *)chan->ringbuf.virt_addr;
pr_debug("clearing ringbuf %s at %p (size %d)\n", chan->name, entry, chan->size);
spin_lock_irq(&dev_priv->rb_lock);
spin_lock_irq(&chan->lock);
for(i = 0; i < chan->size; i++) {
entry->address_flags = 1;
entry->request_size = 0;
entry->response_size = 0;
entry++;
}
spin_unlock_irq(&dev_priv->rb_lock);
spin_unlock_irq(&chan->lock);
}
}
@@ -80,52 +80,57 @@ struct bcwc_ringbuf_entry *bcwc_channel_ringbuf_send(struct bcwc_private *dev_pr
u32 data_offset, u32 request_size, u32 response_size)
{
struct bcwc_ringbuf_entry *entry;
int pos = chan->ringbuf.idx;
entry = get_entry_addr(dev_priv, chan, chan->ringbuf.idx++);
if (chan->ringbuf.idx >= chan->size) {
pr_debug("%s: reset tx pointer\n", chan->name);
chan->ringbuf.idx = 0;
pr_debug("send %08x\n", data_offset);
spin_lock_irq(&chan->lock);
entry = get_entry_addr(dev_priv, chan, chan->ringbuf.idx);
if (chan->tx_lock) {
spin_unlock_irq(&chan->lock);
return NULL;
}
pr_debug("%s: send entry %p offset %08x pos %d\n", chan->name, entry, data_offset, pos);
spin_lock_irq(&dev_priv->rb_lock);
if (chan->type != FW_CHAN_TYPE_OUT && ++chan->ringbuf.idx >= chan->size)
chan->ringbuf.idx = 0;
chan->tx_lock = 1;
chan->rx_lock = 0;
entry->request_size = request_size;
entry->response_size = response_size;
entry->address_flags = data_offset | (chan->type == 0 ? 0 : 1);
spin_unlock_irq(&dev_priv->rb_lock);
// pr_debug("address_flags %x, request size %x response size %x\n",
// entry->address_flags, entry->request_size, entry->response_size);
wmb();
entry->address_flags = data_offset | (chan->type == 0 ? 0 : 1);
spin_unlock_irq(&chan->lock);
spin_lock_irq(&dev_priv->io_lock);
BCWC_ISP_REG_WRITE(0x10 << chan->source, ISP_REG_41020);
spin_unlock_irq(&dev_priv->io_lock);
return entry;
}
struct bcwc_ringbuf_entry *bcwc_channel_ringbuf_get_entry(struct bcwc_private *dev_priv,
struct bcwc_ringbuf_entry *bcwc_channel_ringbuf_receive(struct bcwc_private *dev_priv,
struct fw_channel *chan)
{
struct bcwc_ringbuf_entry *entry;
struct bcwc_ringbuf_entry *entry, *ret = NULL;
spin_lock_irq(&chan->lock);
if (chan->rx_lock)
goto out;
spin_lock_irq(&dev_priv->rb_lock);
entry = get_entry_addr(dev_priv, chan, chan->ringbuf.idx);
if (chan->ringbuf.idx > chan->size)
if (!(entry->address_flags & 1) ^ (chan->type != 0))
goto out;
ret = entry;
if (chan->type == FW_CHAN_TYPE_OUT && ++chan->ringbuf.idx >= chan->size)
chan->ringbuf.idx = 0;
spin_unlock_irq(&dev_priv->rb_lock);
return entry;
}
int bcwc_channel_ringbuf_entry_available(struct bcwc_private *dev_priv,
struct fw_channel *chan)
{
struct bcwc_ringbuf_entry *entry;
int ret;
spin_lock_irq(&dev_priv->rb_lock);
entry = get_entry_addr(dev_priv, chan, chan->ringbuf.idx);
ret = !(entry->address_flags & 1) ^ (chan->type == 0);
spin_unlock_irq(&dev_priv->rb_lock);
chan->rx_lock = 1;
chan->tx_lock = 0;
out:
spin_unlock_irq(&chan->lock);
return ret;
}

View File

@@ -48,7 +48,7 @@ extern struct bcwc_ringbuf_entry *bcwc_channel_ringbuf_get_entry(struct bcwc_pri
extern struct bcwc_ringbuf_entry *bcwc_channel_ringbuf_send(struct bcwc_private *dev_priv, struct fw_channel *chan,
u32 data_offset, u32 request_size, u32 response_size);
extern int bcwc_channel_ringbuf_entry_available(struct bcwc_private *dev_priv,
struct fw_channel *chan);
struct bcwc_ringbuf_entry *bcwc_channel_ringbuf_receive(struct bcwc_private *dev_priv,
struct fw_channel *chan);
#endif

View File

@@ -112,6 +112,32 @@ static void bcwc_buffer_cleanup(struct vb2_buffer *vb)
ctx->dma_desc_obj = NULL;
}
/*
 * Submit a prepared host-to-target (H2T) DMA buffer to the firmware and
 * wait for its completion.
 *
 * Sends the buffer's DMA descriptor offset over the channel_buf_h2t ring,
 * then sleeps (interruptibly, up to 1 second) until
 * bcwc_buffer_queued_handler() sets ctx->done and wakes ctx->wq.
 *
 * Returns 0 on success, -EIO if the ring could not accept the entry
 * (bcwc_channel_ringbuf_send() returned NULL), or -ETIMEDOUT if no
 * completion arrived in time.
 * NOTE(review): an interrupted wait (negative return from
 * wait_event_interruptible_timeout) is also reported as -ETIMEDOUT here —
 * confirm that collapsing -ERESTARTSYS into -ETIMEDOUT is intentional.
 */
static int bcwc_send_h2t_buffer(struct bcwc_private *dev_priv, struct h2t_buf_ctx *ctx)
{
volatile struct bcwc_ringbuf_entry *entry;
// pr_debug("sending buffer %p\n", ctx->vb);
/* 0x180 / 0x30000000 are the request/response sizes this driver always
 * uses for buffer submissions; their firmware meaning is unconfirmed. */
entry = bcwc_channel_ringbuf_send(dev_priv, dev_priv->channel_buf_h2t,
ctx->dma_desc_obj->offset, 0x180, 0x30000000);
if (!entry)
return -EIO;
/* <= 0 covers both timeout (0) and an interrupted sleep (< 0). */
if (wait_event_interruptible_timeout(ctx->wq, ctx->done, HZ) <= 0) {
dev_err(&dev_priv->pdev->dev, "timeout wait for buffer %p\n", ctx->vb);
return -ETIMEDOUT;
}
/* Re-arm the completion flag for the next submission of this ctx. */
ctx->done = 0;
return 0;
}
/*
 * Completion callback for a queued H2T buffer: marks the buffer context
 * done and wakes the submitter sleeping in bcwc_send_h2t_buffer().
 *
 * The h2t_buf_ctx pointer is recovered from desc[0].tag, where it was
 * stored when the DMA descriptor list was prepared (see
 * bcwc_buffer_prepare(), which writes ctx into dma_list->desc[0].tag).
 */
void bcwc_buffer_queued_handler(struct bcwc_private *dev_priv, struct dma_descriptor_list *list)
{
struct h2t_buf_ctx *ctx = (struct h2t_buf_ctx *)list->desc[0].tag;
ctx->done = 1;
wake_up_interruptible(&ctx->wq);
}
static void bcwc_buffer_queue(struct vb2_buffer *vb)
{
struct bcwc_private *dev_priv = vb2_get_drv_priv(vb->vb2_queue);
@@ -149,10 +175,12 @@ static void bcwc_buffer_queue(struct vb2_buffer *vb)
pr_debug("%d: field0: %d, count %d, pool %d, addr0 0x%08x, addr1 0x%08x tag 0x%08llx vb = %p\n", i, list->field0,
list->desc[i].count, list->desc[i].pool, list->desc[i].addr0, list->desc[i].addr1, list->desc[i].tag, ctx->vb);
bcwc_channel_ringbuf_send(dev_priv, dev_priv->channel_buf_h2t,
ctx->dma_desc_obj->offset, 0x180, 0x30000000);
if (bcwc_send_h2t_buffer(dev_priv, ctx)) {
vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
ctx->state = BUF_ALLOC;
}
}
return;
}
static int bcwc_buffer_prepare(struct vb2_buffer *vb)
@@ -209,6 +237,7 @@ static int bcwc_buffer_prepare(struct vb2_buffer *vb)
dma_list->desc[0].addr2 = (ctx->plane[2]->offset << 12) | 0xc0000000;
dma_list->desc[0].tag = (u64)ctx;
init_waitqueue_head(&ctx->wq);
return 0;
}
@@ -270,9 +299,11 @@ static int bcwc_start_streaming(struct vb2_queue *vq, unsigned int count)
if (ctx->state != BUF_DRV_QUEUED)
continue;
ctx->state = BUF_HW_QUEUED;
bcwc_channel_ringbuf_send(dev_priv, dev_priv->channel_buf_h2t,
ctx->dma_desc_obj->offset, 0x180, 0x30000000);
if (bcwc_send_h2t_buffer(dev_priv, ctx)) {
vb2_buffer_done(ctx->vb, VB2_BUF_STATE_ERROR);
ctx->state = BUF_ALLOC;
}
ctx->state = BUF_HW_QUEUED;
}
return 0;
@@ -281,14 +312,25 @@ static int bcwc_start_streaming(struct vb2_queue *vq, unsigned int count)
static void bcwc_stop_streaming(struct vb2_queue *vq)
{
struct bcwc_private *dev_priv = vb2_get_drv_priv(vq);
struct h2t_buf_ctx *ctx;
int ret, i;
pr_debug("%s\n", __FUNCTION__);
bcwc_isp_cmd_channel_buffer_return(dev_priv, 0);
pr_debug("waiting for buffers...\n");
vb2_wait_for_all_buffers(vq);
pr_debug("done\n");
bcwc_isp_cmd_channel_stop(dev_priv);
ret = bcwc_stop_channel(dev_priv, 0);
if (!ret) {
pr_debug("waiting for buffers...\n");
vb2_wait_for_all_buffers(vq);
pr_debug("done\n");
} else {
/* Firmware doesn't respond. */
for(i = 0; i < BCWC_BUFFERS;i++) {
ctx = dev_priv->h2t_bufs + i;
if (ctx->state == BUF_DRV_QUEUED || ctx->state == BUF_HW_QUEUED) {
vb2_buffer_done(ctx->vb, VB2_BUF_STATE_DONE);
ctx->vb = NULL;
ctx->state = BUF_ALLOC;
}
}
}
}
static struct vb2_ops vb2_queue_ops = {