#ifndef __INDUSTRIALIO_DMA_BUFFER_H__
#define __INDUSTRIALIO_DMA_BUFFER_H__

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/types.h>

#include <linux/iio/buffer_impl.h>
struct iio_dma_buffer_queue;
struct iio_dma_buffer_ops;
struct device;
struct dma_buf_attachment;
struct dma_fence;
struct sg_table;

/* State of a struct iio_dma_buffer_block */
enum iio_block_state {
	IIO_BLOCK_STATE_QUEUED,		/* On the incoming queue */
	IIO_BLOCK_STATE_ACTIVE,		/* Currently being processed by the DMA */
	IIO_BLOCK_STATE_DONE,		/* On the outgoing queue */
	IIO_BLOCK_STATE_DEAD,		/* Marked as to be freed */
};

/* One DMA-able memory block managed by the queue */
struct iio_dma_buffer_block {
	struct list_head head;		/* Entry in the queue's block lists */
	size_t bytes_used;		/* Number of bytes containing valid data */

	/* Set during allocation, constant thereafter */
	void *vaddr;			/* Virtual address of the block's memory */
	dma_addr_t phys_addr;		/* DMA address of the block's memory */
	size_t size;			/* Total size of the block in bytes */
	struct iio_dma_buffer_queue *queue; /* Parent DMA buffer queue */

	struct kref kref;		/* Manages the lifetime of the block */
	enum iio_block_state state;	/* Core-owned; needs queue->list_lock outside the core */

	bool cyclic;			/* True if this is a cyclic buffer */
	bool fileio;			/* True if this block is used for fileio mode */

	struct sg_table *sg_table;	/* DMA scatter-gather table for a DMABUF transfer */
	struct dma_fence *fence;	/* Signaled when a DMABUF transfer completes */
};

/* State of the fileio (read()/write()) fallback mode */
struct iio_dma_buffer_queue_fileio {
	struct iio_dma_buffer_block *blocks[2]; /* Blocks used for double buffering */
	struct iio_dma_buffer_block *active_block; /* Block currently accessed by read()/write() */
	size_t pos;			/* Read/write offset in the active block */
	size_t block_size;		/* Size of each block in bytes */

	unsigned int next_dequeue;	/* Index of the next block to be dequeued */
	bool enabled;			/* Whether the buffer is operating in fileio mode */
};

/* DMA buffer queue base structure */
struct iio_dma_buffer_queue {
	struct iio_buffer buffer;	/* IIO buffer base structure */
	struct device *dev;		/* Device the buffer is attached to */
	const struct iio_dma_buffer_ops *ops; /* Driver-provided DMA callbacks */

	struct mutex lock;		/* Protects incoming, active and the fileio substructure */
	spinlock_t list_lock;		/* Protects block lists touched in atomic context */
	struct list_head incoming;	/* Blocks waiting to be submitted to the DMA */

	bool active;			/* Whether the buffer is currently active */
	atomic_t num_dmabufs;		/* Total number of DMABUFs attached to this queue */

	struct iio_dma_buffer_queue_fileio fileio; /* fileio mode state */
};
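
/*
 * The queue is meant to be embedded in a driver-private buffer structure,
 * with container_of() recovering it from the struct iio_buffer that the IIO
 * core passes around. A minimal sketch; struct dmaengine_buffer, its chan
 * member and its active list (blocks in flight, protected by
 * queue.list_lock) are hypothetical driver state, not part of this API:
 *
 *	struct dmaengine_buffer {
 *		struct iio_dma_buffer_queue queue;
 *		struct dma_chan *chan;
 *		struct list_head active;
 *	};
 *
 *	static struct dmaengine_buffer *
 *	iio_buffer_to_dmaengine_buffer(struct iio_buffer *buffer)
 *	{
 *		return container_of(buffer, struct dmaengine_buffer,
 *				    queue.buffer);
 *	}
 */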

/*
 * Driver-provided callbacks: submit() starts the DMA transfer for a block,
 * abort() must cancel all pending transfers.
 */
struct iio_dma_buffer_ops {
	int (*submit)(struct iio_dma_buffer_queue *queue,
		      struct iio_dma_buffer_block *block);
	void (*abort)(struct iio_dma_buffer_queue *queue);
};

/* Driver-side completion and abort reporting for blocks */
void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block);
void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
				     struct list_head *list);
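
/*
 * A minimal sketch of the driver-side callbacks, assuming a dmaengine-backed
 * capture device and the hypothetical dmaengine_buffer wrapper from above;
 * everything else is the stock dmaengine API. The DMA completion handler
 * hands the finished block back to the core via iio_dma_buffer_block_done():
 *
 *	static void dmaengine_buffer_block_done(void *data,
 *		const struct dmaengine_result *result)
 *	{
 *		struct iio_dma_buffer_block *block = data;
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&block->queue->list_lock, flags);
 *		list_del(&block->head);
 *		spin_unlock_irqrestore(&block->queue->list_lock, flags);
 *
 *		block->bytes_used -= result->residue;
 *		iio_dma_buffer_block_done(block);
 *	}
 *
 *	static int dmaengine_buffer_submit(struct iio_dma_buffer_queue *queue,
 *		struct iio_dma_buffer_block *block)
 *	{
 *		struct dmaengine_buffer *db =
 *			iio_buffer_to_dmaengine_buffer(&queue->buffer);
 *		struct dma_async_tx_descriptor *desc;
 *		dma_cookie_t cookie;
 *
 *		block->bytes_used = block->size;
 *		desc = dmaengine_prep_slave_single(db->chan, block->phys_addr,
 *				block->bytes_used, DMA_DEV_TO_MEM,
 *				DMA_PREP_INTERRUPT);
 *		if (!desc)
 *			return -ENOMEM;
 *
 *		desc->callback_result = dmaengine_buffer_block_done;
 *		desc->callback_param = block;
 *
 *		cookie = dmaengine_submit(desc);
 *		if (dma_submit_error(cookie))
 *			return dma_submit_error(cookie);
 *
 *		spin_lock_irq(&queue->list_lock);
 *		list_add_tail(&block->head, &db->active);
 *		spin_unlock_irq(&queue->list_lock);
 *
 *		dma_async_issue_pending(db->chan);
 *		return 0;
 *	}
 *
 *	static void dmaengine_buffer_abort(struct iio_dma_buffer_queue *queue)
 *	{
 *		struct dmaengine_buffer *db =
 *			iio_buffer_to_dmaengine_buffer(&queue->buffer);
 *
 *		dmaengine_terminate_sync(db->chan);
 *		iio_dma_buffer_block_list_abort(queue, &db->active);
 *	}
 *
 *	static const struct iio_dma_buffer_ops dmaengine_buffer_dma_ops = {
 *		.submit = dmaengine_buffer_submit,
 *		.abort = dmaengine_buffer_abort,
 *	};
 */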

/* Generic implementations for the iio_buffer_access_funcs callbacks */
int iio_dma_buffer_enable(struct iio_buffer *buffer, struct iio_dev *indio_dev);
int iio_dma_buffer_disable(struct iio_buffer *buffer,
			   struct iio_dev *indio_dev);
int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
			char __user *user_buffer);
int iio_dma_buffer_write(struct iio_buffer *buffer, size_t n,
			 const char __user *user_buffer);
size_t iio_dma_buffer_usage(struct iio_buffer *buffer);
int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd);
int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length);
int iio_dma_buffer_request_update(struct iio_buffer *buffer);
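
/*
 * These generic implementations are meant to be plugged into the driver's
 * struct iio_buffer_access_funcs (see linux/iio/buffer_impl.h). A sketch of
 * the fileio-related entries; my_buffer_ops is a placeholder name and
 * my_buffer_release is sketched after the lifecycle declarations below:
 *
 *	static const struct iio_buffer_access_funcs my_buffer_ops = {
 *		.read = iio_dma_buffer_read,
 *		.write = iio_dma_buffer_write,
 *		.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
 *		.set_length = iio_dma_buffer_set_length,
 *		.request_update = iio_dma_buffer_request_update,
 *		.enable = iio_dma_buffer_enable,
 *		.disable = iio_dma_buffer_disable,
 *		.data_available = iio_dma_buffer_usage,
 *		.space_available = iio_dma_buffer_usage,
 *		.release = my_buffer_release,
 *		.modes = INDIO_BUFFER_HARDWARE,
 *		.flags = INDIO_BUFFER_FLAG_FIXED_WATERMARK,
 *	};
 */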

/* Queue lifecycle: init returns 0 on success or a negative error code */
int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue, struct device *dev,
			const struct iio_dma_buffer_ops *ops);
void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue);
void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue);
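
/*
 * Typical lifecycle, reusing the hypothetical dmaengine_buffer wrapper from
 * above. In the buffer allocation path (dma_dev is the device that performs
 * the transfers, e.g. chan->device->dev for dmaengine):
 *
 *	INIT_LIST_HEAD(&db->active);
 *	ret = iio_dma_buffer_init(&db->queue, dma_dev,
 *				  &dmaengine_buffer_dma_ops);
 *	if (ret)
 *		goto err_free;
 *
 * iio_dma_buffer_exit() is called on teardown, after the buffer has been
 * disabled; the backing memory is dropped from the buffer's ->release()
 * callback once the last reference is gone:
 *
 *	static void my_buffer_release(struct iio_buffer *buffer)
 *	{
 *		struct dmaengine_buffer *db =
 *			iio_buffer_to_dmaengine_buffer(buffer);
 *
 *		iio_dma_buffer_release(&db->queue);
 *		kfree(db);
 *	}
 */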

/* DMABUF support */
struct iio_dma_buffer_block *
iio_dma_buffer_attach_dmabuf(struct iio_buffer *buffer,
			     struct dma_buf_attachment *attach);
void iio_dma_buffer_detach_dmabuf(struct iio_buffer *buffer,
				  struct iio_dma_buffer_block *block);
int iio_dma_buffer_enqueue_dmabuf(struct iio_buffer *buffer,
				  struct iio_dma_buffer_block *block,
				  struct dma_fence *fence,
				  struct sg_table *sgt,
				  size_t size, bool cyclic);
void iio_dma_buffer_lock_queue(struct iio_buffer *buffer);
void iio_dma_buffer_unlock_queue(struct iio_buffer *buffer);

struct device *iio_dma_buffer_get_dma_dev(struct iio_buffer *buffer);
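
/*
 * The DMABUF entry points slot into the same access-funcs table as the
 * fileio helpers above. A sketch of the remaining entries; the .get_dma_dev
 * hook is an assumption here, as the presumed core-side counterpart of
 * iio_dma_buffer_get_dma_dev(), so check linux/iio/buffer_impl.h for the
 * kernel version in use:
 *
 *	.attach_dmabuf = iio_dma_buffer_attach_dmabuf,
 *	.detach_dmabuf = iio_dma_buffer_detach_dmabuf,
 *	.enqueue_dmabuf = iio_dma_buffer_enqueue_dmabuf,
 *	.lock_queue = iio_dma_buffer_lock_queue,
 *	.unlock_queue = iio_dma_buffer_unlock_queue,
 *	.get_dma_dev = iio_dma_buffer_get_dma_dev,
 */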

#endif /* __INDUSTRIALIO_DMA_BUFFER_H__ */