DHP: CF1 Adds SGT mode for scatter memory mapping. [2/2]
PD#SWPL-194915
Problem:
SGT mode is needed for scatter memory mapping.
Solution:
1. Supports sg-table mapping.
2. Supports memory cached/uncached mode configuration.
3. Supports dmabuf/du-memory CPU/device RW data sync operations
(see the usage sketch below).
4. Fixed mmap being unable to share memory with other
threads due to private mappings.
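
Usage sketch of the new interfaces (illustrative only: the device node
path, the table size, the PFN source, and error handling are assumptions,
not part of this change):

    struct aml_dhp_ioctl_data io = {0};
    u64 pfns[16];   /* PFN table filled by the producer (assumed) */
    u64 uptrs[16];  /* receives the per-PFN user-space addresses */
    int fd = open("/dev/aml_dhp", O_RDWR);  /* node name assumed */

    io.base.src.type = AML_MEM_TYPE_SG_TBL;
    io.base.src.sgt  = (u64)pfns;
    io.base.src.size = 16;
    io.base.dst.type = AML_MEM_TYPE_SG_TBL;
    io.base.dst.sgt  = (u64)uptrs;
    io.base.dst.size = 16;
    io.base.dst.uncached = 0;               /* request a cached mapping */
    ioctl(fd, IOCTL_DHP_SGT_MAP, &io);      /* uptrs[] now holds mappings */

    /* Cached mappings must be synchronized around CPU access. */
    io.mem.type = AML_MEM_TYPE_SG_TBL;
    io.mem.sgt  = (u64)pfns;
    io.mem.size = 16;
    io.mem.syncflag = DHP_MEM_SYNC_READ;    /* begin CPU read access */
    ioctl(fd, IOCTL_DHP_MEM_SYNC, &io);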
depends on CL:
486083
prebuild:
Change-Id: I393a3da1e78a4fc76884e21be2959e37b9947f122
Verify:
ohm
Change-Id: I99dc9a2dec5a390ca7b480c3f75f446ae100cdb7
Signed-off-by: Nanxin Qin <nanxin.qin@amlogic.com>
diff --git a/drivers/frame_provider/aml_dhp/aml_dhp_drv.c b/drivers/frame_provider/aml_dhp/aml_dhp_drv.c
index 9520dc6..b13fcc6 100644
--- a/drivers/frame_provider/aml_dhp/aml_dhp_drv.c
+++ b/drivers/frame_provider/aml_dhp/aml_dhp_drv.c
@@ -46,6 +46,8 @@
#include <linux/types.h>
#include <linux/kref.h>
#include <linux/err.h>
+#include <linux/hashtable.h>
+#include <linux/radix-tree.h>
#include "aml_dhp_drv.h"
#include "aml_dhp_if.h"
@@ -91,6 +93,30 @@
u32 inst_cnt;
};
+/*
+ * struct pfn_hashtable - Represents a hash table that stores PFNs.
+ *
+ * @tbl : Used to store PFN entries.
+ * @pfn_count : The total count of unique PFNs in the hashtable.
+ */
+struct pfn_hashtable {
+ DECLARE_HASHTABLE(tbl, 10);
+ u32 pfn_count;
+};
+
+/*
+ * struct pfn_node - Represents a single entry in the PFN hashtable.
+ *
+ * @pfn : The PFN (Page Frame Number) being stored in the hashtable.
+ * @uptr : A user-space pointer (or other associated data) related to the PFN.
+ * @hash : A hash list node, part of the hash table entry.
+ */
+struct pfn_node {
+ u64 pfn;
+ ulong uptr;
+ struct hlist_node hash;
+};
+
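+/*
+ * Usage sketch of the PFN de-duplication pattern built on these two
+ * structures (see __remap_sgt() and __aml_dhp_sgt_sync() below): probe
+ * the bucket first, then insert PFNs that were not seen before.
+ *
+ *   hash_init(htbl->tbl);
+ *   idx = hash_64(pfn, 10);
+ *   hash_for_each_possible(htbl->tbl, node, hash, idx)
+ *           if (node->pfn == pfn)
+ *                   break;                 // duplicate PFN
+ *   hash_add(htbl->tbl, &node->hash, idx); // unseen PFN
+ */
+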
static struct aml_dhp_dev *g_dev;
static u32 debug;
@@ -111,10 +137,14 @@
{
struct data_unit *du = buf_priv;
struct aml_dhp_drv *drv = du->priv;
+ struct sg_table *sgt = &du->sg_tbl;
if (!refcount_dec_and_test(&du->refcnt))
return;
+ if (!du->uncached)
+ dma_unmap_sg(drv->dev, sgt->sgl, sgt->orig_nents, DMA_BIDIRECTIONAL);
+
sg_free_table(&du->sg_tbl);
put_device(du->dev);
@@ -175,12 +205,68 @@
.close = aml_dhp_vm_close,
};
+/*
+ * struct aml_dhp_attachment - Represents a DHP (Data Handler Proxy) buffer
+ * attachment that links an attached device to its scatter-gather (SG) table.
+ *
+ * @list : A list entry to link this attachment to other attachments (typically
+ * in a list of active attachments for a device).
+ * @dev : Pointer to the associated device structure that this attachment is linked to.
+ * @sgt : Scatter-gather table representing the memory mapping for this
+ * attachment. It describes the physical memory buffers to be used.
+ * @dma_dir : Direction of DMA data transfer (e.g., DMA_TO_DEVICE, DMA_FROM_DEVICE).
+ * @mapped : Boolean flag indicating whether the SG table is currently mapped for DMA operations.
+ * @uncached : Boolean flag indicating whether the SG table's memory is uncached.
+ */
struct aml_dhp_attachment {
- struct sg_table sgt;
+ struct list_head list;
+ struct device *dev;
+ struct sg_table *sgt;
enum dma_data_direction dma_dir;
+ bool mapped;
+ bool uncached;
};
/*
+ * dup_sg_table - Duplicates a scatter-gather table (SG table).
+ *
+ * @table : The original scatter-gather table to be duplicated.
+ *
+ * This function creates a new scatter-gather table (`new_table`), allocates memory for
+ * it, and copies the entries from the original scatter-gather table (`table`) to the
+ * new table.
+ * The function iterates over each scatter-gather entry in the original table and
+ * copies the page, length, and offset information to the new table.
+ *
+ * Return: A pointer to the newly created scatter-gather table, or an error pointer
+ * in case of failure.
+ */
+static struct sg_table *dup_sg_table(struct sg_table *table)
+{
+ struct sg_table *new_table;
+ int ret, i;
+ struct scatterlist *sg, *new_sg;
+
+ new_table = kmalloc(sizeof(*new_table), GFP_KERNEL);
+ if (!new_table)
+ return ERR_PTR(-ENOMEM);
+
+ ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
+ if (ret) {
+ kfree(new_table);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ new_sg = new_table->sgl;
+ for_each_sgtable_sg(table, sg, i) {
+ sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
+ new_sg = sg_next(new_sg);
+ }
+
+ return new_table;
+}
+
+/*
* aml_dhp_dbuf_attach - Attaches a DMA buffer to the given buffer attachment.
*
* @dbuf : Pointer to the DMA buffer structure.
@@ -194,35 +280,31 @@
static int aml_dhp_dbuf_attach(struct dma_buf *dbuf,
struct dma_buf_attachment *dbuf_attach)
{
- struct aml_dhp_attachment *attach;
- unsigned int i;
- struct scatterlist *rd, *wr;
- struct sg_table *sgt;
struct data_unit *du = dbuf->priv;
- int ret;
+ struct aml_dhp_attachment *attach;
+ struct sg_table *table;
- attach = kzalloc(sizeof(*attach), GFP_KERNEL);
+ attach = kmalloc(sizeof(*attach), GFP_KERNEL);
if (!attach)
return -ENOMEM;
- sgt = &attach->sgt;
-
- ret = sg_alloc_table(sgt, du->sg_tbl.orig_nents, GFP_KERNEL);
- if (ret) {
+ table = dup_sg_table(&du->sg_tbl);
+ if (IS_ERR(table)) {
kfree(attach);
return -ENOMEM;
}
- rd = du->sg_tbl.sgl;
- wr = sgt->sgl;
- for (i = 0; i < sgt->orig_nents; ++i) {
- sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
- rd = sg_next(rd);
- wr = sg_next(wr);
- }
+ INIT_LIST_HEAD(&attach->list);
+ attach->sgt = table;
+ attach->dev = dbuf_attach->dev;
+ attach->uncached = du->uncached;
+ attach->dma_dir = DMA_NONE;
+ attach->mapped = false;
+ dbuf_attach->priv = attach;
- attach->dma_dir = DMA_NONE;
- dbuf_attach->priv = attach;
+ mutex_lock(&du->lock);
+ list_add(&attach->list, &du->attachments);
+ mutex_unlock(&du->lock);
return 0;
}
@@ -240,24 +322,22 @@
static void aml_dhp_dbuf_detach(struct dma_buf *dbuf,
struct dma_buf_attachment *db_attach)
{
+ struct data_unit *du = dbuf->priv;
struct aml_dhp_attachment *attach = db_attach->priv;
- struct sg_table *sgt;
if (!attach)
return;
- sgt = &attach->sgt;
+ mutex_lock(&du->lock);
+ list_del(&attach->list);
+ mutex_unlock(&du->lock);
- /* release the scatterlist cache */
- if (attach->dma_dir != DMA_NONE) {
- dma_unmap_sgtable(db_attach->dev, sgt,
- attach->dma_dir,
- DMA_ATTR_SKIP_CPU_SYNC);
- }
-
- sg_free_table(sgt);
+ sg_free_table(attach->sgt);
+ kfree(attach->sgt);
kfree(attach);
+
db_attach->priv = NULL;
}
/*
@@ -275,17 +355,19 @@
enum dma_data_direction dma_dir)
{
struct aml_dhp_attachment *attach = db_attach->priv;
+ ulong attr = 0;
/* stealing dmabuf mutex to serialize map/unmap operations */
#if LINUX_VERSION_CODE <= KERNEL_VERSION(6, 2, 0)
struct mutex *lock = &db_attach->dmabuf->lock;
#endif
- struct sg_table *sgt;
+ struct sg_table *sgt = attach->sgt;
#if LINUX_VERSION_CODE <= KERNEL_VERSION(6, 2, 0)
mutex_lock(lock);
#endif
+ if (attach->uncached)
+ attr = DMA_ATTR_SKIP_CPU_SYNC;
- sgt = &attach->sgt;
/* return previously mapped sg table */
if (attach->dma_dir == dma_dir) {
#if LINUX_VERSION_CODE <= KERNEL_VERSION(6, 2, 0)
@@ -294,16 +376,8 @@
return sgt;
}
- /* release any previous cache */
- if (attach->dma_dir != DMA_NONE) {
- dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir,
- DMA_ATTR_SKIP_CPU_SYNC);
- attach->dma_dir = DMA_NONE;
- }
-
/* mapping to the client with new direction. */
- if (dma_map_sgtable(db_attach->dev, sgt, dma_dir,
- DMA_ATTR_SKIP_CPU_SYNC)) {
+ if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, attr)) {
LOG_ERR("failed to map scatterlist\n");
#if LINUX_VERSION_CODE <= KERNEL_VERSION(6, 2, 0)
mutex_unlock(lock);
@@ -312,11 +386,10 @@
}
attach->dma_dir = dma_dir;
-
+ attach->mapped = true;
#if LINUX_VERSION_CODE <= KERNEL_VERSION(6, 2, 0)
mutex_unlock(lock);
#endif
-
return sgt;
}
@@ -333,7 +406,18 @@
static void aml_dhp_dbuf_unmap(struct dma_buf_attachment *db_attach,
struct sg_table *sgt, enum dma_data_direction dma_dir)
{
- /* nothing to be done here */
+ struct aml_dhp_attachment *attach = db_attach->priv;
+ ulong attr = 0;
+
+ if (attach->uncached)
+ attr = DMA_ATTR_SKIP_CPU_SYNC;
+
+ /* release the scatterlist cache */
+ if (attach->dma_dir != DMA_NONE) {
+ dma_unmap_sgtable(db_attach->dev, sgt, dma_dir, attr);
+ attach->dma_dir = DMA_NONE;
+ }
+
+ attach->mapped = false;
}
/*
@@ -357,7 +441,13 @@
int ret;
if (!du) {
- LOG_ERR("No buffer to map\n");
+ LOG_ERR("[%u]: No buffer to map\n", drv->uid);
+ return -EINVAL;
+ }
+
+ if (!IS_ALIGNED(vma->vm_start, PAGE_SIZE)) {
+ LOG_ERR("[%u]: VMA start address not aligned: %lx\n",
+ drv->uid, vma->vm_start);
return -EINVAL;
}
@@ -372,15 +462,17 @@
page_to_pfn(page),
PAGE_SIZE,
vma->vm_page_prot);
- if (ret)
+ if (ret) {
+ LOG_ERR("[%u]: Failed to map page at VMA addr: %lx, PFN: %lx, error: %d\n",
+ drv->uid, addr, page_to_pfn(page), ret);
return ret;
+ }
addr += PAGE_SIZE;
if (addr >= vma->vm_end)
break;
}
-
#if LINUX_VERSION_CODE <= KERNEL_VERSION(6, 3, 0)
vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
#else
@@ -392,10 +484,12 @@
vma->vm_ops->open(vma);
- LOG_DEBUG("[%u]: Mapped addr:%llx at %lx, size %u\n",
- drv->uid, (u64)sg_phys(sgt->sgl),
+ du->mapped = true;
+
+ LOG_DEBUG("[%u]: DBUF Mapped addr:%lx at VMA start %lx, size %lu\n",
+ drv->uid, sg_phys(sgt->sgl),
vma->vm_start,
- sgt->sgl->length);
+ addr - vma->vm_start);
return 0;
}
@@ -415,11 +509,116 @@
return __aml_dhp_dbuf_mmap(dbuf->priv, vma);
}
+/*
+ * aml_dhp_dbuf_begin_cpu_access - Synchronize DMA buffer for CPU access.
+ *
+ * @dbuf : Pointer to the DMA buffer structure.
+ * @direction: The direction of data transfer (DMA_FROM_DEVICE, DMA_TO_DEVICE, etc.).
+ *
+ * This function ensures the DMA buffer's contents are synchronized and safe for CPU access.
+ * If the DMA buffer is mapped to user-space or attached to devices, appropriate
+ * synchronization is performed for the specified access direction.
+ *
+ * Return:
+ * 0 on success, negative error code on failure.
+ */
+static int aml_dhp_dbuf_begin_cpu_access(struct dma_buf *dbuf,
+ enum dma_data_direction direction)
+{
+ struct data_unit *du = dbuf->priv;
+ struct aml_dhp_drv *drv = du->priv;
+ struct aml_dhp_attachment *attach;
+
+ /* Validate direction parameter */
+ if (!valid_dma_direction(direction)) {
+ LOG_ERR("[%u]: Invalid direction: %d\n", drv->uid, direction);
+ return -EINVAL;
+ }
+
+ LOG_DEBUG("[%u]: %s, uncached:%d, mapped:%d, dirt %d\n",
+ drv->uid, __func__, du->uncached, du->mapped, direction);
+
+ mutex_lock(&du->lock);
+
+ /* Sync for process mmap access */
+ if (!du->uncached && du->mapped) {
+ LOG_DEBUG("[%u]: Syncing dmabuf for CPU access\n", drv->uid);
+ dma_sync_sgtable_for_cpu(du->dev, &du->sg_tbl, direction);
+ }
+
+ /* Sync for attached devices */
+ list_for_each_entry(attach, &du->attachments, list) {
+ if (!attach->mapped)
+ continue;
+
+ LOG_DEBUG("[%u]: Syncing attachment for CPU access\n", drv->uid);
+ dma_sync_sgtable_for_cpu(attach->dev, attach->sgt, direction);
+ }
+
+ mutex_unlock(&du->lock);
+
+ return 0;
+}
+
+/*
+ * aml_dhp_dbuf_end_cpu_access - Synchronize DMA buffer after CPU access.
+ *
+ * @dbuf : Pointer to the DMA buffer structure.
+ * @direction: The direction of data transfer (DMA_FROM_DEVICE, DMA_TO_DEVICE, etc.).
+ *
+ * This function ensures the DMA buffer's contents are synchronized after CPU access,
+ * making it safe for use by devices. Appropriate synchronization is performed for
+ * the specified access direction, whether for user-space mappings or attached devices.
+ *
+ * Return:
+ * 0 on success, negative error code on failure.
+ */
+static int aml_dhp_dbuf_end_cpu_access(struct dma_buf *dbuf,
+ enum dma_data_direction direction)
+{
+ struct data_unit *du = dbuf->priv;
+ struct aml_dhp_drv *drv = du->priv;
+ struct aml_dhp_attachment *attach;
+
+ /* Validate direction parameter */
+ if (!valid_dma_direction(direction)) {
+ LOG_ERR("[%u]: Invalid direction: %d\n", drv->uid, direction);
+ return -EINVAL;
+ }
+
+ LOG_DEBUG("[%u]: %s, uncached:%d, mapped:%d, dirt %d\n",
+ drv->uid, __func__, du->uncached, du->mapped, direction);
+
+ mutex_lock(&du->lock);
+
+ /* Sync for process mmap access */
+ if (!du->uncached && du->mapped) {
+ LOG_DEBUG("[%u]: Syncing dmabuf for device access\n", drv->uid);
+ dma_sync_sgtable_for_device(du->dev, &du->sg_tbl, direction);
+ }
+
+ /* Sync for attached devices */
+ list_for_each_entry(attach, &du->attachments, list) {
+ if (!attach->mapped)
+ continue;
+
+ LOG_DEBUG("[%u]: Syncing attachment for device access\n", drv->uid);
+ dma_sync_sgtable_for_device(attach->dev, attach->sgt, direction);
+ }
+
+ mutex_unlock(&du->lock);
+
+ return 0;
+}
+
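+/*
+ * The begin/end callbacks above are driven from userspace through the
+ * standard dma-buf sync ioctl. Illustrative sketch (flags are the
+ * DMA_BUF_SYNC_* values from <linux/dma-buf.h>):
+ *
+ *   struct dma_buf_sync sync = { .flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_READ };
+ *   ioctl(dbuf_fd, DMA_BUF_IOCTL_SYNC, &sync);  // begin CPU access
+ *   ... CPU reads the buffer ...
+ *   sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_READ;
+ *   ioctl(dbuf_fd, DMA_BUF_IOCTL_SYNC, &sync);  // end CPU access
+ */
+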
static const struct dma_buf_ops aml_dhp_dbuf_ops = {
.attach = aml_dhp_dbuf_attach,
.detach = aml_dhp_dbuf_detach,
.map_dma_buf = aml_dhp_dbuf_map,
.unmap_dma_buf = aml_dhp_dbuf_unmap,
+ .begin_cpu_access = aml_dhp_dbuf_begin_cpu_access,
+ .end_cpu_access = aml_dhp_dbuf_end_cpu_access,
.mmap = aml_dhp_dbuf_mmap,
.release = aml_dhp_dbuf_release,
};
@@ -440,13 +639,43 @@
{
struct aml_dhp_drv *drv = du->priv;
struct sg_table *sgt = &du->sg_tbl;
- struct dma_buf *dbuf;
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+ struct dma_buf *dbuf;
+ struct scatterlist *sg;
+ u32 sg_size = size;
+ int pages = 0;
+ int ents = 0;
+ int i;
- if (sg_alloc_table(sgt, 1, GFP_KERNEL))
+ if (!addr || size == 0 || !IS_ALIGNED(addr, PAGE_SIZE)) {
+ LOG_ERR("[%u]: Invalid address or size: addr=%lx, size=%u\n",
+ drv->uid, addr, size);
return NULL;
+ }
- sg_set_page(sgt->sgl, pfn_to_page(PFN_DOWN(addr)), size, 0);
+ pages = DIV_ROUND_UP(size, PAGE_SIZE);
+ if (sg_alloc_table(sgt, pages, GFP_KERNEL)) {
+ LOG_ERR("[%u]: Failed to allocate sg_table for %d pages\n",
+ drv->uid, pages);
+ return NULL;
+ }
+
+ for (i = 0; i < pages; i++) {
+ struct page *pg = pfn_to_page(PFN_DOWN(addr) + i);
+ u32 sz = (sg_size < PAGE_SIZE) ? sg_size : PAGE_SIZE;
+
+ sg_set_page(&sgt->sgl[i], pg, sz, 0);
+
+ sg_size -= sz;
+ }
+
+ if (!du->uncached) {
+ ents = dma_map_sg(drv->dev, sgt->sgl, sgt->orig_nents, DMA_BIDIRECTIONAL);
+ if (ents < 0) {
+ LOG_ERR("[%u]: dma map sg failed, ret=\n", drv->uid, ents);
+ sg_free_table(sgt);
+ return NULL;
+ }
+ }
exp_info.ops = &aml_dhp_dbuf_ops;
exp_info.size = size;
@@ -455,6 +684,7 @@
dbuf = dma_buf_export(&exp_info);
if (IS_ERR(dbuf)) {
+ LOG_ERR("[%u]: dma_buf export failed\n", drv->uid);
sg_free_table(sgt);
return NULL;
}
@@ -535,16 +765,17 @@
/**
* aml_dhp_alloc_fd - Allocates a file descriptor for a DMA buffer.
- * @du: Pointer to the data unit structure.
- * @addr: Address of the DMA buffer.
- * @size: Size of the DMA buffer.
+ * @du: Pointer to the data unit structure.
+ * @addr: Address of the DMA buffer.
+ * @size: Size of the DMA buffer.
+ * @uncached: Indicates whether the buffer should be allocated as uncached memory.
*
* This function allocates a file descriptor for a DMA buffer associated with
* the data unit, initializing the necessary structures and reference counts.
*
* Return: The allocated file descriptor on success or a negative error code on failure.
*/
-static int aml_dhp_alloc_fd(struct data_unit *du, ulong addr, u32 size)
+static int aml_dhp_alloc_fd(struct data_unit *du, ulong addr, u32 size, bool uncached)
{
struct aml_dhp_drv *drv = du->priv;
struct dma_buf *dbuf;
@@ -562,6 +793,7 @@
}
du->dbuf = dbuf;
+ du->uncached = uncached;
aml_dhp_vma_hdr_init(du);
@@ -698,7 +930,7 @@
switch (m->type) {
case AML_MEM_TYPE_PFN: {
- addr = (ulong)pfn_to_kaddr(m->pfn);
+ addr = (ulong)__pfn_to_phys(m->pfn);
break;
}
case AML_MEM_TYPE_PHY_ADDR: {
@@ -716,17 +948,16 @@
return addr;
}
-/**
- * get_du_mem_size - Get the size of the memory block from aml_du_mem.
- * @m: Pointer to the aml_du_mem structure.
- *
- * Return: Size of the memory block.
- */
static u32 get_du_mem_size(struct aml_du_mem *m)
{
return m->size;
}
+static u32 get_du_mem_cache_mode(struct aml_du_mem *m)
+{
+ return m->uncached;
+}
+
/**
* du_alloc_fd - Allocate a file descriptor for a data unit (DU) memory.
* @drv: Pointer to the aml_dhp_drv structure (driver).
@@ -743,6 +974,7 @@
struct aml_dhp_ioctl_data io;
struct aml_du_base *base;
struct data_unit *du;
+ bool uncached = false;
ulong addr = 0;
u32 size = 0;
int fd = -1;
@@ -769,7 +1001,9 @@
return -EFAULT;
}
- fd = aml_dhp_alloc_fd(du, addr, size);
+ uncached = !!get_du_mem_cache_mode(&base->src);
+
+ fd = aml_dhp_alloc_fd(du, addr, size, uncached);
if (fd < 0) {
ret = fd;
goto err;
@@ -803,35 +1037,39 @@
* __remap_uptr - Remap PFNs to user-space memory.
* @pfn: Page Frame Number to remap.
* @len: Length of the memory region.
+ * @uncached: Map the region as uncached (write-combined) when true.
*
* This function allocates anonymous user-space memory and remaps the physical page frames (PFNs)
* into the user-space virtual address space.
*
* Return: User-space pointer to the remapped memory, or 0 on failure.
*/
-static ulong __remap_uptr(ulong pfn, int len)
+static ulong __remap_uptr(ulong pfn, int len, bool uncached)
{
struct vm_area_struct *vma = NULL;
ulong prot = PROT_READ | PROT_WRITE;
- ulong flags = MAP_ANONYMOUS | MAP_PRIVATE;
+ ulong flags = MAP_ANONYMOUS | MAP_SHARED;
int ulen = PAGE_ALIGN(len);
ulong uptr = 0;
- // Attempt to allocate user space memory using vm_mmap
+ /* Attempt to allocate user space memory using vm_mmap */
uptr = vm_mmap(NULL, 0, ulen, prot, flags, 0);
if (IS_ERR_VALUE(uptr)) {
LOG_ERR("Failed to allocate user space memory\n");
return 0;
}
- // Find the virtual memory area (VMA) corresponding to the newly allocated memory
+ /* Find the virtual memory area (VMA) corresponding to the newly allocated memory */
vma = find_vma(current->mm, uptr);
if (!vma || uptr < vma->vm_start || uptr + ulen > vma->vm_end) {
LOG_ERR("Invalid VMA or address range!\n");
goto free_uptr;
}
- // Attempt to map the physical page frames (PFN) to user space memory
+ if (uncached)
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+ /* Attempt to map the physical page frames (PFN) to user space memory */
if (remap_pfn_range(vma, uptr, pfn, ulen, vma->vm_page_prot)) {
LOG_ERR("Failed to remap pfn range\n");
goto free_uptr;
@@ -859,6 +1097,7 @@
{
struct aml_dhp_ioctl_data __user *uarg = (void *)arg;
struct aml_dhp_ioctl_data io;
+ bool uncached = false;
ulong uptr = 0;
ulong pfn = 0;
u32 size = 0;
@@ -880,7 +1119,9 @@
return -EFAULT;
}
- uptr = __remap_uptr(pfn, size);
+ uncached = !!get_du_mem_cache_mode(&io.mem);
+
+ uptr = __remap_uptr(pfn, size, uncached);
if (!uptr && IS_ERR_VALUE(uptr)) {
LOG_ERR("Failed to remap userspace addr.\n");
return -EFAULT;
@@ -894,13 +1135,477 @@
return -EFAULT;
}
- LOG_TRACE("[%u]: PFN:%lx is mapped to the uptr:%lx, size:%u\n",
+ LOG_DEBUG("[%u]: DU Mapped addr:%x at %lx, size %u\n",
drv->uid, pfn, uptr, size);
return ret;
}
/**
+ * __remap_sgt - Remap scatter-gather table (SGT) to user-space.
+ * @uptr_table: Output table to store the user-space pointers (uptr).
+ * @pfn_table: Input table of physical page frame numbers (PFNs).
+ * @pfn_size: The size of the PFN table (number of PFNs).
+ * @uncached: Flag to indicate whether the mapping should be uncached (write-combined).
+ *
+ * This function maps the PFNs in the scatter-gather table (SGT) to user-space addresses.
+ * It validates and remaps unique PFNs, ensuring that the mapping is correctly done
+ * while handling memory protection based on the caching flag.
+ *
+ * Return: The length of the user-space memory region mapped (in bytes) on success,
+ * or a negative error code on failure.
+ */
+static int __remap_sgt(u64 *uptr_table, u64 *pfn_table, u32 pfn_size, bool uncached)
+{
+ struct vm_area_struct *vma = NULL;
+ struct pfn_hashtable *pfn_htbl = NULL;
+ struct pfn_node *nodes = NULL, *node;
+ ulong uptr = 0;
+ ulong addr;
+ int unique_pfns = 0;
+ int ulen = 0;
+ int ret = 0;
+ int i;
+
+ pfn_htbl = vmalloc(sizeof(*pfn_htbl));
+ if (!pfn_htbl) {
+ LOG_ERR("Failed to allocate memory for PFNs hashtable.\n");
+ return -ENOMEM;
+ }
+
+ nodes = vmalloc(sizeof(*node) * pfn_size);
+ if (!nodes) {
+ vfree(pfn_htbl);
+ pr_err("Failed to allocate memory for PFN node array.\n");
+ return -ENOMEM;
+ }
+
+ hash_init(pfn_htbl->tbl);
+
+ /* Calculate unique PFNs using hash table */
+ for (i = 0; i < pfn_size; i++) {
+ u64 pfn = pfn_table[i];
+ unsigned int hash_index = hash_64(pfn, 10);
+ bool found = false;
+
+ /* Iterate over the bucket corresponding to the PFN's hash index */
+ hash_for_each_possible(pfn_htbl->tbl, node, hash, hash_index) {
+ if (node->pfn == pfn) {
+ found = true;
+ break;
+ }
+ }
+
+ if (found)
+ continue;
+
+ /* Insert the unique PFN into the hash table */
+ node = &nodes[i];
+ node->pfn = pfn;
+ node->uptr = 0;
+ hash_add(pfn_htbl->tbl, &node->hash, hash_index);
+
+ unique_pfns++;
+ }
+
+ /* Allocate contiguous memory in user space based on unique PFNs */
+ ulen = unique_pfns * PAGE_SIZE;
+ uptr = vm_mmap(NULL, 0, ulen, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_SHARED, 0);
+ if (IS_ERR_VALUE(uptr)) {
+ ulen = -ENOMEM;
+ LOG_ERR("Failed to allocate user-space memory\n");
+ goto err;
+ }
+
+ /* Validate the VMA (virtual memory area) range */
+ vma = find_vma(current->mm, uptr);
+ if (!vma || uptr < vma->vm_start || (uptr + ulen) > vma->vm_end) {
+ vm_munmap(uptr, ulen);
+ ulen = -EINVAL;
+ LOG_ERR("Invalid VMA or address range!\n");
+ goto err;
+ }
+
+ if (uncached)
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+ /* Map PFNs to user-space memory, reusing existing mappings for duplicate PFNs */
+ addr = uptr;
+ for (i = 0; i < pfn_size; i++) {
+ u64 pfn = pfn_table[i];
+ u32 hash_index = hash_64(pfn, 10);
+ bool found = false;
+
+ hash_for_each_possible(pfn_htbl->tbl, node, hash, hash_index) {
+ if (node->pfn == pfn) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ vm_munmap(uptr, ulen);
+ ulen = -EINVAL;
+ LOG_ERR("Failed to find valid pfn:%llx\n", pfn);
+ goto err;
+ }
+
+ if (node->uptr) {
+ uptr_table[i] = node->uptr; /* reuse existing mapping */
+ } else {
+ ret = remap_pfn_range(vma, addr, pfn, PAGE_SIZE, vma->vm_page_prot);
+ if (ret) {
+ vm_munmap(uptr, ulen);
+ ulen = -EFAULT;
+ LOG_ERR("Failed to map PFN %llx to user space at addr %lx, error %d\n", pfn, addr, ret);
+ goto err;
+ }
+
+ node->uptr = addr;
+ uptr_table[i] = addr;
+ addr += PAGE_SIZE;
+ }
+ }
+err:
+ vfree(nodes);
+ vfree(pfn_htbl);
+
+ return ulen;
+}
+
+/**
+ * du_sgt_mmap - Map DU scatter-gather table to user-space.
+ * @drv: Pointer to the aml_dhp_drv structure (driver).
+ * @arg: Argument passed from user space, containing memory and PFN details.
+ *
+ * This function maps a scatter-gather table (SGT) of PFNs into the user-space virtual
+ * address space. It allocates memory for both the PFN and user-space pointer tables,
+ * performs the remapping of the PFNs to user space, and then returns the results back to user space.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int du_sgt_mmap(struct aml_dhp_drv *drv, ulong arg)
+{
+ struct aml_dhp_ioctl_data __user *uarg = (void *)arg;
+ struct aml_dhp_ioctl_data io;
+ bool uncached = false;
+ u64 *pfn_table = NULL;
+ u32 pfn_num;
+ u64 *uptr_table = NULL;
+ int uptr_num;
+ int uptr_len;
+ ktime_t kt_earlier;
+ int ret = 0;
+
+ /* Copy data from user space */
+ if (copy_from_user(&io, uarg, sizeof(io)))
+ return -EFAULT;
+
+ /* Get memory size for data unit (DU) */
+ pfn_num = get_du_mem_size(&io.base.src);
+ if (!pfn_num) {
+ LOG_ERR("DU size is invalid.\n");
+ return -EINVAL;
+ }
+
+ uptr_num = get_du_mem_size(&io.base.dst);
+ if (pfn_num > uptr_num) {
+ LOG_ERR("dst-tab:%d is smaller than src-tab:%d.\n",
+ uptr_num, pfn_num);
+ return -EINVAL;
+ }
+
+ /* Allocate memory for sg_table and uptr_table */
+ pfn_table = vmalloc(pfn_num * sizeof(u64));
+ if (!pfn_table)
+ return -ENOMEM;
+
+ uptr_table = vmalloc(pfn_num * sizeof(u64));
+ if (!uptr_table) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ /* Copy pfn table from user */
+ if (copy_from_user(pfn_table, (void __user *)io.base.src.sgt, pfn_num * sizeof(u64))) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ uncached = !!get_du_mem_cache_mode(&io.base.dst);
+
+ /* Remap PFNs table */
+ kt_earlier = ktime_get();
+ uptr_len = __remap_sgt(uptr_table, pfn_table, pfn_num, uncached);
+ if (uptr_len < 0) {
+ ret = uptr_len;
+ goto err;
+ }
+
+ LOG_TRACE("[%u]: Remap SGT elapse:%lld ms.\n",
+ drv->uid, ktime_ms_delta(ktime_get(), kt_earlier));
+
+ /* Copy the mapped results back to user space */
+ io.version = DHP_DRV_VER;
+ io.type = AML_DHP_TYPE_MEM;
+ io.base.dst.type = AML_MEM_TYPE_SG_TBL;
+ io.base.dst.payload = uptr_len;
+
+ if (copy_to_user((void __user *)io.base.dst.sgt, uptr_table, pfn_num * sizeof(u64)) ||
+ copy_to_user(uarg, &io, sizeof(io))) {
+ vm_munmap(uptr_table[0], uptr_len);
+ ret = -EFAULT;
+ }
+
+ LOG_DEBUG("[%u]: DU Mapped SGT to uptr: %lx, size: %u\n",
+ drv->uid, uptr_table[0], uptr_len);
+err:
+ vfree(pfn_table);
+ vfree(uptr_table);
+
+ return ret;
+}
+
+/**
+ * __aml_dhp_sgt_sync - Synchronizes a scatter-gather table for CPU or device access.
+ * @drv: Pointer to the DHP driver structure.
+ * @mem: Pointer to the memory descriptor structure.
+ * @dir: Direction of data transfer (e.g., DMA_FROM_DEVICE or DMA_TO_DEVICE).
+ * @for_cpu: Boolean indicating whether the synchronization is for CPU access (true)
+ * or device access (false).
+ *
+ * This function ensures proper synchronization of the scatter-gather table to allow
+ * CPU or device access. Unique PFNs are identified using a hash table, and synchronization
+ * operations are performed for each unique PFN. The function supports both directions
+ * of data transfer and handles resource cleanup in case of errors.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int __aml_dhp_sgt_sync(struct aml_dhp_drv *drv,
+ struct aml_du_mem *mem,
+ enum dma_data_direction dir,
+ bool for_cpu)
+{
+ struct pfn_hashtable *pfn_htbl = NULL;
+ struct pfn_node *nodes = NULL, *node;
+ u64 *pfn_table = NULL;
+ u32 pfn_num = 0;
+ int ret = 0;
+ int i;
+
+ pfn_num = get_du_mem_size(mem);
+ if (!pfn_num) {
+ LOG_ERR("DU size is invalid.\n");
+ return -EINVAL;
+ }
+
+ pfn_htbl = vmalloc(sizeof(*pfn_htbl));
+ if (!pfn_htbl) {
+ LOG_ERR("Failed to allocate memory for PFNs hashtable.\n");
+ return -ENOMEM;
+ }
+
+ nodes = vmalloc(pfn_num * sizeof(*node));
+ if (!nodes) {
+ ret = -ENOMEM;
+ LOG_ERR("Failed to allocate memory for PFNs nodes.\n");
+ goto err;
+ }
+
+ pfn_table = vmalloc(pfn_num * sizeof(u64));
+ if (!pfn_table) {
+ ret = -ENOMEM;
+ LOG_ERR("Failed to allocate memory for PFNs table.\n");
+ goto err;
+ }
+
+ if (copy_from_user(pfn_table, (void __user *)mem->sgt, pfn_num * sizeof(u64))) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ hash_init(pfn_htbl->tbl);
+
+ /* Calculate unique PFNs using hash table */
+ for (i = 0; i < pfn_num; i++) {
+ u64 pfn = pfn_table[i];
+ u32 hash_index = hash_64(pfn, 10);
+ bool found = false;
+
+ /* Iterate over the bucket corresponding to the PFN's hash index */
+ hash_for_each_possible(pfn_htbl->tbl, node, hash, hash_index) {
+ if (node->pfn == pfn) {
+ found = true;
+ break;
+ }
+ }
+
+ if (found)
+ continue;
+
+ if (for_cpu)
+ dma_sync_single_for_cpu(drv->dev, (ulong)__pfn_to_phys(pfn), PAGE_SIZE, dir);
+ else
+ dma_sync_single_for_device(drv->dev, (ulong)__pfn_to_phys(pfn), PAGE_SIZE, dir);
+
+ /* Insert the unique PFN into the hash table */
+ node = &nodes[i];
+ node->pfn = pfn;
+ hash_add(pfn_htbl->tbl, &node->hash, hash_index);
+ }
+err:
+ vfree(pfn_table);
+ vfree(nodes);
+ vfree(pfn_htbl);
+
+ return ret;
+}
+
+/**
+ * aml_dhp_mem_begin_cpu_access - Begins CPU access for a memory region.
+ * @drv: Pointer to the DHP driver structure.
+ * @mem: Pointer to the memory descriptor structure.
+ * @dir: Direction of data transfer (e.g., DMA_FROM_DEVICE or DMA_TO_DEVICE).
+ *
+ * This function prepares a memory region for CPU access by performing necessary
+ * synchronization. It handles different memory types, including physical addresses,
+ * PFNs, and scatter-gather tables. Synchronization ensures consistency of data for CPU reads/writes.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int aml_dhp_mem_begin_cpu_access(struct aml_dhp_drv *drv,
+ struct aml_du_mem *mem,
+ enum dma_data_direction dir)
+{
+ int ret = -1;
+
+ LOG_DEBUG("[%u]: %s, memtype:%d, addr:%lx, size:%u, uncached:%d, dirt %d\n",
+ drv->uid, __func__, mem->type, get_du_mem_addr(mem),
+ mem->size, mem->uncached, dir);
+
+ switch (mem->type) {
+ case AML_MEM_TYPE_PFN:
+ dma_sync_single_for_cpu(drv->dev, get_du_mem_addr(mem), mem->size, dir);
+ break;
+ case AML_MEM_TYPE_PHY_ADDR:
+ dma_sync_single_for_cpu(drv->dev, mem->addr, mem->size, dir);
+ break;
+ case AML_MEM_TYPE_SG_TBL:
+ /* Get memory size for data unit (DU) */
+ ret = __aml_dhp_sgt_sync(drv, mem, dir, true);
+ if (ret) {
+ LOG_ERR("Failed to memory sync, ret=%d.\n", ret);
+ return ret;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * aml_dhp_mem_end_cpu_access - Ends CPU access for a memory region.
+ * @drv: Pointer to the DHP driver structure.
+ * @mem: Pointer to the memory descriptor structure.
+ * @dir: Direction of data transfer (e.g., DMA_FROM_DEVICE or DMA_TO_DEVICE).
+ *
+ * This function concludes CPU access for a memory region and prepares the memory
+ * for device access by performing the necessary synchronization. It ensures data
+ * consistency for device operations after the CPU is done accessing the memory.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int aml_dhp_mem_end_cpu_access(struct aml_dhp_drv *drv,
+ struct aml_du_mem *mem,
+ enum dma_data_direction dir)
+{
+ int ret = -1;
+
+ LOG_DEBUG("[%u]: %s, memtype:%d, addr:%lx, size:%u, uncached:%d, dirt %d\n",
+ drv->uid, __func__, mem->type, get_du_mem_addr(mem),
+ mem->size, mem->uncached, dir);
+
+ switch (mem->type) {
+ case AML_MEM_TYPE_PFN:
+ dma_sync_single_for_device(drv->dev, get_du_mem_addr(mem), mem->size, dir);
+ break;
+ case AML_MEM_TYPE_PHY_ADDR:
+ dma_sync_single_for_device(drv->dev, mem->addr, mem->size, dir);
+ break;
+ case AML_MEM_TYPE_SG_TBL:
+ /* Get memory size for data unit (DU). */
+ ret = __aml_dhp_sgt_sync(drv, mem, dir, false);
+ if (ret) {
+ LOG_ERR("Failed to memory sync, ret=%d.\n", ret);
+ return ret;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * aml_dhp_mem_sync - Synchronizes memory for CPU or device access based on command.
+ * @drv: Pointer to the DHP driver structure.
+ * @cmd: Command specifying the type of synchronization (e.g., read or write).
+ * @arg: User-space pointer to the ioctl data structure.
+ *
+ * This function provides a unified interface for synchronizing memory access between
+ * CPU and device. It validates input parameters, determines the synchronization direction,
+ * and delegates to the appropriate helper function based on the command (e.g., begin or end access).
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static long aml_dhp_mem_sync(struct aml_dhp_drv *drv, u32 cmd, ulong arg)
+{
+ struct aml_dhp_ioctl_data __user *uarg = (void *)arg;
+ struct aml_dhp_ioctl_data io;
+ struct aml_du_mem *mem = &io.mem;
+ enum dma_data_direction direction;
+ int ret;
+
+ if (copy_from_user(&io, (void __user *) uarg, sizeof(io)))
+ return -EFAULT;
+
+ if (mem->syncflag & ~DHP_MEM_SYNC_VALID_FLAGS_MASK)
+ return -EINVAL;
+
+ switch (mem->syncflag & DHP_MEM_SYNC_RW) {
+ case DHP_MEM_SYNC_READ:
+ direction = DMA_FROM_DEVICE;
+ break;
+ case DHP_MEM_SYNC_WRITE:
+ direction = DMA_TO_DEVICE;
+ break;
+ case DHP_MEM_SYNC_RW:
+ direction = DMA_BIDIRECTIONAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (io.mem.syncflag & DHP_MEM_SYNC_END)
+ ret = aml_dhp_mem_end_cpu_access(drv, mem, direction);
+ else
+ ret = aml_dhp_mem_begin_cpu_access(drv, mem, direction);
+
+ return ret;
+}
+
+/**
* aml_dhp_ioctl - Handle IOCTL commands for the driver.
* @file: Pointer to the file structure.
* @cmd: IOCTL command.
@@ -928,8 +1633,16 @@
LOG_ERR("Failed to mmap.\n");
break;
}
- case IOCTL_DHP_SCT_MAP: {
- //TODO
+ case IOCTL_DHP_SGT_MAP: {
+ ret = du_sgt_mmap(drv, arg);
+ if (ret)
+ LOG_ERR("Failed to sgt mmap.\n");
+ break;
+ }
+ case IOCTL_DHP_MEM_SYNC: {
+ ret = aml_dhp_mem_sync(drv, cmd, arg);
+ if (ret)
+ LOG_ERR("Failed to memory sync.\n");
break;
}
case IOCTL_DHP_SET_TASK: {
@@ -941,7 +1654,7 @@
return -EINVAL;
}
- return 0;
+ return ret;
}
/**
@@ -1016,12 +1729,13 @@
INIT_LIST_HEAD(&drv->node);
init_waitqueue_head(&drv->du_wq);
kref_init(&drv->ref);
+ drv->dev = dev->dev;
INIT_KFIFO(drv->du_free);
INIT_KFIFO(drv->du_done);
ver_to_string(DHP_DRV_VER, ver);
- drv->du_pool = vzalloc(DU_SIZE * sizeof(*drv->du_pool));
+ drv->du_pool = vmalloc(DU_SIZE * sizeof(*drv->du_pool));
if (!drv->du_pool) {
LOG_ERR("Alloc task pool fail.\n");
kfree(drv);
@@ -1034,6 +1748,8 @@
du->dev = dev->dev;
du->priv = drv;
init_completion(&du->comp);
+ mutex_init(&du->lock);
+ INIT_LIST_HEAD(&du->attachments);
kfifo_put(&drv->du_free, du);
}
@@ -1207,7 +1923,7 @@
struct aml_dhp_dev *dev = NULL;
int ret = -1;
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ dev = kmalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
@@ -1242,6 +1958,12 @@
goto err3;
}
+ ret = dma_coerce_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ LOG_ERR("Set dma mask fail.\n");
+ goto err4;
+ }
+
INIT_LIST_HEAD(&dev->inst_head);
mutex_init(&dev->mutex);
dhp_func_reg(__aml_dhp_task);
@@ -1250,6 +1972,8 @@
LOG_INFO("DHP driver init success.\n");
return 0;
+err4:
+ device_destroy(&dhp_class, dev->dev_no);
err3:
class_unregister(&dhp_class);
err2:
diff --git a/drivers/frame_provider/aml_dhp/aml_dhp_drv.h b/drivers/frame_provider/aml_dhp/aml_dhp_drv.h
index b31c093..2c3c507 100644
--- a/drivers/frame_provider/aml_dhp/aml_dhp_drv.h
+++ b/drivers/frame_provider/aml_dhp/aml_dhp_drv.h
@@ -40,24 +40,44 @@
/*
* struct data_unit - Represents a data unit managed by the Data Handler Proxy (DHP).
*
- * @sg_tbl : Scatter-gather table containing the memory scatter list information for DMA operations.
- * @dbuf : Pointer to the DMA buffer associated with this data unit.
- * @vmah : VMA handler information, including reference count and functions for memory mapping.
- * @uncached: Indicates if the data unit is using uncached memory mode (true if enabled).
- * @refcnt : Reference count for managing the lifetime of the DMA buffer.
- * @node : List node entry for linking this data unit within a list.
- * @comp : Completion synchronization primitive to signal when the data unit has finished processing.
- * @dev : Pointer to the GDHP device associated with this data unit, used for device operations.
- * @priv : Pointer to private context information specific to the GDHP implementation.
- * @pb : Pointer to the base structure containing additional data unit-related information.
- * @src : Source information structure containing data to be processed.
- * @dst : Destination information structure where the processed data will be stored.
+ * @attachments: List of additional resources or components attached to this data unit,
+ * such as associated metadata or linked resources.
+ * @lock : Mutex to ensure thread-safe access and modification of the data unit's members.
+ * @sg_tbl : Scatter-gather table containing memory descriptors for DMA operations,
+ * providing efficient handling of non-contiguous memory regions.
+ * @dbuf : Pointer to the DMA buffer associated with this data unit,
+ * used for transferring data between the CPU and a device.
+ * @vmah : Virtual Memory Area (VMA) handler containing metadata for memory mappings
+ * and helper functions for managing user-space memory views.
+ * @uncached : Indicates whether the memory associated with this data unit is uncached.
+ * If true, memory access bypasses the CPU cache for coherent operations.
+ * @mapped : Flag indicating whether the data unit has been mapped to a virtual memory
+ * address space for device or user-space access.
+ * @refcnt : Reference counter to manage the lifecycle of the data unit.
+ * Ensures the structure remains valid until all references are released.
+ * @node : List node for linking this data unit into a list,
+ * allowing management of multiple data units as a collection.
+ * @comp : Completion synchronization primitive used to notify waiting threads
+ * when processing of the data unit is complete.
+ * @dev : Pointer to the DHP (Data Handler Proxy) device associated with this data unit,
+ * used for managing hardware-specific operations and resources.
+ * @priv : Pointer to private context information specific to the DHP implementation,
+ * allowing customization or extension for specific use cases.
+ * @pb : Pointer to the base structure `aml_du_base`, which provides shared information
+ * and common configuration for the data unit.
+ * @src : Source memory or data structure representing the input for processing by this data unit.
+ * Typically includes data addresses, sizes, and flags.
+ * @dst : Destination memory or data structure where the processed output of this data unit
+ * will be stored. Includes similar attributes as the source.
*/
struct data_unit {
+ struct list_head attachments;
+ struct mutex lock;
struct sg_table sg_tbl;
struct dma_buf *dbuf;
struct du_vma_hdr vmah;
bool uncached;
+ bool mapped;
refcount_t refcnt;
struct list_head node;
@@ -69,24 +89,38 @@
};
/*
- * struct aml_dhp_drv - Represents the driver context for the Data Handler Proxy (DHP).
+ * struct aml_dhp_drv - Represents the driver context for the Data Handler Proxy (DHP).
*
* @ref : Reference count for managing the lifetime of the driver instance.
+ * Ensures the driver context remains valid while in use.
* @user : Name of the user task that created this driver instance, stored in a character array.
- * @uid : Unique identifier for the driver instance, used for tracking.
- * @node : List node entry for linking this driver instance within a list of DHP instances.
+ * Useful for debugging and tracking the task that initiated the driver.
+ * @uid : Unique identifier for the driver instance, used for tracking and distinguishing
+ * between multiple instances of the driver.
+ * @dev : Pointer to the underlying device structure associated with this driver instance.
+ * Represents the hardware context managed by the driver.
+ * @node : List node entry for linking this driver instance within a global or subsystem-specific
+ * list of DHP driver instances.
* @du_head : Head of the list containing active data units (DUs) associated with this driver.
+ * Manages the lifecycle and operations on active DUs.
* @du_mutex : Mutex for synchronizing access to the DU list and related operations.
- * @du_wq : Wait queue for managing pending data unit tasks and signaling completion.
+ * Ensures thread-safe management of data units.
+ * @du_wq : Wait queue for managing pending data unit tasks and signaling when operations complete.
+ * Supports efficient task coordination.
* @du_pending : Indicates if there are pending tasks associated with data units.
- * @du_pool : Pointer to an array of data units (DUs) managed by this driver.
+ * Used as a counter or flag to track the number of unfinished tasks.
+ * @du_pool : Pointer to an array of preallocated data units (DUs) managed by this driver.
+ * Acts as a resource pool to minimize dynamic memory allocations during runtime.
* @du_free : FIFO queue for tracking available data units that can be reused.
+ * Optimizes resource usage by recycling completed DUs.
* @du_done : FIFO queue for tracking completed data units.
+ * Allows the driver to efficiently handle post-processing or release operations.
*/
struct aml_dhp_drv {
struct kref ref;
char user[TASK_COMM_LEN];
u32 uid;
+ struct device *dev;
struct list_head node;
struct list_head du_head;
struct mutex du_mutex;
diff --git a/drivers/frame_provider/aml_dhp/aml_dhp_if.h b/drivers/frame_provider/aml_dhp/aml_dhp_if.h
index 23388a3..5abbf12 100644
--- a/drivers/frame_provider/aml_dhp/aml_dhp_if.h
+++ b/drivers/frame_provider/aml_dhp/aml_dhp_if.h
@@ -24,23 +24,73 @@
#include "aml_dhp_types.h"
-#define _IOCTL_DHP_MAGIC 'Q'
-
-/* The processed data is returned to userspace as an FD. */
-#define IOCTL_DHP_GET_FD _IOR(_IOCTL_DHP_MAGIC, 0, __u64)
-
-/* Used for mapping of a page. */
-#define IOCTL_DHP_MMAP _IOWR(_IOCTL_DHP_MAGIC, 1, __u64)
-
-/* Used for mapping of a page list. */
-#define IOCTL_DHP_SCT_MAP _IOWR(_IOCTL_DHP_MAGIC, 2, __u64) //TODO
-
-/* Userspace submits a data processing task to the proc driver. */
-#define IOCTL_DHP_SET_TASK _IOW(_IOCTL_DHP_MAGIC, 3, __u64) //TODO
-
+/*
+ * DHP_VER - Macro to encode the DHP version in a major.minor.patch format.
+ *
+ * @a: Major version number (16 bits).
+ * @b: Minor version number (8 bits).
+ * @c: Patch version number (8 bits).
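+ *
+ * For example, DHP_VER(1, 2, 3) encodes to 0x00010203.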
+ */
#define DHP_VER(a,b,c) (((a) << 16) + ((b) << 8) + (c))
-// Maximum number of metadata entries for data units
+/*
+ * IOCTL commands for the Data Handler Proxy (DHP) driver:
+ *
+ * - IOCTL_DHP_GET_FD: Retrieves a file descriptor (FD) for processed data, allowing
+ * userspace applications to access the output.
+ *
+ * - IOCTL_DHP_MMAP: Maps a physically contiguous memory region into userspace,
+ * enabling direct access to DMA memory managed by the driver.
+ *
+ * - IOCTL_DHP_SGT_MAP: Maps a scatter-gather table, facilitating the handling of
+ * non-contiguous memory regions for complex data processing tasks.
+ *
+ * - IOCTL_DHP_SET_TASK: Submits a data processing task to the driver. Userspace applications
+ * provide task descriptors containing operation details (TODO: define task descriptor structure).
+ *
+ * - IOCTL_DHP_MEM_SYNC: Ensures cache coherency between the CPU and device memory for specified
+ * memory regions. This is critical for accurate data transfer during DMA operations.
+ */
+#define _IOCTL_DHP_MAGIC 'Q'
+#define IOCTL_DHP_GET_FD _IOR(_IOCTL_DHP_MAGIC, 0, __u64)
+#define IOCTL_DHP_MMAP _IOWR(_IOCTL_DHP_MAGIC, 1, __u64)
+#define IOCTL_DHP_SGT_MAP _IOWR(_IOCTL_DHP_MAGIC, 2, __u64)
+#define IOCTL_DHP_SET_TASK _IOW(_IOCTL_DHP_MAGIC, 3, __u64)
+#define IOCTL_DHP_MEM_SYNC _IOW(_IOCTL_DHP_MAGIC, 4, __u64)
+
+/*
+ * Memory synchronization flags for the Data Handler Proxy (DHP) driver:
+ *
+ * - DHP_MEM_SYNC_READ: Synchronize memory for reading (device-to-CPU), ensuring
+ * data in device memory is visible to the CPU.
+ *
+ * - DHP_MEM_SYNC_WRITE: Synchronize memory for writing (CPU-to-device), ensuring
+ * data in CPU memory is visible to the device.
+ *
+ * - DHP_MEM_SYNC_RW: Combines both read and write synchronization, allowing
+ * bi-directional cache coherency.
+ *
+ * - DHP_MEM_SYNC_START: Indicates synchronization at the start of memory usage,
+ * preparing memory for operations.
+ *
+ * - DHP_MEM_SYNC_END: Indicates synchronization at the end of memory usage, finalizing
+ * memory operations and ensuring data integrity.
+ *
+ * - DHP_MEM_SYNC_VALID_FLAGS_MASK: Defines the valid combination of flags for memory
+ * synchronization operations, restricting flags to only supported values.
+ */
+#define DHP_MEM_SYNC_READ (1 << 0)
+#define DHP_MEM_SYNC_WRITE (2 << 0)
+#define DHP_MEM_SYNC_RW (DHP_MEM_SYNC_READ | DHP_MEM_SYNC_WRITE)
+#define DHP_MEM_SYNC_START (0 << 2)
+#define DHP_MEM_SYNC_END (1 << 2)
+#define DHP_MEM_SYNC_VALID_FLAGS_MASK \
+ (DHP_MEM_SYNC_RW | DHP_MEM_SYNC_END)
+
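+/*
+ * Example: a client that has finished writing to a cached buffer passes
+ * (DHP_MEM_SYNC_WRITE | DHP_MEM_SYNC_END) in @syncflag so that CPU caches
+ * are flushed before device access. The flag values mirror the
+ * DMA_BUF_SYNC_* flags in <linux/dma-buf.h>.
+ */
+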
+/* Specifies the upper limit for data size that can be transferred or processed in a single task. */
+#define IOCTL_DHP_PAYLOAD_MAX (512)
+
+/* Maximum number of metadata entries for data units. */
#define AML_DU_META_MAX (32)
/*
@@ -127,11 +177,11 @@
union {
struct aml_du_base base;
struct aml_du_mem mem;
- __u32 data[64];
+ __u8 data[IOCTL_DHP_PAYLOAD_MAX];
};
__s32 fd;
__u32 reserved[16];
-};
+} __attribute__((packed));
/**
* aml_dhp_request - Perform data processing using source and destination memory buffers.
diff --git a/drivers/frame_provider/aml_dhp/aml_dhp_types.h b/drivers/frame_provider/aml_dhp/aml_dhp_types.h
index c53cf6c..634eece 100644
--- a/drivers/frame_provider/aml_dhp/aml_dhp_types.h
+++ b/drivers/frame_provider/aml_dhp/aml_dhp_types.h
@@ -22,44 +22,69 @@
#include <linux/types.h>
-// Data Handler Proxy (DHP) supported data types
+/*
+ * Data Handler Proxy (DHP) supported data types and memory types.
+ *
+ * TAG(a, b, c, d):
+ * Macro to generate a 32-bit identifier using four character constants.
+ * This is used to define unique data type tags.
+ */
#define TAG(a, b, c, d) ((a << 24) | (b << 16) | (c << 8) | d)
+/*
+ * DHP Supported Data Types:
+ * AML_DHP_TYPE_AVBCD: Represents Amlogic video buffer compression decoding tasks.
+ * AML_DHP_TYPE_AVBCE: Represents Amlogic video buffer compression encoding tasks.
+ * AML_DHP_TYPE_MEM: Represents general memory processing tasks.
+ */
#define AML_DHP_TYPE_AVBCD TAG('V', 'B', 'C', 'D')
#define AML_DHP_TYPE_AVBCE TAG('V', 'B', 'C', 'E')
#define AML_DHP_TYPE_MEM TAG('P', 'M', 'E', 'M')
//ADD...
-// Memory type definitions for various address types
-#define AML_MEM_TYPE_PFN (1) // Page Frame Number
-#define AML_MEM_TYPE_PHY_ADDR (2) // Physical address
-#define AML_MEM_TYPE_KPTR_ADDR (3) // TODO: Kernel pointer address
-#define AML_MEM_TYPE_UPTR_ADDR (4) // User pointer address
-#define AML_MEM_TYPE_SG_TBL (5) // TODO: Scatter-gather table
+/*
+ * Memory Type Definitions (AML_MEM_TYPE_*):
+ * These constants define the type of memory used in data processing tasks:
+ * AML_MEM_TYPE_PFN: Memory described by a Page Frame Number (PFN).
+ * AML_MEM_TYPE_PHY_ADDR: Memory described by a physical address.
+ * AML_MEM_TYPE_KPTR_ADDR: Reserved for future use, represents a kernel pointer address (TODO).
+ * AML_MEM_TYPE_UPTR_ADDR: Memory described by a user-space pointer.
+ * AML_MEM_TYPE_SG_TBL: Memory described by a scatter-gather table of PFNs.
+ */
+#define AML_MEM_TYPE_PFN (1)
+#define AML_MEM_TYPE_PHY_ADDR (2)
+#define AML_MEM_TYPE_KPTR_ADDR (3)
+#define AML_MEM_TYPE_UPTR_ADDR (4)
+#define AML_MEM_TYPE_SG_TBL (5)
/*
- * struct aml_du_mem - Represents memory information for a data unit.
+ * struct aml_du_mem - Represents memory information for a data unit in the DHP driver.
*
- * @type : Type of memory used (e.g., PFN, physical address, etc.).
- * @addr : Generic address for the memory (used based on type).
- * @pfn : Page Frame Number if the type is AML_MEM_TYPE_PFN.
- * @kptr : Kernel pointer if the type is AML_MEM_TYPE_KPTR_ADDR (TODO).
- * @uptr : User pointer if the type is AML_MEM_TYPE_UPTR_ADDR.
- * @sgt : Scatter-gather table if the type is AML_MEM_TYPE_SG_TBL (TODO).
- * @size : Size of the memory region.
- * @payload : Additional data or flags associated with the memory.
+ * @type : Type of memory used (e.g., PFN, physical address, etc.), as defined by AML_MEM_TYPE_*.
+ * @addr : Generic address for the memory. This is used when the specific memory type
+ * does not require further specialization (e.g., physical address).
+ * @pfn : Page Frame Number. Used when @type is AML_MEM_TYPE_PFN.
+ * @kptr : Kernel pointer. Reserved for future use when @type is AML_MEM_TYPE_KPTR_ADDR.
+ * @uptr : User pointer. Used when @type is AML_MEM_TYPE_UPTR_ADDR.
+ * @sgt : User pointer to a table of PFNs. Used when @type is AML_MEM_TYPE_SG_TBL.
+ * @size : Size of the memory region in bytes.
+ * @payload : Auxiliary data for the memory unit (purpose and content application-specific).
+ * @uncached : Indicates whether the memory region uses uncached memory. Non-zero if true.
+ * @syncflag : Synchronization flags for memory operations, based on DHP_MEM_SYNC_* definitions.
*/
struct aml_du_mem {
- __u32 type;
+ __u32 type;
union {
__u64 addr;
__u64 pfn;
__u64 kptr; // TODO
__u64 uptr;
- __u64 sgt; // TODO
+ __u64 sgt;
};
- __u32 size;
- __u32 payload;
+ __u32 size;
+ __u32 payload;
+ __u32 uncached;
+ __u64 syncflag;
} __attribute__((packed));
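+
+/*
+ * Example: describing a physically contiguous, cached region:
+ *
+ *   struct aml_du_mem m = {
+ *           .type = AML_MEM_TYPE_PHY_ADDR,
+ *           .addr = phys_addr,
+ *           .size = len,
+ *           .uncached = 0,
+ *   };
+ */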
/*
diff --git a/drivers/frame_provider/aml_dhp/dhp_daemon/aml_dhp_common.h b/drivers/frame_provider/aml_dhp/dhp_daemon/aml_dhp_common.h
index 1498156..e60675c 100644
--- a/drivers/frame_provider/aml_dhp/dhp_daemon/aml_dhp_common.h
+++ b/drivers/frame_provider/aml_dhp/dhp_daemon/aml_dhp_common.h
@@ -25,6 +25,10 @@
#define TAG(a, b, c, d)\
((a << 24) | (b << 16) | (c << 8) | d)
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef uint64_t u64;
/**
* container_of - cast a member of a structure out to the containing structure
diff --git a/drivers/frame_provider/aml_dhp/dhp_daemon/aml_dhp_core.c b/drivers/frame_provider/aml_dhp/dhp_daemon/aml_dhp_core.c
index d3d30ea..a7eb910 100644
--- a/drivers/frame_provider/aml_dhp/dhp_daemon/aml_dhp_core.c
+++ b/drivers/frame_provider/aml_dhp/dhp_daemon/aml_dhp_core.c
@@ -66,6 +66,9 @@
const struct aml_dhp_ioctl_data *io =
container_of(base, struct aml_dhp_ioctl_data, base);
struct aml_du_avbcd *avbcd = (struct aml_du_avbcd *)base->meta;
+ DhpMemOps mOps = { .sgt_mmap = dhp_mem_sgt_mmap,
+ .sgt_msync = dhp_mem_sgt_sync,
+ .unmmap = dhp_mem_munmap };
struct timeval t0, t1;
gettimeofday(&t0, NULL);
@@ -88,6 +91,7 @@
iomem.mem.type = AML_MEM_TYPE_PHY_ADDR;
iomem.mem.addr = io->base.dst.addr;
iomem.mem.size = io->base.dst.size;
+ iomem.mem.uncached = io->base.dst.uncached;
if (dhp_dev_ioctl(dev, IOCTL_DHP_MMAP, &iomem)) {
LOG_ERROR("IOCTL_DHP_MMAP failed, addr:%llx\n", iomem.mem.addr);
@@ -98,9 +102,19 @@
dst_yuv = (void *)iomem.mem.uptr;
dst_size = iomem.mem.size;
- LOG_DEBUG("Mapping YUV buffer: %p, size: %u\n", dst_yuv, dst_size);
+ LOG_DEBUG("Mapping Header buffer:%p, size:%u\n", header, avbcd->hsize);
+ LOG_DEBUG("Mapping YUV buffer:%p, size:%u\n", dst_yuv, dst_size);
- aml_avbc_decode(header, avbcd->width, avbcd->height, stride, avbcd->bitdep, dst_yuv, dst_size, dhp_page_mmap, dev);
+ aml_avbc_decode(header,
+ avbcd->width,
+ avbcd->height,
+ stride,
+ avbcd->bitdep,
+ dst_yuv,
+ dst_size,
+ io->base.src.uncached,
+ &mOps,
+ dev);
gettimeofday(&t1, NULL);
LOG_VERBOSE("%s, Total elapse: %lu ms.\n",
@@ -109,6 +123,11 @@
if (dump_data)
dump_yuv_data(avbcd->pts, avbcd->width, avbcd->height, stride, avbcd->bitdep, dst_yuv, dst_size);
+ u64 flags = DHP_MEM_SYNC_READ | DHP_MEM_SYNC_END;
+ dhp_mem_sync(dev, io->base.dst.addr, io->base.dst.size, flags);
+
+ dhp_dbuf_sync(io->fd, flags);
+
dhp_dbuf_munmap(header, avbcd->hsize);
dhp_dbuf_munmap(dst_yuv, dst_size);
diff --git a/drivers/frame_provider/aml_dhp/dhp_daemon/aml_dhp_daemon.c b/drivers/frame_provider/aml_dhp/dhp_daemon/aml_dhp_daemon.c
index 7f404ad..eb9de17 100644
--- a/drivers/frame_provider/aml_dhp/dhp_daemon/aml_dhp_daemon.c
+++ b/drivers/frame_provider/aml_dhp/dhp_daemon/aml_dhp_daemon.c
@@ -22,6 +22,7 @@
#include <stdint.h>
#include <string.h>
#include <linux/version.h>
+#include <linux/dma-buf.h>
#include "aml_dhp_common.h"
#include "aml_dhp_daemon.h"
@@ -30,8 +31,8 @@
#define DHP_DAEMON_VER TAG('v', 1, 0, 0)
volatile sig_atomic_t keep_running = 1;
-unsigned int dump_data = 0;
-unsigned int g_idx = -1;
+u32 dump_data = 0;
+u32 g_idx = -1;
crc_ctx_t *g_crc;
/*
@@ -81,37 +82,151 @@
}
}
-void *dhp_dbuf_mmap(int fd, unsigned int len, int prot, int flags, unsigned int offset)
+void *dhp_dbuf_mmap(int fd, u32 len, int prot, int flags, u32 offset)
{
- void *addr = mmap(NULL, len, prot, flags, fd, offset);
+ void *addr = NULL;
+
+ LOG_TRACE("%s: DBUF mmap, fd:%d, len:%u, prot:%x, flags:%x, off:%u\n",
+ __func__, fd, len, prot, flags, offset);
+
+ addr = mmap(NULL, len, prot, flags, fd, offset);
if (addr == MAP_FAILED) {
- LOG_ERROR("Memory mapping failed: fd=%d, len=%u, offset=%u\n", fd, len, offset);
+ LOG_ERROR("dmabuf mmap failed: fd=%d len=%u offset=%u\n", fd, len, offset);
return NULL;
}
return addr;
}
-void *dhp_page_mmap(void *priv, unsigned int pfn)
+void dhp_dbuf_munmap(void *vaddr, u32 len)
+{
+ LOG_TRACE("%s: DBUF munmap, vaddr:%p, len:%u\n",
+ __func__, vaddr, len);
+
+ if (munmap(vaddr, len) == -1) {
+ LOG_ERROR("dmabuf munmap failed: addr=%p len=%u\n", vaddr, len);
+ }
+}
+
+void dhp_dbuf_sync(int fd, u32 flags)
+{
+ const struct dma_buf_sync sync = { flags };
+
+ LOG_TRACE("%s: DBUF sync, flags:%x, len:%u\n",
+ __func__, flags);
+
+ if (ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync)) {
+ LOG_ERROR("dmabuf sync failed: fd=%d\n", fd);
+ }
+}
+
+void *dhp_page_mmap(void *priv, u32 pfn, u32 uncached)
{
Device *dev = priv;
- struct aml_dhp_ioctl_data io;
+ struct aml_dhp_ioctl_data io = {0};
- io.mem.type = AML_MEM_TYPE_PFN;
- io.mem.pfn = pfn;
- io.mem.size = PAGE_SIZE;
+ io.mem.type = AML_MEM_TYPE_PFN;
+ io.mem.pfn = pfn;
+ io.mem.size = PAGE_SIZE;
+
+ LOG_TRACE("%s: Page mmap, PFN:%lx, uncached:%u\n",
+ __func__, pfn, uncached);
if (dhp_dev_ioctl(dev, IOCTL_DHP_MMAP, &io)) {
- LOG_ERROR("Failed to map page (PFN: %x)\n", pfn);
+ LOG_ERROR("page mmap failed: PFN=%x\n", pfn);
return NULL;
}
return (void *)io.mem.uptr;
}
-void dhp_dbuf_munmap(void *vaddr, unsigned int len)
+void *dhp_mem_mmap(void *priv, u64 addr, u32 size, u32 uncached)
{
+ Device *dev = priv;
+ struct aml_dhp_ioctl_data io = {0};
+
+ io.mem.type = AML_MEM_TYPE_PHY_ADDR;
+ io.mem.addr = addr;
+ io.mem.size = size;
+ io.mem.uncached = uncached;
+
+ LOG_TRACE("%s: MEM mmap, addr:%llx, size:%u, uncached:%u\n",
+ __func__, addr, size, uncached);
+
+ if (dhp_dev_ioctl(dev, IOCTL_DHP_MMAP, &io)) {
+ LOG_ERROR("mem mmap failed: addr=%lx size=%u\n", addr, size);
+ return NULL;
+ }
+
+ return (void *)io.mem.uptr;
+}
+
+void dhp_mem_sync(void *priv, u64 addr, u32 size, u32 flags)
+{
+ Device *dev = priv;
+ struct aml_dhp_ioctl_data io = {0};
+
+ io.mem.type = AML_MEM_TYPE_PHY_ADDR;
+ io.mem.addr = addr;
+ io.mem.size = size;
+ io.mem.syncflag = flags;
+
+ LOG_TRACE("%s: MEM sync, addr:%llx, size:%u, flags:%x\n",
+ __func__, addr, size, flags);
+
+ if (dhp_dev_ioctl(dev, IOCTL_DHP_MEM_SYNC, &io)) {
+ LOG_ERROR("mem sync failed: addr=%lx size=%u flags=%x\n", addr, size, flags);
+ }
+}
+
+int dhp_mem_sgt_mmap(void *priv, u64 *uptr_array, u64 *pfn_array, u32 num, u32 uncached)
+{
+ Device *dev = priv;
+ struct aml_dhp_ioctl_data io = {0};
+
+ io.base.src.type = AML_MEM_TYPE_SG_TBL;
+ io.base.src.sgt = (unsigned long long)pfn_array;
+ io.base.src.size = num;
+
+ io.base.dst.type = AML_MEM_TYPE_SG_TBL;
+ io.base.dst.sgt = (unsigned long long)uptr_array;
+ io.base.dst.size = num;
+ io.base.dst.uncached = uncached;
+
+ LOG_TRACE("%s: SGT mmap, PFNs:%u, uncached:%u\n",
+ __func__, num, uncached);
+
+ if (dhp_dev_ioctl(dev, IOCTL_DHP_SGT_MAP, &io)) {
+ LOG_ERROR("SGT mmap failed: PFNs=%u\n", num);
+ return -1;
+ }
+
+ return io.base.dst.payload;
+}
+
+void dhp_mem_sgt_sync(void *priv, u64 *pfn_array, u32 num, u32 flags)
+{
+ Device *dev = priv;
+ struct aml_dhp_ioctl_data io = {0};
+
+ io.mem.type = AML_MEM_TYPE_SG_TBL;
+ io.base.src.sgt = (unsigned long long)pfn_array;
+ io.mem.size = num;
+ io.mem.syncflag = flags;
+
+ LOG_TRACE("%s: SGT sync, PFNs:%u, flags:%x\n", __func__, num, flags);
+
+ if (dhp_dev_ioctl(dev, IOCTL_DHP_MEM_SYNC, &io)) {
+ LOG_ERROR("SGT sync failed: PFNs=%u flags=%x\n", num, flags);
+ }
+}
+
+void dhp_mem_munmap(void *priv, u8 *vaddr, u32 len)
+{
+ LOG_TRACE("%s: MEM munmmap, vaddr:%p, len:%u\n",
+ __func__, vaddr, len);
+
if (munmap(vaddr, len) == -1) {
- LOG_ERROR("Memory unmapping failed for address: %p, length: %u\n", vaddr, len);
+ LOG_ERROR("munmap failed: addr=%p len=%u\n", vaddr, len);
}
}
diff --git a/drivers/frame_provider/aml_dhp/dhp_daemon/aml_dhp_daemon.h b/drivers/frame_provider/aml_dhp/dhp_daemon/aml_dhp_daemon.h
index 58d7d4c..7e0add7 100644
--- a/drivers/frame_provider/aml_dhp/dhp_daemon/aml_dhp_daemon.h
+++ b/drivers/frame_provider/aml_dhp/dhp_daemon/aml_dhp_daemon.h
@@ -55,7 +55,30 @@
*
* Return: Pointer to the mapped memory on success, or NULL on failure.
*/
-void *dhp_dbuf_mmap(int fd, unsigned int len, int prot, int flags, unsigned int offset);
+void *dhp_dbuf_mmap(int fd, u32 len, int prot, int flags, u32 offset);
+
+/*
+ * dhp_dbuf_munmap() - Unmaps a previously mapped memory region.
+ *
+ * @vaddr: Pointer to the memory region to unmap.
+ * @len: Length of the memory region.
+ *
+ * This function unmaps a memory region that was previously mapped using `mmap`.
+ * It removes the mapping between the virtual address and the physical memory,
+ * making the region no longer accessible by the process.
+ */
+void dhp_dbuf_munmap(void *vaddr, u32 len);
+
+/*
+ * dhp_dbuf_sync() - Synchronizes the memory buffer with the device or CPU.
+ *
+ * @fd: File descriptor associated with the memory buffer.
+ * @flags: Flags indicating the synchronization type (e.g., read/write).
+ *
+ * This function ensures that the memory buffer associated with the file descriptor
+ * is properly synchronized, ensuring that changes are visible to the device or CPU.
+ */
+void dhp_dbuf_sync(int fd, u32 flags);
/*
* dhp_page_mmap() - Maps a physical frame number (PFN) into virtual memory.
@@ -69,17 +92,79 @@
*
* Return: Pointer to the mapped memory on success, or NULL on failure.
*/
-void *dhp_page_mmap(void *priv, unsigned int pfn);
+void *dhp_page_mmap(void *priv, u32 pfn, u32 uncached);
/*
- * dhp_dbuf_munmap() - Unmaps a previously mapped memory region.
+ * dhp_mem_mmap() - Maps a memory region into virtual memory.
*
- * @vaddr: Pointer to the memory region to unmap.
- * @len: Length of the memory region.
+ * @priv: Pointer to the Device structure.
+ * @addr: Starting address of the memory region to map.
+ * @size: Size of the memory region to map.
+ * @uncached: Flag indicating whether the memory should be mapped as uncached.
*
- * This function unmaps a memory region that was previously mapped using `mmap`.
+ * This function maps the specified memory region into the process's virtual address
+ * space. The mapping can optionally be made uncached based on the `uncached` flag.
+ *
+ * Return: Pointer to the mapped memory on success, or NULL on failure.
*/
-void dhp_dbuf_munmap(void *vaddr, unsigned int len);
+void *dhp_mem_mmap(void *priv, u64 addr, u32 size, u32 uncached);
+
+/*
+ * dhp_mem_sync() - Synchronizes a memory region between device and CPU.
+ *
+ * @priv: Pointer to the Device structure.
+ * @addr: Starting address of the memory region to synchronize.
+ * @size: Size of the memory region to synchronize.
+ * @flags: Flags indicating the synchronization type (e.g., read/write).
+ *
+ * This function ensures memory consistency between the device and the CPU for the
+ * specified memory region. The `flags` parameter determines whether the memory
+ * should be synchronized for reading, writing, or both.
+ */
+void dhp_mem_sync(void *priv, u64 addr, u32 size, u32 flags);
+
+/*
+ * dhp_mem_sgt_mmap() - Maps a list of physical frame numbers (PFNs) into virtual memory.
+ *
+ * @priv: Pointer to the Device structure.
+ * @uptr_array: Array of user-space pointers to map.
+ * @pfn_array: Array of physical frame numbers to map.
+ * @num: Number of entries in the `uptr_array` and `pfn_array`.
+ * @uncached: Flag indicating whether the memory should be mapped as uncached.
+ *
+ * This function maps a list of physical frame numbers (PFNs) into the process's
+ * virtual address space using an array of user-space pointers and physical
+ * frame numbers. The mapping can be made uncached based on the `uncached` flag.
+ *
+ * Return: The valid length of memory actually mapped to user space on
+ * success, or a negative error code on failure.
+ */
+int dhp_mem_sgt_mmap(void *priv, u64 *uptr_array, u64 *pfn_array, u32 num, u32 uncached);
+
+/*
+ * dhp_mem_sgt_sync() - Synchronizes a list of memory regions between device and CPU.
+ *
+ * @priv: Pointer to the Device structure.
+ * @pfn_array: Array of physical frame numbers to synchronize.
+ * @num: Number of entries in the `pfn_array`.
+ * @flags: Flags indicating the synchronization type (e.g., read/write).
+ *
+ * This function ensures memory consistency between the device and the CPU for a
+ * list of memory regions specified by the `pfn_array`. The `flags` parameter
+ * determines whether the memory should be synchronized for reading, writing, or both.
+ */
+void dhp_mem_sgt_sync(void *priv, u64 *pfn_array, u32 num, u32 flags);
+
+/*
+ * dhp_mem_munmap() - Unmaps a previously mapped memory region.
+ *
+ * @priv: Pointer to the Device structure.
+ * @vaddr: Pointer to the memory region to unmap.
+ * @len: Length of the memory region to unmap.
+ *
+ * This function unmaps a previously mapped memory region, removing the mapping
+ * between the virtual address and the physical memory.
+ */
+void dhp_mem_munmap(void *priv, u8 *vaddr, u32 len);
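+
+/*
+ * Combined usage sketch for the du-memory helpers declared above
+ * (illustrative; 'dev', 'phys' and 'size' are placeholders, and the
+ * DHP_MEM_SYNC_* flags from avbc_interface.h are assumed to apply):
+ *
+ *   u8 *va = dhp_mem_mmap(dev, phys, size, 0);
+ *   if (!va)
+ *       return -1;
+ *   ... CPU writes to va ...
+ *   dhp_mem_sync(dev, phys, size, DHP_MEM_SYNC_WRITE | DHP_MEM_SYNC_END);
+ *   dhp_mem_munmap(dev, va, size);
+ */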
/*
* dhp_dev_ioctl() - Performs an IOCTL operation on the device.
diff --git a/drivers/frame_provider/aml_dhp/dhp_daemon/prebuild/include/avbc_interface.h b/drivers/frame_provider/aml_dhp/dhp_daemon/prebuild/include/avbc_interface.h
index 5f35f3e..4e66c7a 100644
--- a/drivers/frame_provider/aml_dhp/dhp_daemon/prebuild/include/avbc_interface.h
+++ b/drivers/frame_provider/aml_dhp/dhp_daemon/prebuild/include/avbc_interface.h
@@ -8,42 +8,172 @@
#ifndef __AML_AVBC_DEC_IF_H__
#define __AML_AVBC_DEC_IF_H__
-/**
- * @brief Memory mapping function type definition.
+/*
+ * Memory synchronization flags for the Data Handler Proxy (DHP) driver:
*
- * This function type defines the prototype for memory mapping functions that
- * can be used by the decoder.
+ * - DHP_MEM_SYNC_READ: Synchronize memory for reading (device-to-CPU), ensuring
+ * data in device memory is visible to the CPU.
+ *
+ * - DHP_MEM_SYNC_WRITE: Synchronize memory for writing (CPU-to-device), ensuring
+ * data in CPU memory is visible to the device.
+ *
+ * - DHP_MEM_SYNC_RW: Combines both read and write synchronization, allowing
+ * bi-directional cache coherency.
+ *
+ * - DHP_MEM_SYNC_START: Indicates synchronization at the start of memory usage,
+ * preparing memory for operations.
+ *
+ * - DHP_MEM_SYNC_END: Indicates synchronization at the end of memory usage, finalizing
+ * memory operations and ensuring data integrity.
+ *
+ * - DHP_MEM_SYNC_VALID_FLAGS_MASK: Defines the valid combination of flags for memory
+ * synchronization operations, restricting flags to only supported values.
*/
-typedef void *(*mem_map_func)(void *priv, unsigned int pfn);
+#define DHP_MEM_SYNC_READ (1 << 0)
+#define DHP_MEM_SYNC_WRITE (1 << 1)
+#define DHP_MEM_SYNC_RW (DHP_MEM_SYNC_READ | DHP_MEM_SYNC_WRITE)
+#define DHP_MEM_SYNC_START (0 << 2)
+#define DHP_MEM_SYNC_END (1 << 2)
+#define DHP_MEM_SYNC_VALID_FLAGS_MASK \
+ (DHP_MEM_SYNC_RW | DHP_MEM_SYNC_END)
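+
+/*
+ * Illustrative CPU-read sequence using these flags (a sketch, not a
+ * normative API contract; 'msync_fn' stands for the msync callback of the
+ * DhpMemOps table defined below, and 'priv'/'addr'/'size' are placeholders):
+ *
+ *   msync_fn(priv, addr, size, DHP_MEM_SYNC_READ | DHP_MEM_SYNC_START);
+ *   ... CPU reads the buffer ...
+ *   msync_fn(priv, addr, size, DHP_MEM_SYNC_READ | DHP_MEM_SYNC_END);
+ */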
+
+/**
+ * @brief Memory operations for handling device memory in the Data Handler Proxy (DHP).
+ *
+ * This structure defines the memory operations used for mapping, synchronizing,
+ * and unmapping device memory in the context of the Data Handler Proxy (DHP).
+ * These operations allow for flexible handling of memory for devices, enabling
+ * functions such as memory mapping, synchronization, and unmapping in both
+ * user space and device space.
+ *
+ * The structure provides the following operations:
+ * - `mmap`: Maps a memory region into the address space.
+ * - `msync`: Synchronizes memory between the device and CPU, ensuring data consistency.
+ * - `sgt_mmap`: Maps a scatter-gather table (SGT) of memory regions.
+ * - `sgt_msync`: Synchronizes a scatter-gather table of memory regions.
+ * - `unmmap`: Unmaps a previously mapped memory region.
+ */
+typedef struct dhp_mem_ops {
+ /**
+ * @brief Memory mapping function.
+ *
+ * This function maps a memory region into the process's virtual address space.
+ *
+ * @param priv Pointer to private data, typically device-specific context.
+ * @param addr Address to be mapped.
+ * @param size Size of the memory region to be mapped.
+ * @param uncached Flag to indicate whether the memory should be uncached.
+ *
+ * @return Pointer to the mapped memory region, or NULL on failure.
+ */
+ void *(*mmap)(void *priv, u64 addr, u32 size, u32 uncached);
+
+ /**
+ * @brief Memory synchronization function.
+ *
+ * This function ensures that the specified memory region is synchronized
+ * between the device and CPU, making sure that data is consistent.
+ *
+ * @param priv Pointer to private data, typically device-specific context.
+ * @param addr Address of the memory region to synchronize.
+ * @param size Size of the memory region to synchronize.
+ * @param flags Flags specifying the type of synchronization (e.g., read, write).
+ */
+ void (*msync)(void *priv, u64 addr, u32 size, u32 flags);
+
+ /**
+ * @brief Scatter-gather table memory mapping function.
+ *
+ * This function maps a set of memory regions specified by scatter-gather tables
+ * into the process's address space.
+ *
+ * @param priv Pointer to private data, typically device-specific context.
+ * @param uptr_array Array of user-space addresses to be mapped.
+ * @param pfn_array Array of physical frame numbers corresponding to the memory.
+ * @param num Number of memory regions in the scatter-gather table.
+ * @param uncached Flag to indicate whether the memory should be uncached.
+ *
+ * @return The actual valid length of memory successfully mapped to user-space on success,
+ * or a negative error code on failure.
+ */
+ int (*sgt_mmap)(void *priv, u64 *uptr_array, u64 *pfn_array, u32 num, u32 uncached);
+
+ /**
+ * @brief Scatter-gather table memory synchronization function.
+ *
+ * This function synchronizes a set of memory regions specified by scatter-gather
+ * tables, ensuring data consistency between the CPU and device.
+ *
+ * @param priv Pointer to private data, typically device-specific context.
+ * @param pfn_array Array of physical frame numbers corresponding to the memory.
+ * @param num Number of memory regions to synchronize.
+ * @param flags Flags specifying the type of synchronization (e.g., read, write).
+ */
+ void (*sgt_msync)(void *priv, u64 *pfn_array, u32 num, u32 flags);
+
+ /**
+ * @brief Unmapping function.
+ *
+ * This function unmaps a previously mapped memory region, releasing the
+ * resources associated with it.
+ *
+ * @param priv Pointer to private data, typically device-specific context.
+ * @param uptr Pointer to the user-space address of the memory to unmap.
+ * @param size Size of the memory region to unmap.
+ */
+ void (*unmmap)(void *priv, u8 *uptr, u32 size);
+} DhpMemOps;
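+
+/*
+ * Illustrative initialization (a sketch; it assumes the dhp_mem_* helpers
+ * declared in aml_dhp_daemon.h, whose signatures match these callbacks):
+ *
+ *   DhpMemOps ops = {
+ *       .mmap      = dhp_mem_mmap,
+ *       .msync     = dhp_mem_sync,
+ *       .sgt_mmap  = dhp_mem_sgt_mmap,
+ *       .sgt_msync = dhp_mem_sgt_sync,
+ *       .unmmap    = dhp_mem_munmap,
+ *   };
+ */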
+
/**
* @brief Decodes a YUV frame using the AVBC decoder.
*
* This function performs decoding of YUV frames from the provided header,
- * width, height, and other parameters. It stores the decoded frame in the
- * provided destination buffer.
+ * width, height, and other parameters. It processes the input data and
+ * stores the decoded frame in the provided destination buffer.
*
- * @param header Pointer to the AVBC frame header.
+ * The function leverages memory operations (mapping and synchronization)
+ * through the `DhpMemOps` structure, which allows for flexible handling
+ * of memory and device resources. This is useful when dealing with large
+ * video frame buffers or when optimizations related to uncached memory
+ * are required.
+ *
+ * @param header Pointer to the AVBC frame header, containing information
+ * about the encoded frame and its format.
* @param width Width of the frame in pixels.
* @param height Height of the frame in pixels.
- * @param stride Stride of the frame (bytes per row).
- * @param bitdepth Bit depth of the YUV frame.
+ * @param stride Stride of the frame (bytes per row), used to calculate
+ * the memory layout.
+ * @param bitdepth Bit depth of the YUV frame (e.g., 8, 10 bits per channel).
* @param dst_yuv Pointer to the destination buffer where the decoded YUV
- * frame will be stored.
- * @param dst_size Size of the destination buffer in bytes.
- * @param mMap Memory mapping function for handling memory mapped I/O.
- * @param priv Private data for the memory mapping function.
+ * frame will be stored. The buffer must be large enough
+ * to hold the entire decoded frame.
+ * @param dst_size Size of the destination buffer in bytes. This should
+ * be at least the size required for the decoded frame.
+ * @param uncached Flag to specify whether the memory should be uncached.
+ * Uncached mappings trade CPU access speed for coherency
+ * and are typically used when sharing buffers with hardware.
+ * @param mOps Pointer to a `DhpMemOps` structure that defines the memory
+ * operations (e.g., memory mapping, synchronization) to be used
+ * for the decoding process. This allows the decoder to handle
+ * memory efficiently across different platforms and configurations.
+ * @param priv Pointer to private data that may be needed for the memory
+ * operations. This could include device-specific context or
+ * state required by the memory operations functions.
*
- * @return Returns 0 on success, or -1 on failure.
+ * @return Returns 0 on success, indicating the frame was successfully decoded
+ * and stored in the destination buffer. Returns -1 on failure,
+ * indicating an error during decoding.
*/
int aml_avbc_decode(void *header,
- unsigned int width,
- unsigned int height,
- unsigned int stride,
- unsigned int bitdepth,
- unsigned char *dst_yuv,
- unsigned int dst_size,
- mem_map_func mMap,
+ u32 width,
+ u32 height,
+ u32 stride,
+ u32 bitdepth,
+ u8 *dst_yuv,
+ u32 dst_size,
+ u32 uncached,
+ DhpMemOps *mOps,
void *priv);
/**
@@ -60,19 +190,31 @@
*
* int main() {
* // Sample parameters
- * unsigned int width = 1920;
- * unsigned int height = 1080;
- * unsigned int stride = width; // assuming 8-bit YUV
- * unsigned int bitdepth = 10;
- * unsigned char *dst_yuv;
- * unsigned int dst_size = stride * height * 3 / 2;
+ * u32 width = 1920;
+ * u32 height = 1080;
+ * u32 stride = width; // assuming 8-bit YUV
+ * u32 bitdepth = 8;
+ * u32 dst_size = stride * height * 3 / 2;
+ * u8 *dst_yuv = malloc(dst_size); // caller-owned output buffer
* void *header = get_avbc_header(); // hypothetical function to get the header
+ * void *priv = get_priv_context();
+ * u32 uncached = get_uncached_mode();
*
- * // Memory mapping function (example)
- * mem_map_func my_mmap = my_memory_mapper; // hypothetical memory mapper function
- *
+ * // Memory operations table (example)
+ * DhpMemOps mOps = { .sgt_mmap = dhp_mem_sgt_mmap,
+ * .sgt_msync = dhp_mem_sgt_sync,
+ * .unmmap = dhp_mem_munmap };
* // Decode the frame
- * int ret = aml_avbc_decode(header, width, height, stride, bitdepth, dst_yuv, dst_size, my_mmap, NULL);
+ * int ret = aml_avbc_decode(header,
+ *                           width,
+ *                           height,
+ *                           stride,
+ *                           bitdepth,
+ *                           dst_yuv,
+ *                           dst_size,
+ *                           uncached,
+ *                           &mOps,
+ *                           priv);
* if (ret == 0) {
* printf("Decoding successful!\n");
* } else {
diff --git a/drivers/frame_provider/aml_dhp/dhp_daemon/prebuild/lib/libavbc_soft_decoder.so b/drivers/frame_provider/aml_dhp/dhp_daemon/prebuild/lib/libavbc_soft_decoder.so
index d0be1b8..588d592 100644
--- a/drivers/frame_provider/aml_dhp/dhp_daemon/prebuild/lib/libavbc_soft_decoder.so
+++ b/drivers/frame_provider/aml_dhp/dhp_daemon/prebuild/lib/libavbc_soft_decoder.so
Binary files differ