- /*
- * Copyright (c) 2017 MediaTek Inc.
- * Author: Yunfei Dong <yunfei.dong@mediatek.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
- #include "mtk_vcodec_mem.h"
- /*
- * #undef pr_debug
- * #define pr_debug pr_info
- */
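- /* Allocate a VCU memory queue: dma-contig mem_ops, empty cmdq PA page list, zero buffers. */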
- struct mtk_vcu_queue *mtk_vcu_mem_init(struct device *dev,
- struct device *cmdq_dev)
- {
- struct mtk_vcu_queue *vcu_queue;
- pr_debug("Allocate new vcu queue !\n");
- vcu_queue = kzalloc(sizeof(struct mtk_vcu_queue), GFP_KERNEL);
- if (vcu_queue == NULL) {
- pr_info("Allocate new vcu queue fail!\n");
- return NULL;
- }
- INIT_LIST_HEAD(&vcu_queue->pa_pages.list);
- vcu_queue->mem_ops = &vb2_dma_contig_memops;
- vcu_queue->dev = dev;
- vcu_queue->cmdq_dev = cmdq_dev;
- vcu_queue->num_buffers = 0;
- vcu_queue->map_buf_pa = 0;
- mutex_init(&vcu_queue->mmap_lock);
- return vcu_queue;
- }
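- /*
-  * Release every buffer still tracked by the queue (put working buffers,
-  * drop dma-buf file references), free the cmdq PA pages, then free the queue.
-  */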
- void mtk_vcu_mem_release(struct mtk_vcu_queue *vcu_queue)
- {
- struct mtk_vcu_mem *vcu_buffer;
- unsigned int buffer;
- struct vcu_pa_pages *tmp;
- struct list_head *p, *q;
- mutex_lock(&vcu_queue->mmap_lock);
- pr_debug("Release vcu queue !\n");
- if (vcu_queue->num_buffers != 0) {
- for (buffer = 0; buffer < vcu_queue->num_buffers; buffer++) {
- vcu_buffer = &vcu_queue->bufs[buffer];
- if (vcu_buffer->dbuf == NULL)
- vcu_queue->mem_ops->put(vcu_buffer->mem_priv);
- else
- fput(vcu_buffer->dbuf->file);
- pr_debug("Free %d dbuf = %p size = %d mem_priv = %lx ref_cnt = %d\n",
- buffer, vcu_buffer->dbuf,
- (unsigned int)vcu_buffer->size,
- (unsigned long)vcu_buffer->mem_priv,
- atomic_read(&vcu_buffer->ref_cnt));
- }
- }
- list_for_each_safe(p, q, &vcu_queue->pa_pages.list) {
- tmp = list_entry(p, struct vcu_pa_pages, list);
- cmdq_mbox_buf_free(
- vcu_queue->cmdq_dev,
- (void *)(unsigned long)tmp->kva,
- (dma_addr_t)tmp->pa);
- pr_info("Free cmdq pa %llx ref_cnt = %d\n", tmp->pa,
- atomic_read(&tmp->ref_cnt));
- list_del(p);
- kfree(tmp);
- }
- mutex_unlock(&vcu_queue->mmap_lock);
- kfree(vcu_queue);
- vcu_queue = NULL;
- }
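- /*
-  * Track an io (dma-buf) buffer in the queue: reuse an existing entry that
-  * matches the iova, otherwise match the iova against the src/dst vb2 planes,
-  * take a file reference on the dma-buf and record it with ref_cnt = 1.
-  */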
- void *mtk_vcu_set_buffer(struct mtk_vcu_queue *vcu_queue,
- struct mem_obj *mem_buff_data, struct vb2_buffer *src_vb,
- struct vb2_buffer *dst_vb)
- {
- struct mtk_vcu_mem *vcu_buffer;
- unsigned int num_buffers, plane;
- unsigned int buffer;
- dma_addr_t *dma_addr = NULL;
- struct dma_buf *dbuf = NULL;
- int op;
- pr_debug("[%s] %d iova = %llx src_vb = %p dst_vb = %p\n",
- __func__, vcu_queue->num_buffers, mem_buff_data->iova,
- src_vb, dst_vb);
- num_buffers = vcu_queue->num_buffers;
- if (mem_buff_data->len > CODEC_ALLOCATE_MAX_BUFFER_SIZE ||
- mem_buff_data->len == 0U || num_buffers >= CODEC_MAX_BUFFER) {
- pr_info("Set buffer fail: buffer len = %u num_buffers = %d !!\n",
- mem_buff_data->len, num_buffers);
- return ERR_PTR(-EINVAL);
- }
- mutex_lock(&vcu_queue->mmap_lock);
- for (buffer = 0; buffer < num_buffers; buffer++) {
- vcu_buffer = &vcu_queue->bufs[buffer];
- if (mem_buff_data->iova == (u64)vcu_buffer->iova) {
- atomic_inc(&vcu_buffer->ref_cnt);
- mutex_unlock(&vcu_queue->mmap_lock);
- return vcu_buffer->mem_priv;
- }
- }
- vcu_buffer = &vcu_queue->bufs[num_buffers];
- if (dbuf == NULL && src_vb != NULL)
- for (plane = 0; plane < src_vb->num_planes; plane++) {
- dma_addr = src_vb->vb2_queue->mem_ops->cookie(
- src_vb->planes[plane].mem_priv);
- if (*dma_addr == mem_buff_data->iova) {
- dbuf = src_vb->planes[plane].dbuf;
- vcu_buffer->size = src_vb->planes[plane].length;
- vcu_buffer->mem_priv = src_vb->planes[plane].mem_priv;
- op = DMA_TO_DEVICE;
- pr_debug("src size = %d mem_buff_data len = %d\n",
- (unsigned int)vcu_buffer->size,
- (unsigned int)mem_buff_data->len);
- }
- }
- if (dbuf == NULL && dst_vb != NULL)
- for (plane = 0; plane < dst_vb->num_planes; plane++) {
- dma_addr = dst_vb->vb2_queue->mem_ops->cookie(
- dst_vb->planes[plane].mem_priv);
- if (*dma_addr == mem_buff_data->iova) {
- dbuf = dst_vb->planes[plane].dbuf;
- vcu_buffer->size = dst_vb->planes[plane].length;
- vcu_buffer->mem_priv = dst_vb->planes[plane].mem_priv;
- op = DMA_FROM_DEVICE;
- pr_debug("dst size = %d mem_buff_data len = %d\n",
- (unsigned int)vcu_buffer->size,
- (unsigned int)mem_buff_data->len);
- }
- }
- if (dbuf == NULL) {
- mutex_unlock(&vcu_queue->mmap_lock);
- pr_debug("Set buffer not found: buffer len = %u iova = %llx !!\n",
- mem_buff_data->len, mem_buff_data->iova);
- return ERR_PTR(-ENOMEM);
- }
- vcu_buffer->dbuf = dbuf;
- vcu_buffer->iova = *dma_addr;
- get_file(dbuf->file);
- vcu_queue->num_buffers++;
- atomic_set(&vcu_buffer->ref_cnt, 1);
- mutex_unlock(&vcu_queue->mmap_lock);
- pr_debug("[%s] Num_buffers = %d iova = %llx dbuf = %p size = %d mem_priv = %lx\n",
- __func__, vcu_queue->num_buffers, mem_buff_data->iova,
- vcu_buffer->dbuf, (unsigned int)vcu_buffer->size,
- (unsigned long)vcu_buffer->mem_priv);
- return vcu_buffer->mem_priv;
- }
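- /* Allocate a working buffer through mem_ops and report its iova/va back to the caller. */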
- void *mtk_vcu_get_buffer(struct mtk_vcu_queue *vcu_queue,
- struct mem_obj *mem_buff_data)
- {
- void *cook, *dma_addr;
- struct mtk_vcu_mem *vcu_buffer;
- unsigned int buffers;
- buffers = vcu_queue->num_buffers;
- if (mem_buff_data->len > CODEC_ALLOCATE_MAX_BUFFER_SIZE ||
- mem_buff_data->len == 0U || buffers >= CODEC_MAX_BUFFER) {
- pr_info("Get buffer fail: buffer len = %u num_buffers = %d !!\n",
- mem_buff_data->len, buffers);
- return ERR_PTR(-EINVAL);
- }
- mutex_lock(&vcu_queue->mmap_lock);
- vcu_buffer = &vcu_queue->bufs[buffers];
- vcu_buffer->mem_priv = vcu_queue->mem_ops->alloc(vcu_queue->dev, 0,
- mem_buff_data->len, 0, 0);
- vcu_buffer->size = mem_buff_data->len;
- vcu_buffer->dbuf = NULL;
- if (IS_ERR_OR_NULL(vcu_buffer->mem_priv)) {
- mutex_unlock(&vcu_queue->mmap_lock);
- return ERR_PTR(-ENOMEM);
- }
- cook = vcu_queue->mem_ops->vaddr(vcu_buffer->mem_priv);
- dma_addr = vcu_queue->mem_ops->cookie(vcu_buffer->mem_priv);
- mem_buff_data->iova = *(dma_addr_t *)dma_addr;
- vcu_buffer->iova = *(dma_addr_t *)dma_addr;
- mem_buff_data->va = CODEC_MSK((unsigned long)cook);
- mem_buff_data->pa = 0;
- vcu_queue->num_buffers++;
- mutex_unlock(&vcu_queue->mmap_lock);
- atomic_set(&vcu_buffer->ref_cnt, 1);
- pr_debug("[%s] Num_buffers = %d iova = %llx va = %llx size = %d mem_priv = %lx\n",
- __func__, vcu_queue->num_buffers, mem_buff_data->iova,
- mem_buff_data->va, (unsigned int)vcu_buffer->size,
- (unsigned long)vcu_buffer->mem_priv);
- return vcu_buffer->mem_priv;
- }
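- /* Allocate a cmdq mailbox page, report its pa/kva and track it on the queue's PA page list. */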
- void *mtk_vcu_get_page(struct mtk_vcu_queue *vcu_queue,
- struct mem_obj *mem_buff_data)
- {
- dma_addr_t temp_pa = 0;
- void *mem_priv;
- struct vcu_pa_pages *tmp;
- tmp = kmalloc(sizeof(struct vcu_pa_pages), GFP_KERNEL);
- if (!tmp)
- return ERR_PTR(-ENOMEM);
- mem_priv = cmdq_mbox_buf_alloc(vcu_queue->cmdq_dev, &temp_pa);
- if (IS_ERR_OR_NULL(mem_priv)) {
- kfree(tmp);
- return ERR_PTR(-ENOMEM);
- }
- mutex_lock(&vcu_queue->mmap_lock);
- tmp->pa = temp_pa;
- mem_buff_data->pa = temp_pa;
- tmp->kva = (unsigned long)mem_priv;
- mem_buff_data->va = CODEC_MSK((unsigned long)mem_priv);
- mem_buff_data->iova = 0;
- atomic_set(&tmp->ref_cnt, 1);
- list_add_tail(&tmp->list, &vcu_queue->pa_pages.list);
- mutex_unlock(&vcu_queue->mmap_lock);
- return mem_priv;
- }
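- /*
-  * Free a working buffer matched by va/iova/len whose ref_cnt is 1;
-  * the last slot is moved into the freed slot to keep bufs[] compact.
-  */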
- int mtk_vcu_free_buffer(struct mtk_vcu_queue *vcu_queue,
- struct mem_obj *mem_buff_data)
- {
- struct mtk_vcu_mem *vcu_buffer;
- void *cook, *dma_addr;
- unsigned int buffer, num_buffers, last_buffer;
- int ret = -EINVAL;
- mutex_lock(&vcu_queue->mmap_lock);
- num_buffers = vcu_queue->num_buffers;
- if (num_buffers != 0U) {
- for (buffer = 0; buffer < num_buffers; buffer++) {
- vcu_buffer = &vcu_queue->bufs[buffer];
- if (vcu_buffer->dbuf != NULL)
- continue;
- cook = vcu_queue->mem_ops->vaddr(vcu_buffer->mem_priv);
- dma_addr =
- vcu_queue->mem_ops->cookie(
- vcu_buffer->mem_priv);
- if (mem_buff_data->va == CODEC_MSK((unsigned long)cook) &&
- mem_buff_data->iova == *(dma_addr_t *)dma_addr &&
- mem_buff_data->len == vcu_buffer->size &&
- atomic_read(&vcu_buffer->ref_cnt) == 1) {
- pr_debug("Free buff = %d iova = %llx va = %llx, queue_num = %d\n",
- buffer, mem_buff_data->iova,
- mem_buff_data->va,
- num_buffers);
- vcu_queue->mem_ops->put(vcu_buffer->mem_priv);
- atomic_dec(&vcu_buffer->ref_cnt);
- last_buffer = num_buffers - 1U;
- if (last_buffer != buffer)
- vcu_queue->bufs[buffer] =
- vcu_queue->bufs[last_buffer];
- vcu_queue->bufs[last_buffer].mem_priv = NULL;
- vcu_queue->bufs[last_buffer].size = 0;
- vcu_queue->bufs[last_buffer].dbuf = NULL;
- vcu_queue->num_buffers--;
- ret = 0;
- break;
- }
- }
- }
- mutex_unlock(&vcu_queue->mmap_lock);
- if (ret != 0)
- pr_info("Can not free memory va %llx iova %llx len %u!\n",
- mem_buff_data->va, mem_buff_data->iova,
- mem_buff_data->len);
- return ret;
- }
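- /* Free a cmdq page matched by pa/va once its reference count has dropped to one. */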
- int mtk_vcu_free_page(struct mtk_vcu_queue *vcu_queue,
- struct mem_obj *mem_buff_data)
- {
- int ret = -EINVAL;
- struct vcu_pa_pages *tmp;
- struct list_head *p, *q;
- mutex_lock(&vcu_queue->mmap_lock);
- list_for_each_safe(p, q, &vcu_queue->pa_pages.list) {
- tmp = list_entry(p, struct vcu_pa_pages, list);
- if (tmp->pa == mem_buff_data->pa &&
- CODEC_MSK(tmp->kva) == mem_buff_data->va &&
- atomic_read(&tmp->ref_cnt) == 1) {
- ret = 0;
- cmdq_mbox_buf_free(
- vcu_queue->cmdq_dev,
- (void *)(unsigned long)
- tmp->kva,
- (dma_addr_t)mem_buff_data->pa);
- atomic_dec(&tmp->ref_cnt);
- list_del(p);
- kfree(tmp);
- break;
- }
- }
- mutex_unlock(&vcu_queue->mmap_lock);
- if (ret != 0)
- pr_info("Can not free memory va %llx pa %llx len %u!\n",
- mem_buff_data->va, mem_buff_data->pa,
- mem_buff_data->len);
- return ret;
- }
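- /* Drop one reference on the buffer whose mem_priv matches, if any references are held. */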
- void mtk_vcu_buffer_ref_dec(struct mtk_vcu_queue *vcu_queue,
- void *mem_priv)
- {
- struct mtk_vcu_mem *vcu_buffer;
- unsigned int buffer, num_buffers;
- mutex_lock(&vcu_queue->mmap_lock);
- num_buffers = vcu_queue->num_buffers;
- for (buffer = 0; buffer < num_buffers; buffer++) {
- vcu_buffer = &vcu_queue->bufs[buffer];
- if (vcu_buffer->mem_priv == mem_priv) {
- if (atomic_read(&vcu_buffer->ref_cnt) > 0)
- atomic_dec(&vcu_buffer->ref_cnt);
- else
- pr_info("[VCU][Error] %s fail\n", __func__);
- }
- }
- mutex_unlock(&vcu_queue->mmap_lock);
- }
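- /* Sync a dma-buf for the device by attaching it and mapping its sg table temporarily. */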
- void vcu_io_buffer_cache_sync(struct device *dev,
- struct dma_buf *dbuf, int op)
- {
- struct dma_buf_attachment *buf_att;
- struct sg_table *sgt;
- buf_att = dma_buf_attach(dbuf, dev);
- if (IS_ERR_OR_NULL(buf_att))
- return;
- sgt = dma_buf_map_attachment(buf_att, op);
- if (IS_ERR_OR_NULL(sgt)) {
- dma_buf_detach(dbuf, buf_att);
- return;
- }
- dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, op);
- dma_buf_unmap_attachment(buf_att, sgt, op);
- dma_buf_detach(dbuf, buf_att);
- }
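- /*
-  * Clean the CPU cache for every buffer in the queue: working buffers via
-  * dmac_map_area, io (dma-buf) buffers via vcu_io_buffer_cache_sync.
-  */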
- int vcu_buffer_flush_all(struct device *dev, struct mtk_vcu_queue *vcu_queue)
- {
- struct mtk_vcu_mem *vcu_buffer;
- unsigned int buffer, num_buffers;
- void *cook = NULL;
- num_buffers = vcu_queue->num_buffers;
- if (num_buffers == 0U)
- return 0;
- for (buffer = 0; buffer < num_buffers; buffer++) {
- vcu_buffer = &vcu_queue->bufs[buffer];
- pr_debug("Cache clean %s buffer=%d iova=%lx size=%d num=%d\n",
- (vcu_buffer->dbuf == NULL) ? "working" : "io",
- buffer, (unsigned long)vcu_buffer->iova,
- (unsigned int)vcu_buffer->size, num_buffers);
- if (vcu_buffer->dbuf == NULL) {
- cook = vcu_queue->mem_ops->vaddr(
- vcu_buffer->mem_priv);
- dmac_map_area((void *)cook, vcu_buffer->size,
- DMA_TO_DEVICE);
- } else
- vcu_io_buffer_cache_sync(dev,
- vcu_buffer->dbuf, DMA_TO_DEVICE);
- }
- return 0;
- }
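- /*
-  * Clean (DMA_TO_DEVICE) or invalidate (DMA_FROM_DEVICE) the cache range that
-  * falls inside one of the queue's buffers; returns -1 if no buffer covers it.
-  */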
- int vcu_buffer_cache_sync(struct device *dev, struct mtk_vcu_queue *vcu_queue,
- dma_addr_t dma_addr, size_t size, int op)
- {
- struct mtk_vcu_mem *vcu_buffer;
- unsigned int num_buffers = 0;
- unsigned int buffer = 0;
- void *cook = NULL;
- num_buffers = vcu_queue->num_buffers;
- if (num_buffers == 0U) {
- pr_info("Cache %s buffer fail, iova = %lx, size = %d, vcu no buffers\n",
- (op == DMA_TO_DEVICE) ? "flush" : "invalidate",
- (unsigned long)dma_addr, (unsigned int)size);
- return -1;
- }
- for (buffer = 0; buffer < num_buffers; buffer++) {
- vcu_buffer = &vcu_queue->bufs[buffer];
- if ((dma_addr + size) <=
- (vcu_buffer->iova + vcu_buffer->size) &&
- dma_addr >= vcu_buffer->iova) {
- pr_debug("Cache %s %s buffer iova=%lx range=%d (%lx %d)\n",
- (op == DMA_TO_DEVICE) ?
- "clean" : "invalidate",
- (vcu_buffer->dbuf == NULL) ?
- "working" : "io",
- (unsigned long)dma_addr, (unsigned int)size,
- (unsigned long)vcu_buffer->iova,
- (unsigned int)vcu_buffer->size);
- if (vcu_buffer->dbuf == NULL) {
- cook = vcu_queue->mem_ops->vaddr(
- vcu_buffer->mem_priv);
- if (op == DMA_TO_DEVICE)
- dmac_map_area((void *)cook, size, op);
- else
- dmac_unmap_area((void *)cook, size, op);
- } else
- vcu_io_buffer_cache_sync(dev,
- vcu_buffer->dbuf, op);
- return 0;
- }
- }
- pr_info("Cache %s buffer fail, iova = %lx, size = %d\n",
- (op == DMA_TO_DEVICE) ? "flush" : "invalidate",
- (unsigned long)dma_addr, (unsigned int)size);
- return -1;
- }