forked from Mirrors/gasket-driver
staging: gasket: page_table: add mapping flags
This allows for more precise dma_direction in the dma_map_page requests. Also leaves room for adding more flags later. Change-Id: I46feecc2638b7ad0d34548cc10837549a3928011 Signed-off-by: Nick Ewalt <nicholasewalt@google.com> Signed-off-by: Todd Poynor <toddpoynor@google.com>
This commit is contained in:
33
gasket.h
33
gasket.h
@@ -37,6 +37,31 @@ struct gasket_page_table_ioctl {
|
||||
u64 device_address;
|
||||
};
|
||||
|
||||
/*
|
||||
* Structure for ioctl mapping buffers with flags when using the Gasket
|
||||
* page_table module.
|
||||
*/
|
||||
struct gasket_page_table_ioctl_flags {
|
||||
struct gasket_page_table_ioctl base;
|
||||
/*
|
||||
* Flags indicating status and attribute requests from the host.
|
||||
* NOTE: STATUS bit does not need to be set in this request.
|
||||
* Set RESERVED bits to 0 to ensure backwards compatibility.
|
||||
*
|
||||
* Bitfields:
|
||||
* [0] - STATUS: indicates if this entry/slot is free
|
||||
* 0 = PTE_FREE
|
||||
* 1 = PTE_INUSE
|
||||
* [2:1] - DMA_DIRECTION: dma_data_direction requested by host
|
||||
* 00 = DMA_BIDIRECTIONAL
|
||||
* 01 = DMA_TO_DEVICE
|
||||
* 10 = DMA_FROM_DEVICE
|
||||
* 11 = DMA_NONE
|
||||
* [31:3] - RESERVED
|
||||
*/
|
||||
u32 flags;
|
||||
};
|
||||
|
||||
/*
|
||||
* Common structure for ioctls mapping and unmapping buffers when using the
|
||||
* Gasket page_table module.
|
||||
@@ -119,4 +144,12 @@ struct gasket_coherent_alloc_config_ioctl {
|
||||
#define GASKET_IOCTL_CONFIG_COHERENT_ALLOCATOR \
|
||||
_IOWR(GASKET_IOCTL_BASE, 11, struct gasket_coherent_alloc_config_ioctl)
|
||||
|
||||
/*
|
||||
* Tells the kernel to map size bytes at host_address to device_address in
|
||||
* page_table_index page table. Passes flags to indicate additional attribute
|
||||
* requests for the mapped memory.
|
||||
*/
|
||||
#define GASKET_IOCTL_MAP_BUFFER_FLAGS \
|
||||
_IOW(GASKET_IOCTL_BASE, 12, struct gasket_page_table_ioctl_flags)
|
||||
|
||||
#endif /* __GASKET_H__ */
|
||||
|
||||
@@ -20,6 +20,7 @@
|
||||
#define trace_gasket_ioctl_integer_data(x)
|
||||
#define trace_gasket_ioctl_eventfd_data(x, ...)
|
||||
#define trace_gasket_ioctl_page_table_data(x, ...)
|
||||
#define trace_gasket_ioctl_page_table_flags_data(x, ...)
|
||||
#define trace_gasket_ioctl_config_coherent_allocator(x, ...)
|
||||
#endif
|
||||
|
||||
@@ -130,29 +131,59 @@ static int gasket_partition_page_table(
|
||||
}
|
||||
|
||||
/* Map a userspace buffer to a device virtual address. */
|
||||
static int gasket_map_buffers_common(struct gasket_dev *gasket_dev,
|
||||
struct gasket_page_table_ioctl_flags
|
||||
*pibuf)
|
||||
{
|
||||
if (pibuf->base.page_table_index >= gasket_dev->num_page_tables)
|
||||
return -EFAULT;
|
||||
|
||||
if (gasket_page_table_are_addrs_bad(gasket_dev->page_table[pibuf->base.page_table_index],
|
||||
pibuf->base.host_address,
|
||||
pibuf->base.device_address,
|
||||
pibuf->base.size))
|
||||
return -EINVAL;
|
||||
|
||||
return gasket_page_table_map(gasket_dev->page_table[pibuf->base.page_table_index],
|
||||
pibuf->base.host_address,
|
||||
pibuf->base.device_address,
|
||||
pibuf->base.size / PAGE_SIZE,
|
||||
pibuf->flags);
|
||||
}
|
||||
|
||||
static int gasket_map_buffers(struct gasket_dev *gasket_dev,
			      struct gasket_page_table_ioctl __user *argp)
{
-	struct gasket_page_table_ioctl ibuf;
+	struct gasket_page_table_ioctl_flags ibuf;

-	if (copy_from_user(&ibuf, argp, sizeof(struct gasket_page_table_ioctl)))
+	if (copy_from_user(&ibuf.base, argp, sizeof(struct gasket_page_table_ioctl)))
		return -EFAULT;

+	ibuf.flags = 0;
+
-	trace_gasket_ioctl_page_table_data(ibuf.page_table_index, ibuf.size,
-					   ibuf.host_address,
-					   ibuf.device_address);
+	trace_gasket_ioctl_page_table_data(ibuf.base.page_table_index,
+					   ibuf.base.size,
+					   ibuf.base.host_address,
+					   ibuf.base.device_address);

-	if (ibuf.page_table_index >= gasket_dev->num_page_tables)
-		return -EFAULT;
-
-	if (gasket_page_table_are_addrs_bad(gasket_dev->page_table[ibuf.page_table_index],
-					    ibuf.host_address,
-					    ibuf.device_address, ibuf.size))
-		return -EINVAL;
-
-	return gasket_page_table_map(gasket_dev->page_table[ibuf.page_table_index],
-				     ibuf.host_address, ibuf.device_address,
-				     ibuf.size / PAGE_SIZE);
+	return gasket_map_buffers_common(gasket_dev, &ibuf);
}

+static int gasket_map_buffers_flags(struct gasket_dev *gasket_dev,
+				    struct gasket_page_table_ioctl_flags __user *argp)
+{
+	struct gasket_page_table_ioctl_flags ibuf;
+
+	if (copy_from_user(&ibuf, argp, sizeof(struct gasket_page_table_ioctl_flags)))
+		return -EFAULT;
+
+	trace_gasket_ioctl_page_table_flags_data(ibuf.base.page_table_index,
+						 ibuf.base.size,
+						 ibuf.base.host_address,
+						 ibuf.base.device_address,
+						 ibuf.flags);
+
+	return gasket_map_buffers_common(gasket_dev, &ibuf);
+}
|
||||
|
||||
/* Unmap a userspace buffer from a device virtual address. */
|
||||
@@ -258,6 +289,7 @@ static bool gasket_ioctl_check_permissions(struct file *filp, uint cmd)
|
||||
return alive && write;
|
||||
|
||||
case GASKET_IOCTL_MAP_BUFFER:
|
||||
case GASKET_IOCTL_MAP_BUFFER_FLAGS:
|
||||
case GASKET_IOCTL_UNMAP_BUFFER:
|
||||
return alive && write;
|
||||
|
||||
@@ -342,6 +374,9 @@ long gasket_handle_ioctl(struct file *filp, uint cmd, void __user *argp)
|
||||
case GASKET_IOCTL_MAP_BUFFER:
|
||||
retval = gasket_map_buffers(gasket_dev, argp);
|
||||
break;
|
||||
case GASKET_IOCTL_MAP_BUFFER_FLAGS:
|
||||
retval = gasket_map_buffers_flags(gasket_dev, argp);
|
||||
break;
|
||||
case GASKET_IOCTL_CONFIG_COHERENT_ALLOCATOR:
|
||||
retval = gasket_config_coherent_allocator(gasket_dev, argp);
|
||||
break;
|
||||
@@ -387,6 +422,7 @@ long gasket_is_supported_ioctl(uint cmd)
|
||||
case GASKET_IOCTL_PAGE_TABLE_SIZE:
|
||||
case GASKET_IOCTL_SIMPLE_PAGE_TABLE_SIZE:
|
||||
case GASKET_IOCTL_MAP_BUFFER:
|
||||
case GASKET_IOCTL_MAP_BUFFER_FLAGS:
|
||||
case GASKET_IOCTL_UNMAP_BUFFER:
|
||||
case GASKET_IOCTL_CLEAR_INTERRUPT_COUNTS:
|
||||
case GASKET_IOCTL_CONFIG_COHERENT_ALLOCATOR:
|
||||
|
||||
@@ -79,6 +79,19 @@
|
||||
*/
|
||||
#define GASKET_EXTENDED_LVL1_SHIFT 12
|
||||
|
||||
/*
|
||||
* Utilities for accessing flags bitfields.
|
||||
*/
|
||||
#define MASK(field) (((1u << field##_WIDTH) - 1) << field##_SHIFT)
|
||||
#define GET(field, flags) (((flags) & MASK(field)) >> field##_SHIFT)
|
||||
#define SET(field, flags, val) (((flags) & ~MASK(field)) | ((val) << field##_SHIFT))
|
||||
|
||||
#define FLAGS_STATUS_SHIFT 0
|
||||
#define FLAGS_STATUS_WIDTH 1
|
||||
|
||||
#define FLAGS_DMA_DIRECTION_SHIFT 1
|
||||
#define FLAGS_DMA_DIRECTION_WIDTH 2
|
||||
|
||||
/* Type declarations */
|
||||
/* Valid states for a struct gasket_page_table_entry. */
|
||||
enum pte_status {
|
||||
@@ -100,8 +113,12 @@ enum pte_status {
|
||||
* to the actual page mapped and described by this structure.
|
||||
*/
|
||||
struct gasket_page_table_entry {
|
||||
/* The status of this entry/slot: free or in use. */
|
||||
enum pte_status status;
|
||||
/*
|
||||
* Internal structure matches gasket_page_table_ioctl_flags.flags.
|
||||
* NOTE: All fields should have a default value of 0. This ensures that
|
||||
* the kernel will be backwards compatible with old drivers.
|
||||
*/
|
||||
u32 flags;
|
||||
|
||||
/*
|
||||
* Index for alignment into host vaddrs.
|
||||
@@ -297,7 +314,7 @@ static bool gasket_is_pte_range_free(struct gasket_page_table_entry *ptes,
|
||||
int i;
|
||||
|
||||
for (i = 0; i < num_entries; i++) {
|
||||
if (ptes[i].status != PTE_FREE)
|
||||
if (GET(FLAGS_STATUS, ptes[i].flags) != PTE_FREE)
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -313,7 +330,7 @@ static void gasket_free_extended_subtable(struct gasket_page_table *pg_tbl,
|
||||
u64 __iomem *slot)
|
||||
{
|
||||
/* Release the page table from the driver */
|
||||
pte->status = PTE_FREE;
|
||||
pte->flags = SET(FLAGS_STATUS, pte->flags, PTE_FREE);
|
||||
|
||||
/* Release the page table from the device */
|
||||
writeq(0, slot);
|
||||
@@ -347,7 +364,7 @@ gasket_page_table_garbage_collect_nolock(struct gasket_page_table *pg_tbl)
|
||||
slot = pg_tbl->base_slot + pg_tbl->num_simple_entries;
|
||||
pte < pg_tbl->entries + pg_tbl->config.total_entries;
|
||||
pte++, slot++) {
|
||||
if (pte->status == PTE_INUSE) {
|
||||
if (GET(FLAGS_STATUS, pte->flags) == PTE_INUSE) {
|
||||
if (gasket_is_pte_range_free(pte->sublevel,
|
||||
GASKET_PAGES_PER_SUBTABLE))
|
||||
gasket_free_extended_subtable(pg_tbl, pte,
|
||||
@@ -396,7 +413,7 @@ int gasket_page_table_partition(struct gasket_page_table *pg_tbl,
|
||||
start = min(pg_tbl->num_simple_entries, num_simple_entries);
|
||||
|
||||
for (i = start; i < pg_tbl->config.total_entries; i++) {
|
||||
if (pg_tbl->entries[i].status != PTE_FREE) {
|
||||
if (GET(FLAGS_STATUS, pg_tbl->entries[i].flags) != PTE_FREE) {
|
||||
dev_err(pg_tbl->device, "entry %d is not free\n", i);
|
||||
mutex_unlock(&pg_tbl->mutex);
|
||||
return -EBUSY;
|
||||
@@ -444,7 +461,8 @@ static int is_coherent(struct gasket_page_table *pg_tbl, ulong host_addr)
|
||||
static int gasket_perform_mapping(struct gasket_page_table *pg_tbl,
|
||||
struct gasket_page_table_entry *ptes,
|
||||
u64 __iomem *slots, ulong host_addr,
|
||||
uint num_pages, int is_simple_mapping)
|
||||
uint num_pages, u32 flags,
|
||||
int is_simple_mapping)
|
||||
{
|
||||
int ret;
|
||||
ulong offset;
|
||||
@@ -453,6 +471,12 @@ static int gasket_perform_mapping(struct gasket_page_table *pg_tbl,
|
||||
ulong page_addr;
|
||||
int i;
|
||||
|
||||
if (GET(FLAGS_DMA_DIRECTION, flags) == DMA_NONE) {
|
||||
dev_err(pg_tbl->device, "invalid DMA direction flags=0x%lx\n",
|
||||
(unsigned long)flags);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
for (i = 0; i < num_pages; i++) {
|
||||
page_addr = host_addr + i * PAGE_SIZE;
|
||||
offset = page_addr & (PAGE_SIZE - 1);
|
||||
@@ -482,9 +506,8 @@ static int gasket_perform_mapping(struct gasket_page_table *pg_tbl,
|
||||
ptes[i].offset = offset;
|
||||
|
||||
/* Map the page into DMA space. */
|
||||
ptes[i].dma_addr =
|
||||
dma_map_page(pg_tbl->device, page, 0, PAGE_SIZE,
|
||||
DMA_BIDIRECTIONAL);
|
||||
ptes[i].dma_addr = dma_map_page(pg_tbl->device, page, 0, PAGE_SIZE,
|
||||
GET(FLAGS_DMA_DIRECTION, flags));
|
||||
dev_dbg(pg_tbl->device,
|
||||
"%s i %d pte %p pfn %p -> mapped %llx\n",
|
||||
__func__, i, &ptes[i],
|
||||
@@ -518,7 +541,9 @@ static int gasket_perform_mapping(struct gasket_page_table *pg_tbl,
|
||||
(void *)&((u64 __force *)slots)[i],
|
||||
sizeof(u64), DMA_TO_DEVICE);
|
||||
}
|
||||
ptes[i].status = PTE_INUSE;
|
||||
|
||||
/* Set PTE flags equal to flags param with STATUS=PTE_INUSE. */
|
||||
ptes[i].flags = SET(FLAGS_STATUS, flags, PTE_INUSE);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
@@ -600,7 +625,8 @@ static void gasket_perform_unmapping(struct gasket_page_table *pg_tbl,
|
||||
*/
|
||||
for (i = 0; i < num_pages; i++) {
|
||||
/* release the address from the device, */
|
||||
if (is_simple_mapping || ptes[i].status == PTE_INUSE) {
|
||||
if (is_simple_mapping ||
|
||||
GET(FLAGS_STATUS, ptes[i].flags) == PTE_INUSE) {
|
||||
writeq(0, &slots[i]);
|
||||
} else {
|
||||
((u64 __force *)slots)[i] = 0;
|
||||
@@ -609,11 +635,10 @@ static void gasket_perform_unmapping(struct gasket_page_table *pg_tbl,
|
||||
}
|
||||
|
||||
/* release the address from the driver, */
|
||||
if (ptes[i].status == PTE_INUSE) {
|
||||
if (GET(FLAGS_STATUS, ptes[i].flags) == PTE_INUSE) {
|
||||
			if (ptes[i].dma_addr) {
-				dma_unmap_page(pg_tbl->device, ptes[i].dma_addr,
-					       PAGE_SIZE, DMA_BIDIRECTIONAL);
+				dma_unmap_page(pg_tbl->device, ptes[i].dma_addr, PAGE_SIZE,
+					       GET(FLAGS_DMA_DIRECTION, ptes[i].flags));
			}
|
||||
if (gasket_release_page(ptes[i].page))
|
||||
--pg_tbl->num_active_pages;
|
||||
}
|
||||
@@ -656,7 +681,7 @@ static void gasket_unmap_extended_pages(struct gasket_page_table *pg_tbl,
|
||||
/* TODO: Add check to ensure pte remains valid? */
|
||||
len = min(remain, GASKET_PAGES_PER_SUBTABLE - slot_idx);
|
||||
|
||||
if (pte->status == PTE_INUSE) {
|
||||
if (GET(FLAGS_STATUS, pte->flags) == PTE_INUSE) {
|
||||
slot_base = (u64 __iomem *)(page_address(pte->page) +
|
||||
pte->offset);
|
||||
gasket_perform_unmapping(pg_tbl,
|
||||
@@ -835,7 +860,7 @@ static void gasket_page_table_unmap_nolock(struct gasket_page_table *pg_tbl,
|
||||
*/
|
||||
static int gasket_map_simple_pages(struct gasket_page_table *pg_tbl,
|
||||
ulong host_addr, ulong dev_addr,
|
||||
uint num_pages)
|
||||
uint num_pages, u32 flags)
|
||||
{
|
||||
int ret;
|
||||
uint slot_idx = gasket_simple_page_idx(pg_tbl, dev_addr);
|
||||
@@ -850,7 +875,7 @@ static int gasket_map_simple_pages(struct gasket_page_table *pg_tbl,
|
||||
|
||||
ret = gasket_perform_mapping(pg_tbl, pg_tbl->entries + slot_idx,
|
||||
pg_tbl->base_slot + slot_idx, host_addr,
|
||||
num_pages, 1);
|
||||
num_pages, flags, 1);
|
||||
|
||||
if (ret) {
|
||||
gasket_page_table_unmap_nolock(pg_tbl, dev_addr, num_pages);
|
||||
@@ -899,7 +924,7 @@ static int gasket_alloc_extended_subtable(struct gasket_page_table *pg_tbl,
|
||||
dma_addr = (pte->dma_addr + pte->offset) | GASKET_VALID_SLOT_FLAG;
|
||||
writeq(dma_addr, slot);
|
||||
|
||||
pte->status = PTE_INUSE;
|
||||
pte->flags = SET(FLAGS_STATUS, pte->flags, PTE_INUSE);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -937,7 +962,7 @@ static int gasket_alloc_extended_entries(struct gasket_page_table *pg_tbl,
|
||||
len = min(remain,
|
||||
GASKET_PAGES_PER_SUBTABLE - subtable_slot_idx);
|
||||
|
||||
if (pte->status == PTE_FREE) {
|
||||
if (GET(FLAGS_STATUS, pte->flags) == PTE_FREE) {
|
||||
ret = gasket_alloc_extended_subtable(pg_tbl, pte, slot);
|
||||
if (ret) {
|
||||
dev_err(pg_tbl->device,
|
||||
@@ -965,7 +990,7 @@ static int gasket_alloc_extended_entries(struct gasket_page_table *pg_tbl,
|
||||
*/
|
||||
static int gasket_map_extended_pages(struct gasket_page_table *pg_tbl,
|
||||
ulong host_addr, ulong dev_addr,
|
||||
uint num_pages)
|
||||
uint num_pages, u32 flags)
|
||||
{
|
||||
int ret;
|
||||
ulong dev_addr_end;
|
||||
@@ -999,7 +1024,7 @@ static int gasket_map_extended_pages(struct gasket_page_table *pg_tbl,
|
||||
(u64 __iomem *)(page_address(pte->page) + pte->offset);
|
||||
ret = gasket_perform_mapping(pg_tbl, pte->sublevel + slot_idx,
|
||||
slot_base + slot_idx, host_addr,
|
||||
len, 0);
|
||||
len, flags, 0);
|
||||
if (ret) {
|
||||
gasket_page_table_unmap_nolock(pg_tbl, dev_addr,
|
||||
num_pages);
|
||||
@@ -1024,7 +1049,7 @@ static int gasket_map_extended_pages(struct gasket_page_table *pg_tbl,
|
||||
* The page table mutex is held for the entire operation.
|
||||
*/
|
||||
int gasket_page_table_map(struct gasket_page_table *pg_tbl, ulong host_addr,
|
||||
ulong dev_addr, uint num_pages)
|
||||
ulong dev_addr, uint num_pages, u32 flags)
|
||||
{
|
||||
int ret;
|
||||
|
||||
@@ -1035,18 +1060,18 @@ int gasket_page_table_map(struct gasket_page_table *pg_tbl, ulong host_addr,
|
||||
|
||||
if (gasket_addr_is_simple(pg_tbl, dev_addr)) {
|
||||
ret = gasket_map_simple_pages(pg_tbl, host_addr, dev_addr,
|
||||
num_pages);
|
||||
num_pages, flags);
|
||||
} else {
|
||||
ret = gasket_map_extended_pages(pg_tbl, host_addr, dev_addr,
|
||||
num_pages);
|
||||
num_pages, flags);
|
||||
}
|
||||
|
||||
mutex_unlock(&pg_tbl->mutex);
|
||||
|
||||
dev_dbg(pg_tbl->device,
|
||||
"%s done: ha %llx daddr %llx num %d, ret %d\n",
|
||||
"%s done: ha %llx daddr %llx num %d, flags %x ret %d\n",
|
||||
__func__, (unsigned long long)host_addr,
|
||||
(unsigned long long)dev_addr, num_pages, ret);
|
||||
(unsigned long long)dev_addr, num_pages, flags, ret);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(gasket_page_table_map);
|
||||
@@ -1118,7 +1143,7 @@ int gasket_page_table_lookup_page(
|
||||
goto fail;
|
||||
|
||||
pte = pg_tbl->entries + page_num;
|
||||
if (pte->status != PTE_INUSE)
|
||||
if (GET(FLAGS_STATUS, pte->flags) != PTE_INUSE)
|
||||
goto fail;
|
||||
} else {
|
||||
/* Find the level 0 entry, */
|
||||
@@ -1127,13 +1152,13 @@ int gasket_page_table_lookup_page(
|
||||
goto fail;
|
||||
|
||||
pte = pg_tbl->entries + pg_tbl->num_simple_entries + page_num;
|
||||
if (pte->status != PTE_INUSE)
|
||||
if (GET(FLAGS_STATUS, pte->flags) != PTE_INUSE)
|
||||
goto fail;
|
||||
|
||||
/* and its contained level 1 entry. */
|
||||
page_num = gasket_extended_lvl1_page_idx(pg_tbl, dev_addr);
|
||||
pte = pte->sublevel + page_num;
|
||||
if (pte->status != PTE_INUSE)
|
||||
if (GET(FLAGS_STATUS, pte->flags) != PTE_INUSE)
|
||||
goto fail;
|
||||
}
|
||||
|
||||
|
||||
@@ -85,6 +85,8 @@ int gasket_page_table_partition(struct gasket_page_table *page_table,
|
||||
* @host_addr: Starting host virtual memory address of the pages.
|
||||
* @dev_addr: Starting device address of the pages.
|
||||
* @num_pages: Number of [4kB] pages to map.
|
||||
* @flags: Specifies attributes to apply to the pages.
|
||||
* Internal structure matches gasket_page_table_ioctl_flags.flags.
|
||||
*
|
||||
* Description: Maps the "num_pages" pages of host memory pointed to by
|
||||
* host_addr to the address "dev_addr" in device memory.
|
||||
@@ -95,7 +97,7 @@ int gasket_page_table_partition(struct gasket_page_table *page_table,
|
||||
* If there is an error, no pages are mapped.
|
||||
*/
|
||||
int gasket_page_table_map(struct gasket_page_table *page_table, ulong host_addr,
|
||||
ulong dev_addr, uint num_pages);
|
||||
ulong dev_addr, uint num_pages, u32 flags);
|
||||
|
||||
/*
|
||||
* Un-map host pages from device memory.
|
||||
|
||||
Reference in New Issue
Block a user