Diffstat (limited to 'drivers/gpu/pvr/services4/srvkm/common/buffer_manager.c')
-rwxr-xr-x [-rw-r--r--]  drivers/gpu/pvr/services4/srvkm/common/buffer_manager.c | 740
1 file changed, 284 insertions(+), 456 deletions(-)
diff --git a/drivers/gpu/pvr/services4/srvkm/common/buffer_manager.c b/drivers/gpu/pvr/services4/srvkm/common/buffer_manager.c
index 9ce7a114f3c..10135425cb7 100644..100755
--- a/drivers/gpu/pvr/services4/srvkm/common/buffer_manager.c
+++ b/drivers/gpu/pvr/services4/srvkm/common/buffer_manager.c
@@ -50,23 +50,23 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#include "lists.h"
static IMG_BOOL
-ZeroBuf(BM_BUF *pBuf, BM_MAPPING *pMapping, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags);
+ZeroBuf(BM_BUF *pBuf, BM_MAPPING *pMapping, IMG_SIZE_T uBytes, IMG_UINT32 ui32Flags);
static IMG_VOID
BM_FreeMemory (IMG_VOID *pH, IMG_UINTPTR_T base, BM_MAPPING *psMapping);
static IMG_BOOL
BM_ImportMemory(IMG_VOID *pH, IMG_SIZE_T uSize,
IMG_SIZE_T *pActualSize, BM_MAPPING **ppsMapping,
- IMG_UINT32 uFlags, IMG_PVOID pvPrivData,
+ IMG_UINT32 ui32Flags, IMG_PVOID pvPrivData,
IMG_UINT32 ui32PrivDataLength, IMG_UINTPTR_T *pBase);
-static IMG_INT32
+static IMG_BOOL
DevMemoryAlloc (BM_CONTEXT *pBMContext,
BM_MAPPING *pMapping,
IMG_SIZE_T *pActualSize,
- IMG_UINT32 uFlags,
+ IMG_UINT32 ui32Flags,
IMG_UINT32 dev_vaddr_alignment,
IMG_DEV_VIRTADDR *pDevVAddr);
-static IMG_INT32
+static IMG_VOID
DevMemoryFree (BM_MAPPING *pMapping);
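[The two prototype changes above retire the per-mapping GPU map refcount (the ui32MappingCount field deleted later in this patch): DevMemoryAlloc() becomes a plain success/failure predicate and DevMemoryFree() a void teardown. A minimal sketch of the resulting calling pattern, assuming only the declarations above:

    IMG_DEV_VIRTADDR sDevVAddr;
    if (!DevMemoryAlloc(pBMContext, pMapping, IMG_NULL,
                        ui32Flags, uDevVAddrAlignment, &sDevVAddr))
    {
        /* no partial refcount state to unwind on failure any more */
        return IMG_FALSE;
    }
    /* ... later, on the release path: */
    DevMemoryFree(pMapping);   /* void: always unmaps, no count returned */
]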
/*!
@@ -90,7 +90,7 @@ DevMemoryFree (BM_MAPPING *pMapping);
@Input psBMHeap - BM heap
@Input psDevVAddr - device virtual address (optional)
@Input uSize - requested buffer size in bytes.
- @Input uFlags - property flags for the buffer.
+ @Input ui32Flags - property flags for the buffer.
@Input uDevVAddrAlignment - required device virtual address
alignment, or 0.
@Input pvPrivData - opaque private data passed through to allocator
@@ -107,7 +107,7 @@ AllocMemory (BM_CONTEXT *pBMContext,
BM_HEAP *psBMHeap,
IMG_DEV_VIRTADDR *psDevVAddr,
IMG_SIZE_T uSize,
- IMG_UINT32 uFlags,
+ IMG_UINT32 ui32Flags,
IMG_UINT32 uDevVAddrAlignment,
IMG_PVOID pvPrivData,
IMG_UINT32 ui32PrivDataLength,
@@ -122,16 +122,16 @@ AllocMemory (BM_CONTEXT *pBMContext,
RA_ARENA *pArena = IMG_NULL;
PVR_DPF ((PVR_DBG_MESSAGE,
- "AllocMemory (uSize=0x%x, uFlags=0x%x, align=0x%x)",
- uSize, uFlags, uDevVAddrAlignment));
+ "AllocMemory (uSize=0x%" SIZE_T_FMT_LEN "x, ui32Flags=0x%x, align=0x%x)",
+ uSize, ui32Flags, uDevVAddrAlignment));
/*
what to do depends on combination of DevVaddr generation
and backing RAM requirement
*/
- if(uFlags & PVRSRV_MEM_RAM_BACKED_ALLOCATION)
+ if(ui32Flags & PVRSRV_MEM_RAM_BACKED_ALLOCATION)
{
- if(uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR)
+ if(ui32Flags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR)
{
/* user supplied DevVAddr, RAM backing */
PVR_DPF ((PVR_DBG_ERROR, "AllocMemory: combination of DevVAddr management and RAM backing mode unsupported"));
@@ -156,82 +156,64 @@ AllocMemory (BM_CONTEXT *pBMContext,
}
/* Now allocate from the arena we chose above. */
- /* in case of a pageable buffer, we must bypass RA which could
- * combine/split individual mappings between buffers:
- */
- if (uFlags & (PVRSRV_MEM_SPARSE | PVRSRV_HAP_GPU_PAGEABLE))
+ if (ui32Flags & PVRSRV_MEM_SPARSE)
{
IMG_BOOL bSuccess;
- IMG_SIZE_T puiActualSize;
- IMG_SIZE_T uRequestSize = uSize;
+ IMG_SIZE_T uActualSize;
- if(uFlags & PVRSRV_MEM_SPARSE)
- {
- uRequestSize = ui32ChunkSize * ui32NumPhysChunks;
- uSize = ui32ChunkSize * ui32NumVirtChunks;
- }
+ /* Allocate physical memory */
+ bSuccess = BM_ImportMemory(psBMHeap,
+ ui32ChunkSize * ui32NumPhysChunks,
+ &uActualSize,
+ &pMapping,
+ ui32Flags,
+ pvPrivData,
+ ui32PrivDataLength,
+ IMG_NULL); /* We allocate VM space */
- /* Allocate physical memory */
- if (!BM_ImportMemory(psBMHeap,
- uRequestSize,
- &puiActualSize,
- &pMapping,
- uFlags,
- pvPrivData,
- ui32PrivDataLength,
- (IMG_UINTPTR_T *)&(pBuf->DevVAddr.uiAddr)))
+ if (!bSuccess)
{
- PVR_DPF((PVR_DBG_ERROR,
- "BM_ImportMemory: Failed to allocate device memory"));
+ PVR_DPF((PVR_DBG_ERROR, "AllocMemory: BM_ImportMemory failed"));
return IMG_FALSE;
}
- pBuf->hOSMemHandle = pMapping->hOSMemHandle;
- /* We allocate VM space for sparse area */
- if(uFlags & PVRSRV_MEM_SPARSE)
+ if (uActualSize != ui32ChunkSize * ui32NumPhysChunks)
{
- if (puiActualSize != ui32ChunkSize * ui32NumPhysChunks)
- {
- /*
- * Most likely the chunk size was not host page multiple,
- * so return with an error
- */
- PVR_DPF((PVR_DBG_ERROR, "AllocMemory: Failed to allocate"
- "memory for sparse allocation"));
- BM_FreeMemory(pArena, IMG_NULL, pMapping);
- return IMG_FALSE;
- }
-
- pMapping->uSizeVM = uSize;
- pMapping->ui32ChunkSize = ui32ChunkSize;
- pMapping->ui32NumVirtChunks = ui32NumVirtChunks;
- pMapping->ui32NumPhysChunks = ui32NumPhysChunks;
- pMapping->pabMapChunk = pabMapChunk;
+ /*
+ Most likely the chunk size was not a host page multiple, so
+ return with an error
+ */
+ PVR_DPF((PVR_DBG_ERROR, "AllocMemory: Failed to allocate memory for sparse allocation"));
+ BM_FreeMemory(pArena, IMG_NULL, pMapping);
+ return IMG_FALSE;
+ }
- if (!(uFlags & PVRSRV_HAP_NO_GPU_VIRTUAL_ON_ALLOC))
- {
- /* Allocate VA space and map in the physical memory */
- bSuccess = DevMemoryAlloc (pBMContext,
- pMapping,
- IMG_NULL,
- uFlags,
- (IMG_UINT32)uDevVAddrAlignment,
- &pMapping->DevVAddr);
- if (!bSuccess)
- {
- PVR_DPF((PVR_DBG_ERROR,
- "AllocMemory: Failed to allocate device memory"));
- BM_FreeMemory(pArena, IMG_NULL, pMapping);
- return IMG_FALSE;
- }
+ pMapping->uSizeVM = ui32ChunkSize * ui32NumVirtChunks;
+ uSize = pMapping->uSizeVM;
+ pMapping->ui32ChunkSize = ui32ChunkSize;
+ pMapping->ui32NumVirtChunks = ui32NumVirtChunks;
+ pMapping->ui32NumPhysChunks = ui32NumPhysChunks;
+ pMapping->pabMapChunk = pabMapChunk;
- /* uDevVAddrAlignment is currently set to zero so QAC
- * generates warning which we override */
- /* PRQA S 3356,3358 1 */
- PVR_ASSERT (uDevVAddrAlignment>1?(pMapping->DevVAddr.uiAddr%uDevVAddrAlignment)==0:1);
- pBuf->DevVAddr.uiAddr = pMapping->DevVAddr.uiAddr;
- }
+ /* Allocate VA space and map in the physical memory */
+ bSuccess = DevMemoryAlloc (pBMContext,
+ pMapping,
+ IMG_NULL,
+ ui32Flags,
+ uDevVAddrAlignment,
+ &pMapping->DevVAddr);
+ if (!bSuccess)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "AllocMemory: Failed to allocate device memory"));
+ BM_FreeMemory(pArena, IMG_NULL, pMapping);
+ return IMG_FALSE;
}
+
+ /* uDevVAddrAlignment is currently set to zero so QAC generates warning which we override */
+ /* PRQA S 3356,3358 1 */
+ PVR_ASSERT (uDevVAddrAlignment>1?(pMapping->DevVAddr.uiAddr%uDevVAddrAlignment)==0:1);
+ pBuf->DevVAddr.uiAddr = pMapping->DevVAddr.uiAddr;
}
else
{
@@ -239,15 +221,14 @@ AllocMemory (BM_CONTEXT *pBMContext,
uSize,
IMG_NULL,
(IMG_VOID*) &pMapping,
- uFlags,
+ ui32Flags,
uDevVAddrAlignment,
0,
pvPrivData,
ui32PrivDataLength,
(IMG_UINTPTR_T *)&(pBuf->DevVAddr.uiAddr)))
{
- PVR_DPF((PVR_DBG_ERROR, "AllocMemory: RA_Alloc(0x%x) hOSMemHandle %p, flags 0x%08x FAILED",
- uSize, pMapping->hOSMemHandle, uFlags));
+ PVR_DPF((PVR_DBG_ERROR, "AllocMemory: RA_Alloc(0x%" SIZE_T_FMT_LEN "x) FAILED", uSize));
return IMG_FALSE;
}
}
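[In the rewritten sparse branch above, the physical import and the GPU virtual range are sized independently: only populated chunks get RAM, while the VA covers the full chunk layout. A worked sketch with hypothetical numbers:

    /* hypothetical values; ui32ChunkSize must be a host page multiple */
    IMG_SIZE_T ui32ChunkSize     = 0x10000;  /* 64 KiB per chunk        */
    IMG_UINT32 ui32NumPhysChunks = 3;        /* chunks backed by RAM    */
    IMG_UINT32 ui32NumVirtChunks = 8;        /* chunks in the VA layout */

    IMG_SIZE_T uPhysBytes = ui32ChunkSize * ui32NumPhysChunks; /* 0x30000 */
    IMG_SIZE_T uVMBytes   = ui32ChunkSize * ui32NumVirtChunks; /* 0x80000 */
    /* BM_ImportMemory() is asked for uPhysBytes and must return exactly
     * that much; pMapping->uSizeVM is then set to uVMBytes before
     * DevMemoryAlloc() maps the virtual range. */
]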
@@ -283,9 +264,9 @@ AllocMemory (BM_CONTEXT *pBMContext,
* will have a physical address, else 0 */
pBuf->CpuPAddr.uiAddr = pMapping->CpuPAddr.uiAddr + uOffset;
- if(uFlags & PVRSRV_MEM_ZERO)
+ if(ui32Flags & PVRSRV_MEM_ZERO)
{
- if(!ZeroBuf(pBuf, pMapping, uSize, psBMHeap->ui32Attribs | uFlags))
+ if(!ZeroBuf(pBuf, pMapping, uSize, psBMHeap->ui32Attribs | ui32Flags))
{
return IMG_FALSE;
}
@@ -293,7 +274,7 @@ AllocMemory (BM_CONTEXT *pBMContext,
}
else
{
- if(uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR)
+ if(ui32Flags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR)
{
/* user supplied DevVAddr, no RAM backing */
PVR_ASSERT(psDevVAddr != IMG_NULL);
@@ -341,7 +322,7 @@ AllocMemory (BM_CONTEXT *pBMContext,
(IMG_PVOID *)&pMapping, IMG_NULL,
"Buffer Manager Mapping") != PVRSRV_OK)
{
- PVR_DPF((PVR_DBG_ERROR, "AllocMemory: OSAllocMem(0x%x) FAILED", sizeof(*pMapping)));
+ PVR_DPF((PVR_DBG_ERROR, "AllocMemory: OSAllocMem(0x%" SIZE_T_FMT_LEN "x) FAILED", sizeof(*pMapping)));
return IMG_FALSE;
}
@@ -354,7 +335,6 @@ AllocMemory (BM_CONTEXT *pBMContext,
pMapping->CpuVAddr = IMG_NULL;
pMapping->CpuPAddr.uiAddr = 0;
pMapping->DevVAddr = pBuf->DevVAddr;
- pMapping->ui32MappingCount = 1;
pMapping->psSysAddr = IMG_NULL;
pMapping->uSize = uSize;
pMapping->hOSMemHandle = 0;
@@ -362,7 +342,6 @@ AllocMemory (BM_CONTEXT *pBMContext,
/* Record the arena pointer in the mapping. */
pMapping->pArena = pArena;
- pMapping->ui32DevVAddrAlignment = uDevVAddrAlignment;
/* record the heap */
pMapping->pBMHeap = psBMHeap;
@@ -370,18 +349,18 @@ AllocMemory (BM_CONTEXT *pBMContext,
/* output some stats */
PVR_DPF ((PVR_DBG_MESSAGE,
- "AllocMemory: pMapping=%08x: DevV=%08X CpuV=%08x CpuP=%08X uSize=0x%x",
- (IMG_UINTPTR_T)pMapping,
+ "AllocMemory: pMapping=%p: DevV=%08X CpuV=%p CpuP=" CPUPADDR_FMT " uSize=0x%" SIZE_T_FMT_LEN "x",
+ pMapping,
pMapping->DevVAddr.uiAddr,
- (IMG_UINTPTR_T)pMapping->CpuVAddr,
+ pMapping->CpuVAddr,
pMapping->CpuPAddr.uiAddr,
pMapping->uSize));
PVR_DPF ((PVR_DBG_MESSAGE,
- "AllocMemory: pBuf=%08x: DevV=%08X CpuV=%08x CpuP=%08X uSize=0x%x",
- (IMG_UINTPTR_T)pBuf,
+ "AllocMemory: pBuf=%p: DevV=%08X CpuV=%p CpuP=" CPUPADDR_FMT " uSize=0x%" SIZE_T_FMT_LEN "x",
+ pBuf,
pBuf->DevVAddr.uiAddr,
- (IMG_UINTPTR_T)pBuf->CpuVAddr,
+ pBuf->CpuVAddr,
pBuf->CpuPAddr.uiAddr,
uSize));
@@ -406,7 +385,7 @@ AllocMemory (BM_CONTEXT *pBMContext,
@Input bPhysContig - Is the wrap physically contiguous.
@Input psAddr - List of pages to wrap.
@Input pvCPUVAddr - Optional CPU Kernel virtual address (page aligned) of memory to wrap
- @Input uFlags - property flags for the buffer.
+ @Input ui32Flags - property flags for the buffer.
@Output Buf - receives a pointer to a descriptor of the allocated
buffer.
@Return IMG_TRUE - Success
@@ -416,27 +395,36 @@ AllocMemory (BM_CONTEXT *pBMContext,
static IMG_BOOL
WrapMemory (BM_HEAP *psBMHeap,
IMG_SIZE_T uSize,
- IMG_SIZE_T ui32BaseOffset,
+ IMG_SIZE_T uiBaseOffset,
IMG_BOOL bPhysContig,
IMG_SYS_PHYADDR *psAddr,
IMG_VOID *pvCPUVAddr,
- IMG_UINT32 uFlags,
+ IMG_UINT32 ui32Flags,
BM_BUF *pBuf)
{
IMG_DEV_VIRTADDR DevVAddr = {0};
BM_MAPPING *pMapping;
- IMG_INT32 bResult;
- IMG_SIZE_T const ui32PageSize = HOST_PAGESIZE();
+ IMG_BOOL bResult;
+ IMG_SIZE_T const uPageSize = HOST_PAGESIZE();
+ /* We should not pass the R/W flags down into the OS layers, so create ui32Attribs without them */
+ IMG_UINT32 ui32Attribs = ui32Flags & ~(PVRSRV_MEM_READ | PVRSRV_MEM_WRITE);
PVR_DPF ((PVR_DBG_MESSAGE,
- "WrapMemory(psBMHeap=%08X, size=0x%x, offset=0x%x, bPhysContig=0x%x, pvCPUVAddr = 0x%08x, flags=0x%x)",
- (IMG_UINTPTR_T)psBMHeap, uSize, ui32BaseOffset, bPhysContig, (IMG_UINTPTR_T)pvCPUVAddr, uFlags));
+ "WrapMemory(psBMHeap=%p, size=0x%" SIZE_T_FMT_LEN "x, offset=0x%" SIZE_T_FMT_LEN
+ "x, bPhysContig=0x%x, sysPAddr=0x" SYSPADDR_FMT ", pvCPUVAddr = 0x%p, flags=0x%x)",
+ psBMHeap,
+ uSize,
+ uiBaseOffset,
+ bPhysContig,
+ psAddr->uiAddr,
+ pvCPUVAddr,
+ ui32Flags));
- PVR_ASSERT((psAddr->uiAddr & (ui32PageSize - 1)) == 0);
+ PVR_ASSERT((psAddr->uiAddr & (uPageSize - 1)) == 0);
/* Only need lower 12 bits of the cpu addr - don't care what size a void* is */
- PVR_ASSERT(((IMG_UINTPTR_T)pvCPUVAddr & (ui32PageSize - 1)) == 0);
+ PVR_ASSERT(((IMG_UINTPTR_T)pvCPUVAddr & (uPageSize - 1)) == 0);
- uSize += ui32BaseOffset;
+ uSize += uiBaseOffset;
uSize = HOST_PAGEALIGN (uSize);
/* allocate a mocked-up mapping */
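[WrapMemory() now keeps two flag words: ui32Flags (with the caller's R/W bits, later passed to DevMemoryAlloc()) and ui32Attribs (the same word with R/W stripped, passed to the OS register/reserve calls). A small sketch of the split, using hypothetical bit positions for the two flags:

    #define MEM_READ_BIT   (1U << 0)   /* hypothetical, for illustration */
    #define MEM_WRITE_BIT  (1U << 1)

    IMG_UINT32 ui32Flags   = 0x0000000FU;              /* caller flags    */
    IMG_UINT32 ui32Attribs = ui32Flags &
                             ~(MEM_READ_BIT | MEM_WRITE_BIT);  /* -> 0x0C */
    /* ui32Attribs -> OSRegisterMem()/OSReservePhys();
     * ui32Flags   -> DevMemoryAlloc(), which still needs the R/W intent */
]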
@@ -445,7 +433,7 @@ WrapMemory (BM_HEAP *psBMHeap,
(IMG_PVOID *)&pMapping, IMG_NULL,
"Mocked-up mapping") != PVRSRV_OK)
{
- PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSAllocMem(0x%x) FAILED",sizeof(*pMapping)));
+ PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSAllocMem(0x%" SIZE_T_FMT_LEN "x) FAILED", sizeof(*pMapping)));
return IMG_FALSE;
}
@@ -467,10 +455,10 @@ WrapMemory (BM_HEAP *psBMHeap,
if(OSRegisterMem(pMapping->CpuPAddr,
pMapping->CpuVAddr,
pMapping->uSize,
- uFlags,
+ ui32Attribs,
&pMapping->hOSMemHandle) != PVRSRV_OK)
{
- PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSRegisterMem Phys=0x%08X, Size=%d) failed",
+ PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSRegisterMem Phys=0x" CPUPADDR_FMT ", Size=%" SIZE_T_FMT_LEN "u) failed",
pMapping->CpuPAddr.uiAddr, pMapping->uSize));
goto fail_cleanup;
}
@@ -483,10 +471,10 @@ WrapMemory (BM_HEAP *psBMHeap,
if(OSRegisterDiscontigMem(pMapping->psSysAddr,
pMapping->CpuVAddr,
pMapping->uSize,
- uFlags,
+ ui32Attribs,
&pMapping->hOSMemHandle) != PVRSRV_OK)
{
- PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSRegisterDiscontigMem Size=%d) failed",
+ PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSRegisterDiscontigMem Size=0x%" SIZE_T_FMT_LEN "u) failed",
pMapping->uSize));
goto fail_cleanup;
}
@@ -501,12 +489,12 @@ WrapMemory (BM_HEAP *psBMHeap,
if(OSReservePhys(pMapping->CpuPAddr,
pMapping->uSize,
- uFlags,
+ ui32Attribs,
IMG_NULL,
&pMapping->CpuVAddr,
&pMapping->hOSMemHandle) != PVRSRV_OK)
{
- PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSReservePhys Phys=0x%08X, Size=%d) failed",
+ PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSReservePhys Phys=0x" CPUPADDR_FMT ", Size=%" SIZE_T_FMT_LEN "u) failed",
pMapping->CpuPAddr.uiAddr, pMapping->uSize));
goto fail_cleanup;
}
@@ -518,11 +506,11 @@ WrapMemory (BM_HEAP *psBMHeap,
if(OSReserveDiscontigPhys(pMapping->psSysAddr,
pMapping->uSize,
- uFlags,
+ ui32Attribs,
&pMapping->CpuVAddr,
&pMapping->hOSMemHandle) != PVRSRV_OK)
{
- PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSReserveDiscontigPhys Size=%d) failed",
+ PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSReserveDiscontigPhys Size=%" SIZE_T_FMT_LEN "u) failed",
pMapping->uSize));
goto fail_cleanup;
}
@@ -530,18 +518,18 @@ WrapMemory (BM_HEAP *psBMHeap,
}
/*
- * Allocate device memory for this buffer. Map wrapped pages as read/write
+ * Allocate device memory for this buffer.
*/
bResult = DevMemoryAlloc(psBMHeap->pBMContext,
pMapping,
IMG_NULL,
- uFlags | PVRSRV_MEM_READ | PVRSRV_MEM_WRITE,
- IMG_CAST_TO_DEVVADDR_UINT(ui32PageSize),
+ ui32Flags,
+ IMG_CAST_TO_DEVVADDR_UINT(uPageSize),
&DevVAddr);
- if (bResult <= 0)
+ if (!bResult)
{
PVR_DPF((PVR_DBG_ERROR,
- "WrapMemory: DevMemoryAlloc(0x%x) failed",
+ "WrapMemory: DevMemoryAlloc(0x%" SIZE_T_FMT_LEN "x) failed",
pMapping->uSize));
goto fail_cleanup;
}
@@ -552,17 +540,17 @@ WrapMemory (BM_HEAP *psBMHeap,
* addresses associated with this allocation are placed at the same
* offset within the underlying chunk.
*/
- pBuf->CpuPAddr.uiAddr = pMapping->CpuPAddr.uiAddr + ui32BaseOffset;
- if(!ui32BaseOffset)
+ pBuf->CpuPAddr.uiAddr = pMapping->CpuPAddr.uiAddr + uiBaseOffset;
+ if(!uiBaseOffset)
{
pBuf->hOSMemHandle = pMapping->hOSMemHandle;
}
else
{
if(OSGetSubMemHandle(pMapping->hOSMemHandle,
- ui32BaseOffset,
- (pMapping->uSize-ui32BaseOffset),
- uFlags,
+ uiBaseOffset,
+ (pMapping->uSize - uiBaseOffset),
+ ui32Attribs,
&pBuf->hOSMemHandle)!=PVRSRV_OK)
{
PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSGetSubMemHandle failed"));
@@ -571,13 +559,13 @@ WrapMemory (BM_HEAP *psBMHeap,
}
if(pMapping->CpuVAddr)
{
- pBuf->CpuVAddr = (IMG_VOID*) ((IMG_UINTPTR_T)pMapping->CpuVAddr + ui32BaseOffset);
+ pBuf->CpuVAddr = (IMG_VOID*) ((IMG_UINTPTR_T)pMapping->CpuVAddr + uiBaseOffset);
}
- pBuf->DevVAddr.uiAddr = pMapping->DevVAddr.uiAddr + IMG_CAST_TO_DEVVADDR_UINT(ui32BaseOffset);
+ pBuf->DevVAddr.uiAddr = pMapping->DevVAddr.uiAddr + IMG_CAST_TO_DEVVADDR_UINT(uiBaseOffset);
- if(uFlags & PVRSRV_MEM_ZERO)
+ if(ui32Flags & PVRSRV_MEM_ZERO)
{
- if(!ZeroBuf(pBuf, pMapping, uSize, uFlags))
+ if(!ZeroBuf(pBuf, pMapping, uSize, ui32Flags))
{
return IMG_FALSE;
}
@@ -585,19 +573,19 @@ WrapMemory (BM_HEAP *psBMHeap,
PVR_DPF ((PVR_DBG_MESSAGE, "DevVaddr.uiAddr=%08X", DevVAddr.uiAddr));
PVR_DPF ((PVR_DBG_MESSAGE,
- "WrapMemory: DevV=%08X CpuP=%08X uSize=0x%x",
+ "WrapMemory: DevV=%08X CpuP=" CPUPADDR_FMT " uSize=0x%" SIZE_T_FMT_LEN "x",
pMapping->DevVAddr.uiAddr, pMapping->CpuPAddr.uiAddr, pMapping->uSize));
PVR_DPF ((PVR_DBG_MESSAGE,
- "WrapMemory: DevV=%08X CpuP=%08X uSize=0x%x",
+ "WrapMemory: DevV=%08X CpuP=" CPUPADDR_FMT " uSize=0x%" SIZE_T_FMT_LEN "x",
pBuf->DevVAddr.uiAddr, pBuf->CpuPAddr.uiAddr, uSize));
pBuf->pMapping = pMapping;
return IMG_TRUE;
fail_cleanup:
- if(ui32BaseOffset && pBuf->hOSMemHandle)
+ if(uiBaseOffset && pBuf->hOSMemHandle)
{
- OSReleaseSubMemHandle(pBuf->hOSMemHandle, uFlags);
+ OSReleaseSubMemHandle(pBuf->hOSMemHandle, ui32Attribs);
}
if(pMapping && (pMapping->CpuVAddr || pMapping->hOSMemHandle))
@@ -605,16 +593,16 @@ fail_cleanup:
switch(pMapping->eCpuMemoryOrigin)
{
case hm_wrapped:
- OSUnReservePhys(pMapping->CpuVAddr, pMapping->uSize, uFlags, pMapping->hOSMemHandle);
+ OSUnReservePhys(pMapping->CpuVAddr, pMapping->uSize, ui32Attribs, pMapping->hOSMemHandle);
break;
case hm_wrapped_virtaddr:
- OSUnRegisterMem(pMapping->CpuVAddr, pMapping->uSize, uFlags, pMapping->hOSMemHandle);
+ OSUnRegisterMem(pMapping->CpuVAddr, pMapping->uSize, ui32Attribs, pMapping->hOSMemHandle);
break;
case hm_wrapped_scatter:
- OSUnReserveDiscontigPhys(pMapping->CpuVAddr, pMapping->uSize, uFlags, pMapping->hOSMemHandle);
+ OSUnReserveDiscontigPhys(pMapping->CpuVAddr, pMapping->uSize, ui32Attribs, pMapping->hOSMemHandle);
break;
case hm_wrapped_scatter_virtaddr:
- OSUnRegisterDiscontigMem(pMapping->CpuVAddr, pMapping->uSize, uFlags, pMapping->hOSMemHandle);
+ OSUnRegisterDiscontigMem(pMapping->CpuVAddr, pMapping->uSize, ui32Attribs, pMapping->hOSMemHandle);
break;
default:
break;
@@ -630,19 +618,19 @@ fail_cleanup:
static IMG_BOOL
-ZeroBuf(BM_BUF *pBuf, BM_MAPPING *pMapping, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags)
+ZeroBuf(BM_BUF *pBuf, BM_MAPPING *pMapping, IMG_SIZE_T uBytes, IMG_UINT32 ui32Flags)
{
IMG_VOID *pvCpuVAddr;
if(pBuf->CpuVAddr)
{
- OSMemSet(pBuf->CpuVAddr, 0, ui32Bytes);
+ OSMemSet(pBuf->CpuVAddr, 0, uBytes);
}
else if(pMapping->eCpuMemoryOrigin == hm_contiguous
|| pMapping->eCpuMemoryOrigin == hm_wrapped)
{
pvCpuVAddr = OSMapPhysToLin(pBuf->CpuPAddr,
- ui32Bytes,
+ uBytes,
PVRSRV_HAP_KERNEL_ONLY
| (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK),
IMG_NULL);
@@ -651,17 +639,17 @@ ZeroBuf(BM_BUF *pBuf, BM_MAPPING *pMapping, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui3
PVR_DPF((PVR_DBG_ERROR, "ZeroBuf: OSMapPhysToLin for contiguous buffer failed"));
return IMG_FALSE;
}
- OSMemSet(pvCpuVAddr, 0, ui32Bytes);
+ OSMemSet(pvCpuVAddr, 0, uBytes);
OSUnMapPhysToLin(pvCpuVAddr,
- ui32Bytes,
+ uBytes,
PVRSRV_HAP_KERNEL_ONLY
| (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK),
IMG_NULL);
}
else
{
- IMG_SIZE_T ui32BytesRemaining = ui32Bytes;
- IMG_SIZE_T ui32CurrentOffset = 0;
+ IMG_SIZE_T uBytesRemaining = uBytes;
+ IMG_SIZE_T uCurrentOffset = 0;
IMG_CPU_PHYADDR CpuPAddr;
/* Walk through the pBuf one page at a time and use
@@ -669,21 +657,21 @@ ZeroBuf(BM_BUF *pBuf, BM_MAPPING *pMapping, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui3
PVR_ASSERT(pBuf->hOSMemHandle);
- while(ui32BytesRemaining > 0)
+ while(uBytesRemaining > 0)
{
- IMG_SIZE_T ui32BlockBytes = MIN(ui32BytesRemaining, HOST_PAGESIZE());
- CpuPAddr = OSMemHandleToCpuPAddr(pBuf->hOSMemHandle, ui32CurrentOffset);
+ IMG_SIZE_T uBlockBytes = MIN(uBytesRemaining, HOST_PAGESIZE());
+ CpuPAddr = OSMemHandleToCpuPAddr(pBuf->hOSMemHandle, uCurrentOffset);
/* If the CpuPAddr isn't page aligned then start by writing up to the next page
- * boundary (or ui32BytesRemaining if less), so that subsequent iterations can
+ * boundary (or uBytesRemaining if less), so that subsequent iterations can
* copy full physical pages. */
if(CpuPAddr.uiAddr & (HOST_PAGESIZE() -1))
{
- ui32BlockBytes =
- MIN(ui32BytesRemaining, (IMG_UINT32)(HOST_PAGEALIGN(CpuPAddr.uiAddr) - CpuPAddr.uiAddr));
+ uBlockBytes =
+ MIN(uBytesRemaining, (IMG_UINT32)(HOST_PAGEALIGN(CpuPAddr.uiAddr) - CpuPAddr.uiAddr));
}
pvCpuVAddr = OSMapPhysToLin(CpuPAddr,
- ui32BlockBytes,
+ uBlockBytes,
PVRSRV_HAP_KERNEL_ONLY
| (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK),
IMG_NULL);
@@ -692,15 +680,15 @@ ZeroBuf(BM_BUF *pBuf, BM_MAPPING *pMapping, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui3
PVR_DPF((PVR_DBG_ERROR, "ZeroBuf: OSMapPhysToLin while zeroing non-contiguous memory FAILED"));
return IMG_FALSE;
}
- OSMemSet(pvCpuVAddr, 0, ui32BlockBytes);
+ OSMemSet(pvCpuVAddr, 0, uBlockBytes);
OSUnMapPhysToLin(pvCpuVAddr,
- ui32BlockBytes,
+ uBlockBytes,
PVRSRV_HAP_KERNEL_ONLY
| (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK),
IMG_NULL);
- ui32BytesRemaining -= ui32BlockBytes;
- ui32CurrentOffset += ui32BlockBytes;
+ uBytesRemaining -= uBlockBytes;
+ uCurrentOffset += uBlockBytes;
}
}
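[The non-contiguous ZeroBuf() loop above maps at most one host page per iteration; when the first physical address is not page aligned, the first block only reaches the next page boundary. A self-contained sketch of that first-block computation, with a hypothetical start address:

    uintptr_t uPage   = 4096;                  /* HOST_PAGESIZE()        */
    uintptr_t uCpuP   = 0x10000A40;            /* not page aligned       */
    size_t    uRemain = 0x3000;                /* bytes left to zero     */

    uintptr_t uNext  = (uCpuP + uPage - 1) & ~(uPage - 1); /* page align */
    size_t    uBlock = (size_t)(uNext - uCpuP);            /* 0x5C0      */
    if (uBlock > uRemain)
        uBlock = uRemain;                      /* MIN(...)               */
    /* every later iteration then maps and zeroes one full page */
]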
@@ -734,9 +722,9 @@ FreeBuf (BM_BUF *pBuf, IMG_UINT32 ui32Flags, IMG_BOOL bFromAllocator)
PVRSRV_DEVICE_NODE *psDeviceNode;
PVR_DPF ((PVR_DBG_MESSAGE,
- "FreeBuf: pBuf=0x%x: DevVAddr=%08X CpuVAddr=0x%x CpuPAddr=%08X",
- (IMG_UINTPTR_T)pBuf, pBuf->DevVAddr.uiAddr,
- (IMG_UINTPTR_T)pBuf->CpuVAddr, pBuf->CpuPAddr.uiAddr));
+ "FreeBuf: pBuf=0x%p: DevVAddr=%08X CpuVAddr=0x%p CpuPAddr=" CPUPADDR_FMT,
+ pBuf, pBuf->DevVAddr.uiAddr,
+ pBuf->CpuVAddr, pBuf->CpuPAddr.uiAddr));
/* record mapping */
pMapping = pBuf->pMapping;
@@ -777,11 +765,9 @@ FreeBuf (BM_BUF *pBuf, IMG_UINT32 ui32Flags, IMG_BOOL bFromAllocator)
OSReleaseSubMemHandle(pBuf->hOSMemHandle, ui32Flags);
}
}
-
if(ui32Flags & PVRSRV_MEM_RAM_BACKED_ALLOCATION)
{
/* Submemhandle is required by exported mappings */
-
if ((pBuf->ui32ExportCount == 0) && (pBuf->ui32RefCount == 0))
{
/*
@@ -789,27 +775,17 @@ FreeBuf (BM_BUF *pBuf, IMG_UINT32 ui32Flags, IMG_BOOL bFromAllocator)
Note: currently no need to distinguish between hm_env and hm_contiguous
*/
PVR_ASSERT(pBuf->ui32ExportCount == 0);
- if (pBuf->pMapping->ui32Flags & (PVRSRV_MEM_SPARSE | PVRSRV_HAP_GPU_PAGEABLE))
+ if (pBuf->pMapping->ui32Flags & PVRSRV_MEM_SPARSE)
{
- IMG_UINT32 ui32FreeSize = 0;
- IMG_PVOID pvFreePtr = IMG_NULL;
-
- if(pBuf->pMapping->ui32Flags & PVRSRV_MEM_SPARSE)
- {
- ui32FreeSize = sizeof(IMG_BOOL) * pBuf->pMapping->ui32NumVirtChunks;
- pvFreePtr = pBuf->pMapping->pabMapChunk;
- }
-
- /* With sparse and page-able allocations we don't go through the sub-alloc RA */
+ IMG_UINT32 ui32FreeSize = sizeof(IMG_BOOL) * pBuf->pMapping->ui32NumVirtChunks;
+ IMG_PVOID pvFreePtr = pBuf->pMapping->pabMapChunk;
+
+ /* With sparse allocations we don't go through the sub-alloc RA */
BM_FreeMemory(pBuf->pMapping->pBMHeap, pBuf->DevVAddr.uiAddr, pBuf->pMapping);
-
- if(pvFreePtr)
- {
- OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
ui32FreeSize,
pvFreePtr,
IMG_NULL);
- }
}
else
{
@@ -1559,7 +1535,7 @@ BM_Alloc ( IMG_HANDLE hDevMemHeap,
BM_CONTEXT *pBMContext;
BM_HEAP *psBMHeap;
SYS_DATA *psSysData;
- IMG_UINT32 uFlags;
+ IMG_UINT32 ui32Flags;
if (pui32Flags == IMG_NULL)
{
@@ -1568,11 +1544,11 @@ BM_Alloc ( IMG_HANDLE hDevMemHeap,
return IMG_FALSE;
}
- uFlags = *pui32Flags;
+ ui32Flags = *pui32Flags;
PVR_DPF ((PVR_DBG_MESSAGE,
- "BM_Alloc (uSize=0x%x, uFlags=0x%x, uDevVAddrAlignment=0x%x)",
- uSize, uFlags, uDevVAddrAlignment));
+ "BM_Alloc (uSize=0x%" SIZE_T_FMT_LEN "x, ui32Flags=0x%x, uDevVAddrAlignment=0x%x)",
+ uSize, ui32Flags, uDevVAddrAlignment));
SysAcquireData(&psSysData);
@@ -1604,7 +1580,7 @@ BM_Alloc ( IMG_HANDLE hDevMemHeap,
psBMHeap,
psDevVAddr,
uSize,
- uFlags,
+ ui32Flags,
uDevVAddrAlignment,
pvPrivData,
ui32PrivDataLength,
@@ -1621,24 +1597,24 @@ BM_Alloc ( IMG_HANDLE hDevMemHeap,
}
PVR_DPF ((PVR_DBG_MESSAGE,
- "BM_Alloc (uSize=0x%x, uFlags=0x%x)",
- uSize, uFlags));
+ "BM_Alloc (uSize=0x%" SIZE_T_FMT_LEN "x, ui32Flags=0x%x)",
+ uSize, ui32Flags));
/*
* Assign the handle and return.
*/
pBuf->ui32RefCount = 1;
*phBuf = (BM_HANDLE)pBuf;
- *pui32Flags = uFlags | psBMHeap->ui32Attribs;
+ *pui32Flags = ui32Flags | psBMHeap->ui32Attribs;
/*
* If the user has specified heap CACHETYPE flags themselves,
* override any CACHETYPE flags inherited from the heap.
*/
- if(uFlags & PVRSRV_HAP_CACHETYPE_MASK)
+ if(ui32Flags & PVRSRV_HAP_CACHETYPE_MASK)
{
*pui32Flags &= ~PVRSRV_HAP_CACHETYPE_MASK;
- *pui32Flags |= (uFlags & PVRSRV_HAP_CACHETYPE_MASK);
+ *pui32Flags |= (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK);
}
return IMG_TRUE;
@@ -1657,13 +1633,13 @@ BM_Alloc ( IMG_HANDLE hDevMemHeap,
@Input psDeviceNode
@Input psSysPAddr - system address array
- @Input ui32PageSize - size of address array
+ @Input uPageSize - size of address array
@Return IMG_BOOL
*****************************************************************************/
static IMG_BOOL
-ValidSysPAddrArrayForDev(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_SYS_PHYADDR *psSysPAddr, IMG_UINT32 ui32PageCount, IMG_SIZE_T ui32PageSize)
+ValidSysPAddrArrayForDev(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_SYS_PHYADDR *psSysPAddr, IMG_UINT32 ui32PageCount, IMG_SIZE_T uPageSize)
{
IMG_UINT32 i;
@@ -1677,7 +1653,7 @@ ValidSysPAddrArrayForDev(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_SYS_PHYADDR *psSy
return IMG_FALSE;
}
- sEndSysPAddr.uiAddr = sStartSysPAddr.uiAddr + ui32PageSize;
+ sEndSysPAddr.uiAddr = sStartSysPAddr.uiAddr + uPageSize;
if (!SysVerifySysPAddrToDevPAddr(psDeviceNode->sDevId.eDeviceType, sEndSysPAddr))
{
@@ -1704,7 +1680,7 @@ ValidSysPAddrArrayForDev(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_SYS_PHYADDR *psSy
*****************************************************************************/
static IMG_BOOL
-ValidSysPAddrRangeForDev(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_SYS_PHYADDR sStartSysPAddr, IMG_SIZE_T ui32Range)
+ValidSysPAddrRangeForDev(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_SYS_PHYADDR sStartSysPAddr, IMG_SIZE_T uRange)
{
IMG_SYS_PHYADDR sEndSysPAddr;
@@ -1713,7 +1689,7 @@ ValidSysPAddrRangeForDev(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_SYS_PHYADDR sStar
return IMG_FALSE;
}
- sEndSysPAddr.uiAddr = sStartSysPAddr.uiAddr + ui32Range;
+ sEndSysPAddr.uiAddr = sStartSysPAddr.uiAddr + uRange;
if (!SysVerifySysPAddrToDevPAddr(psDeviceNode->sDevId.eDeviceType, sEndSysPAddr))
{
@@ -1723,9 +1699,9 @@ ValidSysPAddrRangeForDev(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_SYS_PHYADDR sStar
return IMG_TRUE;
}
-#define WRAP_MAPPING_SIZE(ui32ByteSize, ui32PageOffset) HOST_PAGEALIGN((ui32ByteSize) + (ui32PageOffset))
+#define WRAP_MAPPING_SIZE(uByteSize, uPageOffset) HOST_PAGEALIGN((uByteSize) + (uPageOffset))
-#define WRAP_PAGE_COUNT(ui32ByteSize, ui32PageOffset, ui32HostPageSize) (WRAP_MAPPING_SIZE(ui32ByteSize, ui32PageOffset) / (ui32HostPageSize))
+#define WRAP_PAGE_COUNT(uByteSize, uPageOffset, uHostPageSize) (WRAP_MAPPING_SIZE(uByteSize, uPageOffset) / (uHostPageSize))
#endif
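[A worked example of the two renamed macros, assuming a 4 KiB host page:

    IMG_SIZE_T uByteSize     = 0x2100;  /* caller's wrap size         */
    IMG_SIZE_T uPageOffset   = 0x0E00;  /* offset into the first page */
    IMG_SIZE_T uHostPageSize = 0x1000;

    /* WRAP_MAPPING_SIZE: HOST_PAGEALIGN(0x2100 + 0x0E00) == 0x3000 */
    IMG_SIZE_T uMapSize = (uByteSize + uPageOffset + uHostPageSize - 1)
                          & ~(uHostPageSize - 1);
    /* WRAP_PAGE_COUNT: 0x3000 / 0x1000 == 3 pages to validate */
    IMG_SIZE_T uPages = uMapSize / uHostPageSize;
]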
@@ -1740,12 +1716,12 @@ ValidSysPAddrRangeForDev(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_SYS_PHYADDR sStar
The wrapped memory must be page aligned. BM_Wrap will
roundup the size to a multiple of cpu pages.
- @Input ui32Size - size of memory to wrap.
+ @Input uSize - size of memory to wrap.
@Input ui32Offset - Offset into page of memory to wrap.
@Input bPhysContig - Is the wrap physically contiguous.
@Input psSysAddr - list of system physical page addresses of memory to wrap.
@Input pvCPUVAddr - optional CPU kernel virtual address (Page aligned) of memory to wrap.
- @Input uFlags - bit mask of buffer property flags.
+ @Input ui32Flags - bit mask of buffer property flags.
@output phBuf - receives the buffer handle.
@Return IMG_TRUE - Success.
@@ -1754,8 +1730,8 @@ ValidSysPAddrRangeForDev(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_SYS_PHYADDR sStar
*****************************************************************************/
IMG_BOOL
BM_Wrap ( IMG_HANDLE hDevMemHeap,
- IMG_SIZE_T ui32Size,
- IMG_SIZE_T ui32Offset,
+ IMG_SIZE_T uSize,
+ IMG_SIZE_T uOffset,
IMG_BOOL bPhysContig,
IMG_SYS_PHYADDR *psSysAddr,
IMG_VOID *pvCPUVAddr,
@@ -1767,29 +1743,41 @@ BM_Wrap ( IMG_HANDLE hDevMemHeap,
BM_HEAP *psBMHeap;
SYS_DATA *psSysData;
IMG_SYS_PHYADDR sHashAddress;
- IMG_UINT32 uFlags;
+ IMG_UINT32 ui32Flags;
psBMHeap = (BM_HEAP*)hDevMemHeap;
psBMContext = psBMHeap->pBMContext;
- uFlags = psBMHeap->ui32Attribs & (PVRSRV_HAP_CACHETYPE_MASK | PVRSRV_HAP_MAPTYPE_MASK | PVRSRV_HAP_MAPPING_CTRL_MASK);
+ ui32Flags = psBMHeap->ui32Attribs & (PVRSRV_HAP_CACHETYPE_MASK | PVRSRV_HAP_MAPTYPE_MASK);
if ((pui32Flags != IMG_NULL) && ((*pui32Flags & PVRSRV_HAP_CACHETYPE_MASK) != 0))
{
- uFlags &= ~PVRSRV_HAP_CACHETYPE_MASK;
- uFlags |= *pui32Flags & PVRSRV_HAP_CACHETYPE_MASK;
+ ui32Flags &= ~PVRSRV_HAP_CACHETYPE_MASK;
+ ui32Flags |= *pui32Flags & PVRSRV_HAP_CACHETYPE_MASK;
+ }
+
+ if ((pui32Flags != IMG_NULL) && ((*pui32Flags & (PVRSRV_MEM_READ | PVRSRV_MEM_WRITE)) != 0))
+ {
+ ui32Flags &= ~(PVRSRV_MEM_READ | PVRSRV_MEM_WRITE);
+ ui32Flags |= *pui32Flags & (PVRSRV_MEM_READ | PVRSRV_MEM_WRITE);
}
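[The new block above lets the caller's R/W bits override the heap defaults, mirroring the existing cache-type override. A compact sketch of the pattern, with hypothetical mask values:

    IMG_UINT32 CACHE_MASK = 0x0F00, RW_MASK = 0x0003;  /* hypothetical */
    IMG_UINT32 uiHeap = 0x0230;      /* heap attribs */
    IMG_UINT32 uiUser = 0x0103;      /* *pui32Flags  */

    IMG_UINT32 f = uiHeap;
    if (uiUser & CACHE_MASK)
        f = (f & ~CACHE_MASK) | (uiUser & CACHE_MASK);  /* f == 0x0130 */
    if (uiUser & RW_MASK)
        f = (f & ~RW_MASK)    | (uiUser & RW_MASK);     /* f == 0x0133 */
]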
PVR_DPF ((PVR_DBG_MESSAGE,
- "BM_Wrap (uSize=0x%x, uOffset=0x%x, bPhysContig=0x%x, pvCPUVAddr=0x%x, uFlags=0x%x)",
- ui32Size, ui32Offset, bPhysContig, (IMG_UINTPTR_T)pvCPUVAddr, uFlags));
+ "BM_Wrap (uSize=0x%" SIZE_T_FMT_LEN "x, uOffset=0x%" SIZE_T_FMT_LEN
+ "x, bPhysContig=0x%x, syspAddr=0x" SYSPADDR_FMT ", pvCPUVAddr=0x%p, ui32Flags=0x%x)",
+ uSize,
+ uOffset,
+ bPhysContig,
+ psSysAddr->uiAddr,
+ pvCPUVAddr,
+ ui32Flags));
SysAcquireData(&psSysData);
#if defined(PVR_LMA)
if (bPhysContig)
{
- if (!ValidSysPAddrRangeForDev(psBMContext->psDeviceNode, *psSysAddr, WRAP_MAPPING_SIZE(ui32Size, ui32Offset)))
+ if (!ValidSysPAddrRangeForDev(psBMContext->psDeviceNode, *psSysAddr, WRAP_MAPPING_SIZE(uSize, uOffset)))
{
PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: System address range invalid for device"));
return IMG_FALSE;
@@ -1797,9 +1785,9 @@ BM_Wrap ( IMG_HANDLE hDevMemHeap,
}
else
{
- IMG_SIZE_T ui32HostPageSize = HOST_PAGESIZE();
+ IMG_SIZE_T uHostPageSize = HOST_PAGESIZE();
- if (!ValidSysPAddrArrayForDev(psBMContext->psDeviceNode, psSysAddr, WRAP_PAGE_COUNT(ui32Size, ui32Offset, ui32HostPageSize), ui32HostPageSize))
+ if (!ValidSysPAddrArrayForDev(psBMContext->psDeviceNode, psSysAddr, WRAP_PAGE_COUNT(uSize, uOffset, uHostPageSize), uHostPageSize))
{
PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: Array of system addresses invalid for device"));
return IMG_FALSE;
@@ -1813,27 +1801,29 @@ BM_Wrap ( IMG_HANDLE hDevMemHeap,
sHashAddress = psSysAddr[0];
/* Add the in-page offset to ensure a unique hash */
- sHashAddress.uiAddr += ui32Offset;
+ sHashAddress.uiAddr += uOffset;
- /* See if this address has already been wrapped */
- pBuf = (BM_BUF *)HASH_Retrieve(psBMContext->pBufferHash, sHashAddress.uiAddr);
+ /* See if this address has already been wrapped; the cast is OK as this is only local memory */
+ pBuf = (BM_BUF *)HASH_Retrieve(psBMContext->pBufferHash, (IMG_UINTPTR_T)sHashAddress.uiAddr);
if(pBuf)
{
- IMG_SIZE_T ui32MappingSize = HOST_PAGEALIGN (ui32Size + ui32Offset);
+ IMG_SIZE_T uMappingSize = HOST_PAGEALIGN (uSize + uOffset);
/* Check base address, size and contiguity type match */
- if(pBuf->pMapping->uSize == ui32MappingSize && (pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped ||
+ if(pBuf->pMapping->uSize == uMappingSize && (pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped ||
pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped_virtaddr))
{
PVR_DPF((PVR_DBG_MESSAGE,
- "BM_Wrap (Matched previous Wrap! uSize=0x%x, uOffset=0x%x, SysAddr=%08X)",
- ui32Size, ui32Offset, sHashAddress.uiAddr));
+ "BM_Wrap (Matched previous Wrap! uSize=0x%" SIZE_T_FMT_LEN "x, uOffset=0x%" SIZE_T_FMT_LEN "x, SysAddr=" SYSPADDR_FMT ")",
+ uSize,
+ uOffset,
+ sHashAddress.uiAddr));
PVRSRVBMBufIncRef(pBuf);
*phBuf = (BM_HANDLE)pBuf;
if(pui32Flags)
- *pui32Flags = uFlags;
+ *pui32Flags = ui32Flags;
return IMG_TRUE;
}
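[The dedup key above is the first page's system physical address plus the in-page offset, so wrapping the same pages at two different offsets yields two distinct hash entries. A sketch of the lookup, with hypothetical values:

    IMG_UINT64 uiSysBase = 0x80000000ULL;        /* psSysAddr[0].uiAddr */
    IMG_SIZE_T uOffset   = 0x0A00;
    IMG_UINT64 uiKey     = uiSysBase + uOffset;  /* sHashAddress.uiAddr */
    /* HASH_Retrieve(pBufferHash, (IMG_UINTPTR_T)uiKey) only reuses a
     * prior BM_BUF when the page-aligned size and wrap type also match;
     * otherwise a fresh WrapMemory() + HASH_Insert() follows. */
]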
@@ -1861,7 +1851,7 @@ BM_Wrap ( IMG_HANDLE hDevMemHeap,
/*
* Actually perform the memory wrap.
*/
- if (WrapMemory (psBMHeap, ui32Size, ui32Offset, bPhysContig, psSysAddr, pvCPUVAddr, uFlags, pBuf) != IMG_TRUE)
+ if (WrapMemory (psBMHeap, uSize, uOffset, bPhysContig, psSysAddr, pvCPUVAddr, ui32Flags, pBuf) != IMG_TRUE)
{
PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: WrapMemory FAILED"));
OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof (BM_BUF), pBuf, IMG_NULL);
@@ -1877,17 +1867,17 @@ BM_Wrap ( IMG_HANDLE hDevMemHeap,
/* Have we calculated the right Hash key ? */
PVR_ASSERT(SysSysPAddrToCpuPAddr(sHashAddress).uiAddr == pBuf->CpuPAddr.uiAddr);
- if (!HASH_Insert (psBMContext->pBufferHash, sHashAddress.uiAddr, (IMG_UINTPTR_T)pBuf))
+ if (!HASH_Insert (psBMContext->pBufferHash, (IMG_UINTPTR_T)sHashAddress.uiAddr, (IMG_UINTPTR_T)pBuf))
{
- FreeBuf (pBuf, uFlags, IMG_TRUE);
+ FreeBuf (pBuf, ui32Flags, IMG_TRUE);
PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: HASH_Insert FAILED"));
return IMG_FALSE;
}
}
PVR_DPF ((PVR_DBG_MESSAGE,
- "BM_Wrap (uSize=0x%x, uFlags=0x%x, devVAddr=%08X)",
- ui32Size, uFlags, pBuf->DevVAddr.uiAddr));
+ "BM_Wrap (uSize=0x%" SIZE_T_FMT_LEN "x, ui32Flags=0x%x, devVAddr=%08X)",
+ uSize, ui32Flags, pBuf->DevVAddr.uiAddr));
/*
* Assign the handle and return.
@@ -1897,7 +1887,7 @@ BM_Wrap ( IMG_HANDLE hDevMemHeap,
if(pui32Flags)
{
/* need to override the heap attributes SINGLE PROC to MULT_PROC. */
- *pui32Flags = (uFlags & ~PVRSRV_HAP_MAPTYPE_MASK) | PVRSRV_HAP_MULTI_PROCESS;
+ *pui32Flags = (ui32Flags & ~PVRSRV_HAP_MAPTYPE_MASK) | PVRSRV_HAP_MULTI_PROCESS;
}
return IMG_TRUE;
@@ -1964,7 +1954,7 @@ BM_Free (BM_HANDLE hBuf,
SYS_DATA *psSysData;
IMG_SYS_PHYADDR sHashAddr;
- PVR_DPF ((PVR_DBG_MESSAGE, "BM_Free (h=0x%x)", (IMG_UINTPTR_T)hBuf));
+ PVR_DPF ((PVR_DBG_MESSAGE, "BM_Free (h=0x%p)", hBuf));
PVR_ASSERT (pBuf!=IMG_NULL);
if (pBuf == IMG_NULL)
@@ -2014,8 +2004,8 @@ BM_HandleToCpuVaddr (BM_HANDLE hBuf)
}
PVR_DPF ((PVR_DBG_MESSAGE,
- "BM_HandleToCpuVaddr(h=0x%x)=0x%x",
- (IMG_UINTPTR_T)hBuf, (IMG_UINTPTR_T)pBuf->CpuVAddr));
+ "BM_HandleToCpuVaddr(h=0x%p)=0x%p",
+ hBuf, pBuf->CpuVAddr));
return pBuf->CpuVAddr;
}
@@ -2045,7 +2035,7 @@ BM_HandleToDevVaddr (BM_HANDLE hBuf)
return DevVAddr;
}
- PVR_DPF ((PVR_DBG_MESSAGE, "BM_HandleToDevVaddr(h=0x%x)=%08X", (IMG_UINTPTR_T)hBuf, pBuf->DevVAddr.uiAddr));
+ PVR_DPF ((PVR_DBG_MESSAGE, "BM_HandleToDevVaddr(h=0x%p)=%08X", hBuf, pBuf->DevVAddr.uiAddr));
return pBuf->DevVAddr;
}
@@ -2076,7 +2066,7 @@ BM_HandleToSysPaddr (BM_HANDLE hBuf)
return PhysAddr;
}
- PVR_DPF ((PVR_DBG_MESSAGE, "BM_HandleToSysPaddr(h=0x%x)=%08X", (IMG_UINTPTR_T)hBuf, pBuf->CpuPAddr.uiAddr));
+ PVR_DPF ((PVR_DBG_MESSAGE, "BM_HandleToSysPaddr(h=0lx%p)=" CPUPADDR_FMT, hBuf, pBuf->CpuPAddr.uiAddr));
return SysCpuPAddrToSysPAddr (pBuf->CpuPAddr);
}
@@ -2106,106 +2096,11 @@ BM_HandleToOSMemHandle(BM_HANDLE hBuf)
}
PVR_DPF ((PVR_DBG_MESSAGE,
- "BM_HandleToOSMemHandle(h=0x%x)=0x%x",
- (IMG_UINTPTR_T)hBuf, (IMG_UINTPTR_T)pBuf->hOSMemHandle));
+ "BM_HandleToOSMemHandle(h=0x%p)=0x%p",
+ hBuf, pBuf->hOSMemHandle));
return pBuf->hOSMemHandle;
}
-/*----------------------------------------------------------------------------
-<function>
- FUNCTION: BM_UnmapFromDev
-
- PURPOSE: Unmaps a buffer from GPU virtual address space, but otherwise
- leaves buffer intact (ie. not changing any CPU virtual space
- mappings, etc). This in conjunction with BM_RemapToDev() can
- be used to migrate buffers in and out of GPU virtual address
- space to deal with fragmentation and/or limited size of GPU
- MMU.
-
- PARAMETERS: In: hBuf - buffer handle.
- RETURNS: IMG_TRUE - Success
- IMG_FALSE - Failure
-</function>
------------------------------------------------------------------------------*/
-IMG_INT32
-BM_UnmapFromDev(BM_HANDLE hBuf)
-{
- BM_BUF *pBuf = (BM_BUF *)hBuf;
- BM_MAPPING *pMapping;
- IMG_INT32 result;
-
- PVR_ASSERT (pBuf != IMG_NULL);
-
- if (pBuf == IMG_NULL)
- {
- PVR_DPF((PVR_DBG_ERROR, "BM_UnmapFromDev: invalid parameter"));
- return -(PVRSRV_ERROR_INVALID_PARAMS);
- }
-
- pMapping = pBuf->pMapping;
-
- if ((pMapping->ui32Flags & PVRSRV_HAP_GPU_PAGEABLE) == 0)
- {
- PVR_DPF((PVR_DBG_ERROR, "BM_UnmapFromDev: cannot unmap non-pageable buffer"));
- return -(PVRSRV_ERROR_STILL_MAPPED);
- }
-
- result = DevMemoryFree(pMapping);
-
- if(result == 0)
- pBuf->DevVAddr.uiAddr = PVRSRV_BAD_DEVICE_ADDRESS;
-
- return result;
-}
-
-/*----------------------------------------------------------------------------
-<function>
- FUNCTION: BM_RemapToDev
-
- PURPOSE: Maps a buffer back into GPU virtual address space, after it
- has been BM_UnmapFromDev()'d. After this operation, the GPU
- virtual address may have changed, so BM_HandleToDevVaddr()
- should be called to get the new address.
-
- PARAMETERS: In: hBuf - buffer handle.
- RETURNS: IMG_TRUE - Success
- IMG_FALSE - Failure
-</function>
------------------------------------------------------------------------------*/
-IMG_INT32
-BM_RemapToDev(BM_HANDLE hBuf)
-{
- BM_BUF *pBuf = (BM_BUF *)hBuf;
- BM_MAPPING *pMapping;
- IMG_INT32 mapCount;
-
- PVR_ASSERT (pBuf != IMG_NULL);
-
- if (pBuf == IMG_NULL)
- {
- PVR_DPF((PVR_DBG_ERROR, "BM_RemapToDev: invalid parameter"));
- return -PVRSRV_ERROR_INVALID_PARAMS;
- }
-
- pMapping = pBuf->pMapping;
-
- if ((pMapping->ui32Flags & PVRSRV_HAP_GPU_PAGEABLE) == 0)
- {
- PVR_DPF((PVR_DBG_ERROR, "BM_RemapToDev: cannot remap non-pageable buffer"));
- return -PVRSRV_ERROR_BAD_MAPPING;
- }
-
- mapCount = DevMemoryAlloc(pMapping->pBMHeap->pBMContext, pMapping, IMG_NULL,
- pMapping->ui32Flags, pMapping->ui32DevVAddrAlignment, &pBuf->DevVAddr);
-
- if(mapCount <= 0)
- {
- PVR_DPF((PVR_DBG_WARNING, "BM_RemapToDev: failed to allocate device memory"));
- }
-
- return mapCount;
-}
-
/*!
******************************************************************************
@@ -2224,52 +2119,48 @@ BM_RemapToDev(BM_HANDLE hBuf)
allocation.
@Output pActualSize - the actual size of the block allocated in
bytes.
- @Input uFlags - allocation flags
+ @Input ui32Flags - allocation flags
@Input dev_vaddr_alignment - required device virtual address
alignment, or 0.
@Output pDevVAddr - receives the device virtual base address of the
allocated block.
- @Return IMG_INT32 - Reference count
- -1 - Failed.
+ @Return IMG_TRUE - Success
+ IMG_FALSE - Failed.
*****************************************************************************/
-static IMG_INT32
+static IMG_BOOL
DevMemoryAlloc (BM_CONTEXT *pBMContext,
BM_MAPPING *pMapping,
IMG_SIZE_T *pActualSize,
- IMG_UINT32 uFlags,
+ IMG_UINT32 ui32Flags,
IMG_UINT32 dev_vaddr_alignment,
IMG_DEV_VIRTADDR *pDevVAddr)
{
PVRSRV_DEVICE_NODE *psDeviceNode;
#ifdef PDUMP
IMG_UINT32 ui32PDumpSize = (IMG_UINT32)pMapping->uSize;
+ IMG_UINT32 ui32PDumpFlags;
#endif
- if(pMapping->ui32MappingCount > 0)
- {
- pMapping->ui32MappingCount++;
- *pDevVAddr = pMapping->DevVAddr;
- return pMapping->ui32MappingCount;
- }
-
psDeviceNode = pBMContext->psDeviceNode;
- pMapping->ui32DevVAddrAlignment = dev_vaddr_alignment;
+#ifdef PDUMP
+#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
+ ui32PDumpFlags = psDeviceNode->pfnMMUIsHeapShared(pMapping->pBMHeap->pMMUHeap)
+ ? PDUMP_FLAGS_PERSISTENT : PDUMP_FLAGS_CONTINUOUS;
+#else
+ ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS;
+#endif
+#endif
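[The same #ifdef block now appears in both DevMemoryAlloc() and DevMemoryFree() (see the matching hunk below). A hedged sketch of how the duplicated selection could be factored into a helper, using only the callback the patch itself calls:

    static IMG_UINT32 GetPDumpFlags(PVRSRV_DEVICE_NODE *psDeviceNode,
                                    BM_MAPPING *pMapping)
    {
    #if defined(SUPPORT_PDUMP_MULTI_PROCESS)
        /* shared heaps must persist across per-process captures */
        return psDeviceNode->pfnMMUIsHeapShared(pMapping->pBMHeap->pMMUHeap)
                ? PDUMP_FLAGS_PERSISTENT : PDUMP_FLAGS_CONTINUOUS;
    #else
        PVR_UNREFERENCED_PARAMETER(psDeviceNode);
        PVR_UNREFERENCED_PARAMETER(pMapping);
        return PDUMP_FLAGS_CONTINUOUS;
    #endif
    }
]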
- if(uFlags & PVRSRV_MEM_INTERLEAVED)
+ if(ui32Flags & PVRSRV_MEM_INTERLEAVED)
{
/* double the size */
- /* don't continue to alter the size each time a buffer is remapped..
- * we only want to do this the first time
- */
- /* TODO: FIXME: There is something wrong with this logic */
- if (pMapping->ui32MappingCount == 0)
- pMapping->uSize *= 2;
+ pMapping->uSize *= 2;
}
#ifdef PDUMP
- if(uFlags & PVRSRV_MEM_DUMMY)
+ if(ui32Flags & PVRSRV_MEM_DUMMY)
{
/* only one page behind a dummy allocation */
ui32PDumpSize = pMapping->pBMHeap->sDevArena.ui32DataPageSize;
@@ -2287,8 +2178,7 @@ DevMemoryAlloc (BM_CONTEXT *pBMContext,
&(pMapping->DevVAddr)))
{
PVR_DPF((PVR_DBG_ERROR, "DevMemoryAlloc ERROR MMU_Alloc"));
- pDevVAddr->uiAddr = PVRSRV_BAD_DEVICE_ADDRESS;
- return -(PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY);
+ return IMG_FALSE;
}
#ifdef SUPPORT_SGX_MMU_BYPASS
@@ -2303,12 +2193,8 @@ DevMemoryAlloc (BM_CONTEXT *pBMContext,
pMapping->hOSMemHandle,
ui32PDumpSize,
pMapping->pBMHeap->sDevArena.ui32DataPageSize,
-#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
- psDeviceNode->pfnMMUIsHeapShared(pMapping->pBMHeap->pMMUHeap),
-#else
- IMG_FALSE, // unused
-#endif /* SUPPORT_PDUMP_MULTI_PROCESS */
- (IMG_HANDLE)pMapping);
+ (IMG_HANDLE)pMapping,
+ ui32PDumpFlags);
#endif
switch (pMapping->eCpuMemoryOrigin)
@@ -2317,7 +2203,7 @@ DevMemoryAlloc (BM_CONTEXT *pBMContext,
case hm_wrapped_virtaddr:
case hm_contiguous:
{
- if (uFlags & PVRSRV_MEM_SPARSE)
+ if (ui32Flags & PVRSRV_MEM_SPARSE)
{
/* Check if this device supports sparse mappings */
PVR_ASSERT(psDeviceNode->pfnMMUMapPagesSparse != IMG_NULL);
@@ -2328,7 +2214,7 @@ DevMemoryAlloc (BM_CONTEXT *pBMContext,
pMapping->ui32NumVirtChunks,
pMapping->ui32NumPhysChunks,
pMapping->pabMapChunk,
- uFlags,
+ ui32Flags,
(IMG_HANDLE)pMapping);
}
else
@@ -2337,7 +2223,7 @@ DevMemoryAlloc (BM_CONTEXT *pBMContext,
pMapping->DevVAddr,
SysCpuPAddrToSysPAddr (pMapping->CpuPAddr),
pMapping->uSize,
- uFlags,
+ ui32Flags,
(IMG_HANDLE)pMapping);
}
*pDevVAddr = pMapping->DevVAddr;
@@ -2345,7 +2231,7 @@ DevMemoryAlloc (BM_CONTEXT *pBMContext,
}
case hm_env:
{
- if (uFlags & PVRSRV_MEM_SPARSE)
+ if (ui32Flags & PVRSRV_MEM_SPARSE)
{
/* Check if this device supports sparse mappings */
PVR_ASSERT(psDeviceNode->pfnMMUMapShadowSparse != IMG_NULL);
@@ -2358,7 +2244,7 @@ DevMemoryAlloc (BM_CONTEXT *pBMContext,
pMapping->CpuVAddr,
pMapping->hOSMemHandle,
pDevVAddr,
- uFlags,
+ ui32Flags,
(IMG_HANDLE)pMapping);
}
else
@@ -2369,7 +2255,7 @@ DevMemoryAlloc (BM_CONTEXT *pBMContext,
pMapping->CpuVAddr,
pMapping->hOSMemHandle,
pDevVAddr,
- uFlags,
+ ui32Flags,
(IMG_HANDLE)pMapping);
}
break;
@@ -2381,7 +2267,7 @@ DevMemoryAlloc (BM_CONTEXT *pBMContext,
pMapping->DevVAddr,
pMapping->psSysAddr,
pMapping->uSize,
- uFlags,
+ ui32Flags,
(IMG_HANDLE)pMapping);
*pDevVAddr = pMapping->DevVAddr;
@@ -2391,47 +2277,38 @@ DevMemoryAlloc (BM_CONTEXT *pBMContext,
PVR_DPF((PVR_DBG_ERROR,
"Illegal value %d for pMapping->eCpuMemoryOrigin",
pMapping->eCpuMemoryOrigin));
- return -(PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE);
+ return IMG_FALSE;
}
#ifdef SUPPORT_SGX_MMU_BYPASS
DisableHostAccess(pBMContext->psMMUContext);
#endif
- pMapping->ui32MappingCount = 1;
-
- return pMapping->ui32MappingCount;
+ return IMG_TRUE;
}
-static IMG_INT32
+static IMG_VOID
DevMemoryFree (BM_MAPPING *pMapping)
{
PVRSRV_DEVICE_NODE *psDeviceNode;
IMG_DEV_PHYADDR sDevPAddr;
#ifdef PDUMP
IMG_UINT32 ui32PSize;
+ IMG_UINT32 ui32PDumpFlags;
#endif
- if(pMapping->ui32MappingCount > 1)
- {
- pMapping->ui32MappingCount--;
-
- /* Nothing else to do for now */
- return pMapping->ui32MappingCount;
- }
-
- if (pMapping->ui32MappingCount == 0)
- {
- /* already unmapped from GPU.. bail */
- return -(PVRSRV_ERROR_MAPPING_NOT_FOUND);
- }
-
- /* Then pMapping->ui32MappingCount is 1
- * ready to release GPU mapping */
-
psDeviceNode = pMapping->pBMHeap->pBMContext->psDeviceNode;
sDevPAddr = psDeviceNode->pfnMMUGetPhysPageAddr(pMapping->pBMHeap->pMMUHeap, pMapping->DevVAddr);
+#ifdef PDUMP
+#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
+ ui32PDumpFlags = psDeviceNode->pfnMMUIsHeapShared(pMapping->pBMHeap->pMMUHeap)
+ ? PDUMP_FLAGS_PERSISTENT : PDUMP_FLAGS_CONTINUOUS;
+#else
+ ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS;
+#endif
+#endif
+
if (sDevPAddr.uiAddr != 0)
{
#ifdef PDUMP
@@ -2452,20 +2329,17 @@ DevMemoryFree (BM_MAPPING *pMapping)
pMapping->pBMHeap->sDevArena.ui32DataPageSize,
(IMG_HANDLE)pMapping,
(pMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED) ? IMG_TRUE : IMG_FALSE,
- (pMapping->ui32Flags & PVRSRV_MEM_SPARSE) ? IMG_TRUE : IMG_FALSE);
+ (pMapping->ui32Flags & PVRSRV_MEM_SPARSE) ? IMG_TRUE : IMG_FALSE,
+ ui32PDumpFlags);
#endif
}
PVR_ASSERT(pMapping->uSizeVM != 0);
psDeviceNode->pfnMMUFree (pMapping->pBMHeap->pMMUHeap, pMapping->DevVAddr, IMG_CAST_TO_DEVVADDR_UINT(pMapping->uSizeVM));
-
- pMapping->ui32MappingCount = 0;
-
- return pMapping->ui32MappingCount;
}
/* If this array grows larger, it might be preferable to use a hashtable rather than an array. */
#ifndef XPROC_WORKAROUND_NUM_SHAREABLES
-#define XPROC_WORKAROUND_NUM_SHAREABLES 500
+#define XPROC_WORKAROUND_NUM_SHAREABLES 200
#endif
#define XPROC_WORKAROUND_BAD_SHAREINDEX 0773407734
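[With a fixed-size array, finding a free share slot stays a linear scan, which is why the comment above suggests a hashtable if the table grows; shrinking it from 500 to 200 entries keeps the static footprint small. A sketch of the scan, assuming the ui32RefCount field visible elsewhere in this diff:

    IMG_UINT32 i;
    for (i = 0; i < XPROC_WORKAROUND_NUM_SHAREABLES; i++)
    {
        if (gXProcWorkaroundShareData[i].ui32RefCount == 0)
            break;   /* first unused slot */
    }
    /* i == XPROC_WORKAROUND_NUM_SHAREABLES -> no slot free */
]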
@@ -2480,14 +2354,6 @@ static IMG_UINT32 gXProcWorkaroundState = XPROC_WORKAROUND_UNKNOWN;
/* PRQA S 0686 10 */ /* force compiler to init structure */
XPROC_DATA gXProcWorkaroundShareData[XPROC_WORKAROUND_NUM_SHAREABLES] = {{0}};
-IMG_INT32 BM_XProcGetShareDataRefCount(IMG_UINT32 ui32Index)
-{
- if(ui32Index >= XPROC_WORKAROUND_NUM_SHAREABLES)
- return -1;
-
- return gXProcWorkaroundShareData[ui32Index].ui32RefCount;
-}
-
PVRSRV_ERROR BM_XProcWorkaroundSetShareIndex(IMG_UINT32 ui32Index)
{
/* if you fail this assertion - did you acquire the mutex?
@@ -2581,8 +2447,7 @@ XProcWorkaroundAllocShareable(RA_ARENA *psArena,
if (ui32AllocFlags != gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32AllocFlags)
{
PVR_DPF((PVR_DBG_ERROR,
- "%s ERROR: Flags don't match (Shared 0x%08x, Requested 0x%08x)!",
- __FUNCTION__,
+ "Can't! Flags don't match! (I had 0x%08x, you gave 0x%08x)",
gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32AllocFlags,
ui32AllocFlags));
return PVRSRV_ERROR_INVALID_PARAMS;
@@ -2591,24 +2456,14 @@ XProcWorkaroundAllocShareable(RA_ARENA *psArena,
if (ui32Size != gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32Size)
{
PVR_DPF((PVR_DBG_ERROR,
- "%s ERROR: Size doesn't match (Shared %d, Requested %d) with flags 0x%08x - 0x%08x!",
- __FUNCTION__,
- gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32Size,
- ui32Size,
- gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32AllocFlags,
- ui32AllocFlags));
+ "Can't! Size doesn't match!"));
return PVRSRV_ERROR_INVALID_PARAMS;
}
if (ui32PageSize != gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32PageSize)
{
PVR_DPF((PVR_DBG_ERROR,
- "%s ERROR: Page Size doesn't match (Shared %d, Requested %d) with flags 0x%08x - 0x%08x!",
- __FUNCTION__,
- gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32PageSize,
- ui32PageSize,
- gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32AllocFlags,
- ui32AllocFlags));
+ "Can't! Page Size doesn't match!"));
return PVRSRV_ERROR_INVALID_PARAMS;
}
@@ -2780,7 +2635,7 @@ IMG_VOID _BM_XProcIndexRelease(IMG_UINT32 ui32Index)
}
sSysPAddr = gXProcWorkaroundShareData[ui32Index].sSysPAddr;
RA_Free (gXProcWorkaroundShareData[ui32Index].psArena,
- sSysPAddr.uiAddr,
+ (IMG_UINTPTR_T)sSysPAddr.uiAddr,
IMG_FALSE);
}
else
@@ -2827,7 +2682,7 @@ static IMG_VOID XProcWorkaroundFreeShareable(IMG_HANDLE hOSMemHandle)
which may be >= requested size
@Output ppsMapping - receives the arbitrary user reference
associated with the underlying storage.
- @Input uFlags - bit mask of allocation flags
+ @Input ui32Flags - bit mask of allocation flags
@Input pvPrivData - opaque private data passed through to allocator
@Input ui32PrivDataLength - length of opaque private data
@Output pBase - receives a pointer to the allocated storage.
@@ -2841,7 +2696,7 @@ BM_ImportMemory (IMG_VOID *pH,
IMG_SIZE_T uRequestSize,
IMG_SIZE_T *pActualSize,
BM_MAPPING **ppsMapping,
- IMG_UINT32 uFlags,
+ IMG_UINT32 ui32Flags,
IMG_PVOID pvPrivData,
IMG_UINT32 ui32PrivDataLength,
IMG_UINTPTR_T *pBase)
@@ -2849,14 +2704,15 @@ BM_ImportMemory (IMG_VOID *pH,
BM_MAPPING *pMapping;
BM_HEAP *pBMHeap = pH;
BM_CONTEXT *pBMContext = pBMHeap->pBMContext;
- IMG_INT32 uResult;
+ IMG_BOOL bResult;
IMG_SIZE_T uSize;
IMG_SIZE_T uPSize;
IMG_SIZE_T uDevVAddrAlignment = 0; /* ? */
PVR_DPF ((PVR_DBG_MESSAGE,
- "BM_ImportMemory (pBMContext=0x%x, uRequestSize=0x%x, uFlags=0x%x, uAlign=0x%x)",
- (IMG_UINTPTR_T)pBMContext, uRequestSize, uFlags, uDevVAddrAlignment));
+ "BM_ImportMemory (pBMContext=0x%p, uRequestSize=0x%" SIZE_T_FMT_LEN
+ "x, ui32Flags=0x%x, uAlign=0x%" SIZE_T_FMT_LEN "x)",
+ pBMContext, uRequestSize, ui32Flags, uDevVAddrAlignment));
PVR_ASSERT (ppsMapping != IMG_NULL);
PVR_ASSERT (pBMContext != IMG_NULL);
@@ -2882,15 +2738,14 @@ BM_ImportMemory (IMG_VOID *pH,
pMapping->hOSMemHandle = 0;
pMapping->CpuVAddr = 0;
pMapping->DevVAddr.uiAddr = 0;
- pMapping->ui32MappingCount = 0;
pMapping->CpuPAddr.uiAddr = 0;
pMapping->uSize = uSize;
- if ((uFlags & PVRSRV_MEM_SPARSE) == 0)
+ if ((ui32Flags & PVRSRV_MEM_SPARSE) == 0)
{
pMapping->uSizeVM = uSize;
}
pMapping->pBMHeap = pBMHeap;
- pMapping->ui32Flags = uFlags;
+ pMapping->ui32Flags = ui32Flags;
/*
* If anyone wants to know, pass back the actual size of our allocation.
@@ -2912,16 +2767,11 @@ BM_ImportMemory (IMG_VOID *pH,
uPSize = pMapping->uSize;
}
- if (uFlags & PVRSRV_MEM_XPROC)
+ if (ui32Flags & PVRSRV_MEM_XPROC)
{
IMG_UINT32 ui32Attribs = pBMHeap->ui32Attribs | PVRSRV_MEM_XPROC;
IMG_BOOL bBadBackingStoreType;
- if(uFlags & PVRSRV_MEM_ION)
- {
- ui32Attribs |= PVRSRV_MEM_ION;
- }
-
bBadBackingStoreType = IMG_TRUE;
if ((ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG) != 0)
@@ -2956,7 +2806,7 @@ BM_ImportMemory (IMG_VOID *pH,
&pMapping->hOSMemHandle) != PVRSRV_OK)
{
PVR_DPF((PVR_DBG_ERROR,
- "BM_ImportMemory: XProcWorkaroundAllocShareable(0x%x) failed",
+ "BM_ImportMemory: XProcWorkaroundAllocShareable(0x%" SIZE_T_FMT_LEN "x) failed",
uPSize));
goto fail_mapping_alloc;
}
@@ -3000,7 +2850,7 @@ BM_ImportMemory (IMG_VOID *pH,
&pMapping->hOSMemHandle) != PVRSRV_OK)
{
PVR_DPF((PVR_DBG_ERROR,
- "BM_ImportMemory: XProcWorkaroundAllocShareable(0x%x) failed",
+ "BM_ImportMemory: XProcWorkaroundAllocShareable(0x%" SIZE_T_FMT_LEN "x) failed",
uPSize));
goto fail_mapping_alloc;
}
@@ -3060,7 +2910,7 @@ BM_ImportMemory (IMG_VOID *pH,
&pMapping->hOSMemHandle) != PVRSRV_OK)
{
PVR_DPF((PVR_DBG_ERROR,
- "BM_ImportMemory: OSAllocPages(0x%x) failed",
+ "BM_ImportMemory: OSAllocPages(0x%" SIZE_T_FMT_LEN "x) failed",
uPSize));
goto fail_mapping_alloc;
}
@@ -3102,7 +2952,7 @@ BM_ImportMemory (IMG_VOID *pH,
ui32PrivDataLength,
(IMG_UINTPTR_T *)&sSysPAddr.uiAddr))
{
- PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: RA_Alloc(0x%x) FAILED", uPSize));
+ PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: RA_Alloc(0x%" SIZE_T_FMT_LEN "x) FAILED", uPSize));
goto fail_mapping_alloc;
}
@@ -3128,42 +2978,21 @@ BM_ImportMemory (IMG_VOID *pH,
goto fail_mapping_alloc;
}
- if(uFlags & PVRSRV_MEM_ION)
- {
- IMG_UINT32 ui32AddressOffsets[PVRSRV_MAX_NUMBER_OF_MM_BUFFER_PLANES];
- IMG_UINT32 ui32NumAddrOffsets = PVRSRV_MAX_NUMBER_OF_MM_BUFFER_PLANES;
-
- IMG_INT32 retSize = OSGetMemMultiPlaneInfo(pMapping->hOSMemHandle,
- ui32AddressOffsets, &ui32NumAddrOffsets);
-
- if(retSize > 0 && pActualSize)
- {
- *pActualSize = pMapping->uSize = retSize;
- }
- }
-
/*
* Allocate some device memory for what we just allocated.
*/
- /*
- * Do not allocate GPU mapping if NO_GPU_VIRTUAL_ON_ALLOC is requested.
- * In the case where CBI is enabled, this allows for late
- * GPU mapping. This flag is, otherwise, used in cases where only
- * the memory management feature of the driver is utilized, without
- * a need for GPU rendering
- */
- if ((uFlags & (PVRSRV_MEM_SPARSE | PVRSRV_HAP_NO_GPU_VIRTUAL_ON_ALLOC)) == 0)
+ if ((ui32Flags & PVRSRV_MEM_SPARSE) == 0)
{
- uResult = DevMemoryAlloc (pBMContext,
+ bResult = DevMemoryAlloc (pBMContext,
pMapping,
IMG_NULL,
- uFlags,
+ ui32Flags,
(IMG_UINT32)uDevVAddrAlignment,
&pMapping->DevVAddr);
- if (uResult <= 0)
+ if (!bResult)
{
PVR_DPF((PVR_DBG_ERROR,
- "BM_ImportMemory: DevMemoryAlloc(0x%x) failed",
+ "BM_ImportMemory: DevMemoryAlloc(0x%" SIZE_T_FMT_LEN "x) failed",
pMapping->uSize));
goto fail_dev_mem_alloc;
}
@@ -3172,10 +3001,9 @@ BM_ImportMemory (IMG_VOID *pH,
/* PRQA S 3356,3358 1 */
PVR_ASSERT (uDevVAddrAlignment>1?(pMapping->DevVAddr.uiAddr%uDevVAddrAlignment)==0:1);
PVR_ASSERT(pBase);
+ *pBase = pMapping->DevVAddr.uiAddr;
}
- if(pBase)
- *pBase = pMapping->DevVAddr.uiAddr;
*ppsMapping = pMapping;
PVR_DPF ((PVR_DBG_MESSAGE, "BM_ImportMemory: IMG_TRUE"));
@@ -3199,7 +3027,7 @@ fail_dev_mem_alloc:
uPSize = pMapping->uSize;
}
- if (uFlags & PVRSRV_MEM_XPROC)
+ if (ui32Flags & PVRSRV_MEM_XPROC)
{
XProcWorkaroundFreeShareable(pMapping->hOSMemHandle);
}
@@ -3223,7 +3051,7 @@ fail_dev_mem_alloc:
pMapping->hOSMemHandle);
}
sSysPAddr = SysCpuPAddrToSysPAddr(pMapping->CpuPAddr);
- RA_Free (pBMHeap->pLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
+ RA_Free (pBMHeap->pLocalDevMemArena, (IMG_UINTPTR_T)sSysPAddr.uiAddr, IMG_FALSE);
}
}
fail_mapping_alloc:
@@ -3259,8 +3087,8 @@ BM_FreeMemory (IMG_VOID *h, IMG_UINTPTR_T _base, BM_MAPPING *psMapping)
PVR_UNREFERENCED_PARAMETER (_base);
PVR_DPF ((PVR_DBG_MESSAGE,
- "BM_FreeMemory (h=0x%x, base=0x%x, psMapping=0x%x)",
- (IMG_UINTPTR_T)h, _base, (IMG_UINTPTR_T)psMapping));
+ "BM_FreeMemory (h=0x%p, base=0x" UINTPTR_FMT ", psMapping=0x%p)",
+ h, _base, psMapping));
PVR_ASSERT (psMapping != IMG_NULL);
@@ -3315,7 +3143,7 @@ BM_FreeMemory (IMG_VOID *h, IMG_UINTPTR_T _base, BM_MAPPING *psMapping)
sSysPAddr = SysCpuPAddrToSysPAddr(psMapping->CpuPAddr);
- RA_Free (pBMHeap->pLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
+ RA_Free (pBMHeap->pLocalDevMemArena, (IMG_UINTPTR_T)sSysPAddr.uiAddr, IMG_FALSE);
}
else
{
@@ -3326,8 +3154,8 @@ BM_FreeMemory (IMG_VOID *h, IMG_UINTPTR_T _base, BM_MAPPING *psMapping)
/*not nulling pointer, copy on stack*/
PVR_DPF((PVR_DBG_MESSAGE,
- "..BM_FreeMemory (h=0x%x, base=0x%x)",
- (IMG_UINTPTR_T)h, _base));
+ "..BM_FreeMemory (h=0x%p, base=0x" UINTPTR_FMT ")",
+ h, _base));
}
/*!