#include "radeon_driver.h"
#include "mmio.h"
#include "rbbm_regs.h"
#include "dma_regs.h"
#include <string.h>
/* Upper bound for a single DMA transfer, in bytes.
 * Parenthesized so the expansion stays one value in any expression context
 * (unparenthesized, e.g. `x / RADEON_MAX_DMA_SIZE` would parse as
 * `x / 16 * 1024 * 1024`). */
#define RADEON_MAX_DMA_SIZE (16 * 1024 * 1024)
/* Set up the video-DMA engine: allocate the descriptor table in local
 * graphics memory and prepare the interrupt used to signal completion.
 * Returns B_OK on success, else the mem_alloc() error. */
status_t Radeon_InitDMA( device_info *di )
{
	status_t result;

	// one descriptor per 4K page is enough to describe the largest transfer
	di->dma_desc_max_num = RADEON_MAX_DMA_SIZE / 4096;

	result = mem_alloc( di->memmgr[mt_local],
		di->dma_desc_max_num * sizeof( DMA_descriptor ), 0,
		&di->dma_desc_handle, &di->dma_desc_offset );
	if( result != B_OK )
		return result;

	// unmask the video-DMA interrupt and acknowledge any stale status bit,
	// so the first real completion is not confused with a leftover event
	OUTREGP( di->regs, RADEON_GEN_INT_CNTL, RADEON_VIDDMA_MASK, ~RADEON_VIDDMA_MASK );
	OUTREG( di->regs, RADEON_GEN_INT_STATUS, RADEON_VIDDMA_AK );

	return B_OK;
}
/* Build the DMA descriptor table for a transfer of `size` bytes from
 * graphics-memory offset `src` to the host buffer `target`.
 * If `lock_mem` is set (and the buffer isn't physically contiguous) the
 * buffer is wired via lock_memory(); on error it is unlocked again with
 * the SAME address/size it was locked with.
 * Returns B_OK on success, B_BAD_VALUE on bad/oversized input, or the
 * error from lock_memory()/get_memory_map(). */
static status_t Radeon_PrepareDMA(
	device_info *di, uint32 src, char *target, size_t size, bool lock_mem, bool contiguous )
{
	physical_entry map[16];
	status_t res;
	DMA_descriptor *cur_desc;
	int num_desc;
	// lock_memory()/unlock_memory() must be called with identical arguments;
	// `target` and `size` are consumed below, so remember the originals
	char *lock_target = target;
	size_t lock_size = size;

	// an empty transfer would generate no descriptor, and the EOL flag
	// below would be written in front of the descriptor table
	if( size == 0 )
		return B_BAD_VALUE;

	if( lock_mem && !contiguous ) {
		res = lock_memory( lock_target, lock_size, B_DMA_IO | B_READ_DEVICE );
		if( res != B_OK ) {
			SHOW_ERROR( 2, "Cannot lock memory (%s)", strerror( res ));
			return res;
		}
	}

	// translate the local-memory offset into a card address
	src += di->si->memory[mt_local].virtual_addr_start;

	cur_desc = (DMA_descriptor *)(di->si->local_mem + di->dma_desc_offset);
	num_desc = 0;

	while( size > 0 ) {
		int i;

		if( contiguous ) {
			// physically contiguous buffer: one map query gives the base
			// address; the whole remainder is one run
			res = get_memory_map( target, 1, map, 16 );
			map[0].size = size;
			// get_memory_map() only filled map[0]; terminate the list
			// explicitly so the loop below doesn't read stack garbage
			map[1].size = 0;
		} else {
			res = get_memory_map( target, size, map, 16 );
		}
		if( res != B_OK ) {
			SHOW_ERROR( 2, "get_memory_map() failed (%s)", strerror( res ));
			res = B_BAD_VALUE;
			goto err;
		}

		for( i = 0; i < 16; ++i ) {
			phys_addr_t address = map[i].address;
			size_t contig_size = map[i].size;

			// a zero-sized entry terminates the map
			if( contig_size == 0 )
				break;

#if B_HAIKU_PHYSICAL_BITS > 32
			// the descriptor's destination field is 32 bit only
			if (address + contig_size > (phys_addr_t)1 << 32) {
				SHOW_ERROR(2, "Physical address > 4 GB: %#" B_PRIxPHYSADDR
					"size: %#" B_PRIxSIZE, address, size);
				res = B_BAD_VALUE;
				goto err;
			}
#endif

			target += contig_size;

			// split each physical run into engine-sized descriptors
			while( contig_size > 0 ) {
				size_t cur_size;

				cur_size = min( contig_size, RADEON_DMA_DESC_MAX_SIZE );

				if( ++num_desc > (int)di->dma_desc_max_num ) {
					SHOW_ERROR( 2, "Overflow of DMA descriptors, %ld bytes left", size );
					res = B_BAD_VALUE;
					goto err;
				}

				cur_desc->src_address = src;
				cur_desc->dest_address = address;
				cur_desc->command = cur_size;
				cur_desc->res = 0;

				++cur_desc;
				address += cur_size;
				contig_size -= cur_size;
				src += cur_size;
				size -= cur_size;
			}
		}
	}

	// mark the last descriptor so the engine stops there
	(cur_desc - 1)->command |= RADEON_DMA_COMMAND_EOL;

	return B_OK;

err:
	if( lock_mem && !contiguous )
		unlock_memory( lock_target, lock_size, B_DMA_IO | B_READ_DEVICE );

	return res;
}
/* Undo the memory wiring done by Radeon_PrepareDMA().
 * Must be called with the same target/size/flags that were passed there;
 * `di` and `src` are unused but kept for symmetry with the prepare call. */
static void Radeon_FinishDMA(
	device_info *di, uint32 src, char *target, size_t size, bool lock_mem, bool contiguous )
{
	// nothing was locked unless the buffer was lockable and non-contiguous
	if( !lock_mem || contiguous )
		return;

	unlock_memory( target, size, B_DMA_IO | B_READ_DEVICE );
}
/* Copy `size` bytes from graphics-memory offset `src` into the host buffer
 * `target` using the card's video DMA engine; blocks until the transfer is
 * finished.  `lock_mem`/`contiguous` describe the target buffer (see
 * Radeon_PrepareDMA).  Returns B_OK, or the prepare/semaphore error. */
status_t Radeon_DMACopy(
	device_info *di, uint32 src, char *target, size_t size, bool lock_mem, bool contiguous )
{
	status_t res;
	// build the descriptor table (and wire the buffer if requested)
	res = Radeon_PrepareDMA( di, src, target, size, lock_mem, contiguous );
	if( res != B_OK )
		return res;
	// point the engine at the descriptor table (card address);
	// NOTE(review): writing this register presumably also kicks off the
	// transfer — no separate "go" bit is written here; confirm against
	// the register spec
	OUTREG( di->regs, RADEON_DMA_VID_TABLE_ADDR, di->si->memory[mt_local].virtual_addr_start +
		di->dma_desc_offset );
	// wait up to 1 s for the completion interrupt; dma_sem is presumably
	// released by the driver's interrupt handler (not visible here)
	res = acquire_sem_etc( di->dma_sem, 1, B_RELATIVE_TIMEOUT, 1000000 );
	// safety net: even after the semaphore (or a timeout) poll the status
	// register until the engine is really idle, so the caller never touches
	// a buffer that is still under DMA.  NOTE(review): this loop is
	// unbounded — a hung engine spins here forever
	while( (INREG( di->regs, RADEON_DMA_VID_STATUS ) & RADEON_DMA_STATUS_ACTIVE) != 0 ) {
		SHOW_FLOW0( 0, "DMA transmission still active" );
		snooze( 1000 );
	}
	// unwire the buffer; res still carries the acquire_sem_etc() result,
	// so a timeout is reported to the caller even though the copy completed
	Radeon_FinishDMA( di, src, target, size, lock_mem, contiguous );
	return res;
}