Blackfin: support smaller uncached DMA chunks for memory constrained systems
When working with 8 meg systems, forcing a 1 meg DMA chunk heavily cuts into the available resources. So support smaller chunks to better cover needs for these systems. Signed-off-by: Barry Song <barry.song@analog.com> Signed-off-by: Mike Frysinger <vapier@gentoo.org>
This commit is contained in: parent 5df326aca4, commit c45c06596e
@@ -896,6 +896,12 @@ config DMA_UNCACHED_2M
 	bool "Enable 2M DMA region"
 config DMA_UNCACHED_1M
 	bool "Enable 1M DMA region"
+config DMA_UNCACHED_512K
+	bool "Enable 512K DMA region"
+config DMA_UNCACHED_256K
+	bool "Enable 256K DMA region"
+config DMA_UNCACHED_128K
+	bool "Enable 128K DMA region"
 config DMA_UNCACHED_NONE
 	bool "Disable DMA region"
 endchoice
@@ -20,6 +20,12 @@
 # define DMA_UNCACHED_REGION (2 * 1024 * 1024)
 #elif defined(CONFIG_DMA_UNCACHED_1M)
 # define DMA_UNCACHED_REGION (1024 * 1024)
+#elif defined(CONFIG_DMA_UNCACHED_512K)
+# define DMA_UNCACHED_REGION (512 * 1024)
+#elif defined(CONFIG_DMA_UNCACHED_256K)
+# define DMA_UNCACHED_REGION (256 * 1024)
+#elif defined(CONFIG_DMA_UNCACHED_128K)
+# define DMA_UNCACHED_REGION (128 * 1024)
 #else
 # define DMA_UNCACHED_REGION (0)
 #endif
@@ -89,15 +89,25 @@ void __init generate_cplb_tables_cpu(unsigned int cpu)

 void __init generate_cplb_tables_all(void)
 {
+	unsigned long uncached_end;
 	int i_d, i_i;

 	i_d = 0;
 	/* Normal RAM, including MTD FS. */
 #ifdef CONFIG_MTD_UCLINUX
-	dcplb_bounds[i_d].eaddr = memory_mtd_start + mtd_size;
+	uncached_end = memory_mtd_start + mtd_size;
 #else
-	dcplb_bounds[i_d].eaddr = memory_end;
+	uncached_end = memory_end;
 #endif
+	/*
+	 * if DMA uncached is less than 1MB, mark the 1MB chunk as uncached
+	 * so that we don't have to use 4kB pages and cause CPLB thrashing
+	 */
+	if ((DMA_UNCACHED_REGION >= 1 * 1024 * 1024) || !DMA_UNCACHED_REGION ||
+	    ((_ramend - uncached_end) >= 1 * 1024 * 1024))
+		dcplb_bounds[i_d].eaddr = uncached_end;
+	else
+		dcplb_bounds[i_d].eaddr = uncached_end & ~(1 * 1024 * 1024);
 	dcplb_bounds[i_d++].data = SDRAM_DGENERIC;
 	/* DMA uncached region. */
 	if (DMA_UNCACHED_REGION) {
@@ -135,11 +145,7 @@ void __init generate_cplb_tables_all(void)

 	i_i = 0;
 	/* Normal RAM, including MTD FS. */
-#ifdef CONFIG_MTD_UCLINUX
-	icplb_bounds[i_i].eaddr = memory_mtd_start + mtd_size;
-#else
-	icplb_bounds[i_i].eaddr = memory_end;
-#endif
+	icplb_bounds[i_i].eaddr = uncached_end;
 	icplb_bounds[i_i++].data = SDRAM_IGENERIC;
 	/* DMA uncached region. */
 	if (DMA_UNCACHED_REGION) {
||||||
|
|
Reference in New Issue