msm: 8960: Add correct PASR mask support
The current code does not use the correct PASR masks when turning off sections of memory, and it will crash if the SPARSEMEM section_size is changed to a smaller value. This fix calculates the correct masks and removes the dependency on the SPARSEMEM section_size, so the masks are computed accurately regardless of memory size and configuration. MAX_NR_REGIONS has been increased to 32 to account for the largest system: four memory banks, each divided into 8 regions.

Change-Id: Idaf05a06c1430e6d353fddafa305b57e400dfb8c
CRs-fixed: 329575
Signed-off-by: Jack Cheung <jackc@codeaurora.org>
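To make the mask computation concrete, here is a minimal sketch; it is not taken from the patch itself. The helper name build_pasr_mask and its standalone form are hypothetical, while the constants and the bit-clearing idiom mirror the change below (one bit per region, cleared for every region in the requested range).

#include <stdio.h>

#define NR_REGIONS_PER_BANK 8
#define MAX_NR_REGIONS      32            /* 4 banks x 8 regions */

/* Hypothetical helper: derive the PASR bitmask for a range of regions. */
static int build_pasr_mask(int start_region, int end_region)
{
	int mask = ~0x0;                  /* default_mask: all regions active */
	int i;

	/* Clear one bit per region that is to be powered down. */
	for (i = start_region; i <= end_region; i++)
		mask &= ~(0x1 << i);

	return mask;
}

int main(void)
{
	/* Powering down regions 8..15 (the second bank) clears bits 8-15. */
	printf("0x%08X\n", build_pasr_mask(8, 15));   /* prints 0xFFFF00FF */
	return 0;
}

This is the same per-region bit clearing that the reworked soc_change_memory_power() below uses to build the mask it hands to switch_memory_state(), instead of indexing into fixed per-bank mask tables.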
@@ -1,7 +1,7 @@
 /* arch/arm/mach-msm/include/mach/memory.h
  *
  * Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -23,8 +23,13 @@
 #define MAX_PHYSMEM_BITS 32
 #define SECTION_SIZE_BITS 28
 
-/* Maximum number of Memory Regions */
-#define MAX_NR_REGIONS 4
+/* Maximum number of Memory Regions
+ * The largest system can have 4 memory banks, each divided into 8 regions
+ */
+#define MAX_NR_REGIONS 32
+
+/* The number of regions each memory bank is divided into */
+#define NR_REGIONS_PER_BANK 8
 
 /* Certain configurations of MSM7x30 have multiple memory banks.
 * One or more of these banks can contain holes in the memory map as well.
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -31,11 +31,11 @@ static struct mem_region_t {
 	/* reserved for future use */
 	u64 num_partitions;
 	int state;
-	int mask;
-	struct mutex state_mutex;
 } mem_regions[MAX_NR_REGIONS];
 
+static struct mutex mem_regions_mutex;
 static unsigned int nr_mem_regions;
+static int mem_regions_mask;
 
 enum {
 	STATE_POWER_DOWN = 0x0,
@@ -43,14 +43,6 @@
 	STATE_DEFAULT = STATE_ACTIVE
 };
 
-enum {
-	MEM_NO_CHANGE = 0x0,
-	MEM_DEEP_POWER_DOWN,
-	MEM_SELF_REFRESH,
-};
-
-static unsigned int dmm_mode;
-
 static int default_mask = ~0x0;
 
 /* Return the number of chipselects populated with a memory bank */
@@ -111,50 +103,55 @@ static int rpm_change_memory_state(int retention_mask,
 	}
 }
 
-static int switch_memory_state(int id, int new_state)
+static int switch_memory_state(int mask, int new_state, int start_region,
+	int end_region)
 {
-	int mask = 0;
-	int power_down_masks[MAX_NR_REGIONS] = { 0xFFFFFF00, 0xFFFF00FF,
-					0xFF00FFFF, 0x00FFFFFF };
-	int self_refresh_masks[MAX_NR_REGIONS] = { 0xFFFFFFF0, 0xFFFFFF0F,
-					0xFFFFF0FF, 0xFFFF0FFF };
-	mutex_lock(&mem_regions[id].state_mutex);
+	int final_mask = 0;
+	int i;
 
-	if (new_state == mem_regions[id].state)
+	mutex_lock(&mem_regions_mutex);
+
+	for (i = start_region; i <= end_region; i++) {
+		if (new_state == mem_regions[i].state)
 			goto no_change;
+		/* All region states must be the same to change them */
+		if (mem_regions[i].state != mem_regions[start_region].state)
+			goto no_change;
 
-	pr_info("request memory %d state switch (%d->%d) mode %d\n", id,
-			mem_regions[id].state, new_state, dmm_mode);
-	if (new_state == STATE_POWER_DOWN) {
-		if (dmm_mode == MEM_DEEP_POWER_DOWN)
-			mask = mem_regions[id].mask & power_down_masks[id];
-		else
-			mask = mem_regions[id].mask & self_refresh_masks[id];
-	} else if (new_state == STATE_ACTIVE) {
-		if (dmm_mode == MEM_DEEP_POWER_DOWN)
-			mask = mem_regions[id].mask | (~power_down_masks[id]);
-		else
-			mask = mem_regions[id].mask | (~self_refresh_masks[id]);
 	}
 
-	if (rpm_change_memory_state(mask, mask) == 0) {
-		mem_regions[id].state = new_state;
-		mem_regions[id].mask = mask;
-		pr_info("completed memory %d state switch to %d mode %d\n",
-				id, new_state, dmm_mode);
-		mutex_unlock(&mem_regions[id].state_mutex);
+	if (new_state == STATE_POWER_DOWN)
+		final_mask = mem_regions_mask & mask;
+	else if (new_state == STATE_ACTIVE)
+		final_mask = mem_regions_mask | ~mask;
+	else
+		goto no_change;
+
+	pr_info("request memory %d to %d state switch (%d->%d)\n",
+		start_region, end_region, mem_regions[start_region].state,
+		new_state);
+	if (rpm_change_memory_state(final_mask, final_mask) == 0) {
+		for (i = start_region; i <= end_region; i++)
+			mem_regions[i].state = new_state;
+		mem_regions_mask = final_mask;
+
+		pr_info("completed memory %d to %d state switch to %d\n",
+			start_region, end_region, new_state);
+		mutex_unlock(&mem_regions_mutex);
 		return 0;
 	}
 
-	pr_err("failed memory %d state switch (%d->%d) mode %d\n", id,
-			mem_regions[id].state, new_state, dmm_mode);
+	pr_err("failed memory %d to %d state switch (%d->%d)\n",
+		start_region, end_region, mem_regions[start_region].state,
+		new_state);
 
 no_change:
-	mutex_unlock(&mem_regions[id].state_mutex);
+	mutex_unlock(&mem_regions_mutex);
 	return -EINVAL;
 }
 #else
 
-static int switch_memory_state(int id, int new_state)
+static int switch_memory_state(int mask, int new_state, int start_region,
+	int end_region)
 {
 	return -EINVAL;
 }
@@ -165,29 +162,34 @@ static int switch_memory_state(int id, int new_state)
  */
 int soc_change_memory_power(u64 start, u64 size, int change)
 {
 
 	int i = 0;
-	int match = 0;
-
-	/* Find the memory region starting just below start */
-	for (i = 0; i < nr_mem_regions; i++) {
-		if (mem_regions[i].start <= start &&
-			mem_regions[i].start >= mem_regions[match].start) {
-			match = i;
-		}
-	}
-
-	if (start + size > mem_regions[match].start + mem_regions[match].size) {
-		pr_info("passed size exceeds size of memory bank\n");
-		return 0;
-	}
+	int mask = default_mask;
+	u64 end = start + size;
+	int start_region = 0;
+	int end_region = 0;
 
 	if (change != STATE_ACTIVE && change != STATE_POWER_DOWN) {
 		pr_info("requested state transition invalid\n");
 		return 0;
 	}
+	/* Find the memory regions that fall within the range */
+	for (i = 0; i < nr_mem_regions; i++) {
+		if (mem_regions[i].start <= start &&
+			mem_regions[i].start >=
+			mem_regions[start_region].start) {
+			start_region = i;
+		}
+		if (end <= mem_regions[i].start + mem_regions[i].size) {
+			end_region = i;
+			break;
+		}
+	}
 
-	if (!switch_memory_state(match, change))
+	/* Set the bitmask for each region in the range */
+	for (i = start_region; i <= end_region; i++)
+		mask &= ~(0x1 << i);
+
+	if (!switch_memory_state(mask, change, start_region, end_region))
 		return size;
 	else
 		return 0;
@@ -212,9 +214,10 @@ unsigned int get_memory_bank_start(unsigned int id)
 
 int __init meminfo_init(unsigned int type, unsigned int min_bank_size)
 {
-	unsigned int i;
+	unsigned int i, j;
 	unsigned long bank_size;
 	unsigned long bank_start;
+	unsigned long region_size;
 	struct smem_ram_ptable *ram_ptable;
 	/* physical memory banks */
 	unsigned int nr_mem_banks = 0;
@@ -229,40 +232,35 @@ int __init meminfo_init(unsigned int type, unsigned int min_bank_size)
 		return -EINVAL;
 	}
 
-	/* Determine power control mode based on the hw version */
-	/* This check will be removed when PASR is fully supported */
-	if (cpu_is_msm8960() &&
-		SOCINFO_VERSION_MAJOR(socinfo_get_version()) < 2)
-		dmm_mode = MEM_DEEP_POWER_DOWN;
-	else
-		dmm_mode = MEM_SELF_REFRESH;
-
 	pr_info("meminfo_init: smem ram ptable found: ver: %d len: %d\n",
 		ram_ptable->version, ram_ptable->len);
 
 	for (i = 0; i < ram_ptable->len; i++) {
+		/* A bank is valid only if is greater than min_bank_size. If
+		 * non-valid memory (e.g. modem memory) became greater than
+		 * min_bank_size, there is currently no way to differentiate.
+		 */
 		if (ram_ptable->parts[i].type == type &&
 			ram_ptable->parts[i].size >= min_bank_size) {
 			bank_start = ram_ptable->parts[i].start;
 			bank_size = ram_ptable->parts[i].size;
 			/* Divide into logical memory regions of same size */
-			while (bank_size) {
+			region_size = bank_size / NR_REGIONS_PER_BANK;
+
+			for (j = 0; j < NR_REGIONS_PER_BANK; j++) {
 				mem_regions[nr_mem_regions].start =
 					bank_start;
 				mem_regions[nr_mem_regions].size =
-					MIN_MEMORY_BLOCK_SIZE;
-				mutex_init(&mem_regions[nr_mem_regions]
-					.state_mutex);
+					region_size;
 				mem_regions[nr_mem_regions].state =
 					STATE_DEFAULT;
-				mem_regions[nr_mem_regions].mask = default_mask;
-				bank_start += MIN_MEMORY_BLOCK_SIZE;
-				bank_size -= MIN_MEMORY_BLOCK_SIZE;
+				bank_start += region_size;
 				nr_mem_regions++;
 			}
 			nr_mem_banks++;
 		}
 	}
+	mutex_init(&mem_regions_mutex);
+	mem_regions_mask = default_mask;
 	pr_info("Found %d memory banks grouped into %d memory regions\n",
 		nr_mem_banks, nr_mem_regions);
 	return 0;
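For reference, a short sketch, again not taken from the patch, of the bank partitioning that the reworked meminfo_init() loop performs: each bank is split into NR_REGIONS_PER_BANK equal regions, with no dependence on the SPARSEMEM section size. The bank base address and size below are made-up example values, and struct region is a simplified stand-in for mem_region_t.

#include <stdio.h>

#define NR_REGIONS_PER_BANK 8

struct region {
	unsigned long start;
	unsigned long size;
};

int main(void)
{
	unsigned long bank_start = 0x80000000UL;   /* example bank base */
	unsigned long bank_size  = 512UL << 20;    /* example 512 MB bank */
	unsigned long region_size = bank_size / NR_REGIONS_PER_BANK;
	struct region regions[NR_REGIONS_PER_BANK];
	int j;

	/* Same pattern as the new for-loop above: equal-size regions. */
	for (j = 0; j < NR_REGIONS_PER_BANK; j++) {
		regions[j].start = bank_start;
		regions[j].size = region_size;
		bank_start += region_size;
	}

	for (j = 0; j < NR_REGIONS_PER_BANK; j++)
		printf("region %d: start=0x%08lx size=%lu MB\n",
		       j, regions[j].start, regions[j].size >> 20);
	return 0;
}

With four such banks, the total of 4 x 8 = 32 regions is what the increased MAX_NR_REGIONS accommodates.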