From aaad2b0c757f3e6e02552cb0bdcd91a5ec0d6305 Mon Sep 17 00:00:00 2001 From: Jiri Kosina Date: Wed, 23 Apr 2008 16:39:43 +0200 Subject: [PATCH 0001/1435] HID: fix report descriptor handling for MS Wireless model 1028 Fix a typo in report descriptor fixup, which results in improper substitution and leaving old value in place. Reported-by: Juha Motorsportcom Signed-off-by: Jiri Kosina --- drivers/hid/usbhid/hid-quirks.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 28ddc3fdd3d..8d2bc93f816 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -1113,7 +1113,7 @@ static void usbhid_fixup_microsoft_descriptor(unsigned char *rdesc, int rsize) && rdesc[557] == 0x19 && rdesc[559] == 0x29) { printk(KERN_INFO "Fixing up Microsoft Wireless Receiver Model 1028 report descriptor\n"); - rdesc[284] = rdesc[304] = rdesc[558] = 0x35; + rdesc[284] = rdesc[304] = rdesc[557] = 0x35; rdesc[352] = 0x36; rdesc[286] = rdesc[355] = 0x46; rdesc[306] = rdesc[559] = 0x45; From a953e4597abd51b74c99e0e3b7074532a60fd031 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Mon, 12 May 2008 21:21:12 +0200 Subject: [PATCH 0002/1435] sched: replace MAX_NUMNODES with nr_node_ids in kernel/sched.c * Replace usages of MAX_NUMNODES with nr_node_ids in kernel/sched.c, where appropriate. This saves some allocated space as well as many wasted cycles going through node entries that are non-existent. For inclusion into sched-devel/latest tree. Based on: git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git + sched-devel/latest .../mingo/linux-2.6-sched-devel.git Signed-off-by: Mike Travis Signed-off-by: Ingo Molnar --- kernel/sched.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/kernel/sched.c b/kernel/sched.c index cfa222a9153..1ed8011db82 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -6879,9 +6879,9 @@ static int find_next_best_node(int node, nodemask_t *used_nodes) min_val = INT_MAX; - for (i = 0; i < MAX_NUMNODES; i++) { + for (i = 0; i < nr_node_ids; i++) { /* Start at @node */ - n = (node + i) % MAX_NUMNODES; + n = (node + i) % nr_node_ids; if (!nr_cpus_node(n)) continue; @@ -7075,7 +7075,7 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask) if (!sched_group_nodes) continue; - for (i = 0; i < MAX_NUMNODES; i++) { + for (i = 0; i < nr_node_ids; i++) { struct sched_group *oldsg, *sg = sched_group_nodes[i]; *nodemask = node_to_cpumask(i); @@ -7263,7 +7263,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map, /* * Allocate the per-node list of sched groups */ - sched_group_nodes = kcalloc(MAX_NUMNODES, sizeof(struct sched_group *), + sched_group_nodes = kcalloc(nr_node_ids, sizeof(struct sched_group *), GFP_KERNEL); if (!sched_group_nodes) { printk(KERN_WARNING "Can not alloc sched group node list\n"); @@ -7407,7 +7407,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map, #endif /* Set up physical groups */ - for (i = 0; i < MAX_NUMNODES; i++) { + for (i = 0; i < nr_node_ids; i++) { SCHED_CPUMASK_VAR(nodemask, allmasks); SCHED_CPUMASK_VAR(send_covered, allmasks); @@ -7431,7 +7431,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map, send_covered, tmpmask); } - for (i = 0; i < MAX_NUMNODES; i++) { + for (i = 0; i < nr_node_ids; i++) { /* Set up node groups */ struct sched_group *sg, *prev; SCHED_CPUMASK_VAR(nodemask, allmasks); @@ -7470,9 +7470,9 @@ static int __build_sched_domains(const cpumask_t 
*cpu_map, cpus_or(*covered, *covered, *nodemask); prev = sg; - for (j = 0; j < MAX_NUMNODES; j++) { + for (j = 0; j < nr_node_ids; j++) { SCHED_CPUMASK_VAR(notcovered, allmasks); - int n = (i + j) % MAX_NUMNODES; + int n = (i + j) % nr_node_ids; node_to_cpumask_ptr(pnodemask, n); cpus_complement(*notcovered, *covered); @@ -7525,7 +7525,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map, } #ifdef CONFIG_NUMA - for (i = 0; i < MAX_NUMNODES; i++) + for (i = 0; i < nr_node_ids; i++) init_numa_sched_groups_power(sched_group_nodes[i]); if (sd_allnodes) { From 143aa5c53bd3895d42d7c08753fe58293988a69d Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Mon, 12 May 2008 21:21:13 +0200 Subject: [PATCH 0003/1435] cpu: change some globals to statics in drivers/base/cpu.c v2 This patch makes the following needlessly global code static: - attr_online_map - attr_possible_map - attr_present_map - cpu_state_attr [v2] Signed-off-by: Adrian Bunk Signed-off-by: Mike Travis Signed-off-by: Ingo Molnar --- drivers/base/cpu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index e38dfed41d8..5000402ae09 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -119,14 +119,14 @@ static ssize_t print_cpus_##type(struct sysdev_class *class, char *buf) \ { \ return print_cpus_map(buf, &cpu_##type##_map); \ } \ -struct sysdev_class_attribute attr_##type##_map = \ +static struct sysdev_class_attribute attr_##type##_map = \ _SYSDEV_CLASS_ATTR(type, 0444, print_cpus_##type, NULL) print_cpus_func(online); print_cpus_func(possible); print_cpus_func(present); -struct sysdev_class_attribute *cpu_state_attr[] = { +static struct sysdev_class_attribute *cpu_state_attr[] = { &attr_online_map, &attr_possible_map, &attr_present_map, From 41df0d61c266998b8049df7fec119cd518a43aa1 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Mon, 12 May 2008 21:21:13 +0200 Subject: [PATCH 0004/1435] x86: Add performance variants of cpumask operators * Increase performance for systems with large count NR_CPUS by limiting the range of the cpumask operators that loop over the bits in a cpumask_t variable. This removes a large amount of wasted cpu cycles. * Add performance variants of the cpumask operators: int cpus_weight_nr(mask) Same using nr_cpu_ids instead of NR_CPUS int first_cpu_nr(mask) Number lowest set bit, or nr_cpu_ids int next_cpu_nr(cpu, mask) Next cpu past 'cpu', or nr_cpu_ids for_each_cpu_mask_nr(cpu, mask) for-loop cpu over mask using nr_cpu_ids * Modify following to use performance variants: #define num_online_cpus() cpus_weight_nr(cpu_online_map) #define num_possible_cpus() cpus_weight_nr(cpu_possible_map) #define num_present_cpus() cpus_weight_nr(cpu_present_map) #define for_each_possible_cpu(cpu) for_each_cpu_mask_nr((cpu), ...) #define for_each_online_cpu(cpu) for_each_cpu_mask_nr((cpu), ...) #define for_each_present_cpu(cpu) for_each_cpu_mask_nr((cpu), ...) * Comment added to include/linux/cpumask.h: Note: The alternate operations with the suffix "_nr" are used to limit the range of the loop to nr_cpu_ids instead of NR_CPUS when NR_CPUS > 64 for performance reasons. If NR_CPUS is <= 64 then most assembler bitmask operators execute faster with a constant range, so the operator will continue to use NR_CPUS. Another consideration is that nr_cpu_ids is initialized to NR_CPUS and isn't lowered until the possible cpus are discovered (including any disabled cpus). So early uses will span the entire range of NR_CPUS. 
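To illustrate the conversion at a call site (an editorial sketch, not part of the patch; do_work_on() is a hypothetical per-cpu callback):

	cpumask_t mask = cpu_online_map;
	int cpu;

	/* old form: the loop tests bit positions all the way up to NR_CPUS */
	for_each_cpu_mask(cpu, mask)
		do_work_on(cpu);

	/* new form: the loop stops at nr_cpu_ids, skipping the unused tail
	 * of the mask on kernels built with a large NR_CPUS */
	for_each_cpu_mask_nr(cpu, mask)
		do_work_on(cpu);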
(The net effect is that for systems with 64 or less CPU's there are no functional changes.) For inclusion into sched-devel/latest tree. Based on: git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git + sched-devel/latest .../mingo/linux-2.6-sched-devel.git Cc: Paul Jackson Cc: Christoph Lameter Reviewed-by: Paul Jackson Reviewed-by: Christoph Lameter Signed-off-by: Mike Travis Signed-off-by: Ingo Molnar --- include/linux/cpumask.h | 92 +++++++++++++++++++++++++++-------------- lib/cpumask.c | 9 ++++ 2 files changed, 71 insertions(+), 30 deletions(-) diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 5df3db58fcc..b49472d1af8 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -17,6 +17,20 @@ * For details of cpus_onto(), see bitmap_onto in lib/bitmap.c. * For details of cpus_fold(), see bitmap_fold in lib/bitmap.c. * + * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . + * Note: The alternate operations with the suffix "_nr" are used + * to limit the range of the loop to nr_cpu_ids instead of + * NR_CPUS when NR_CPUS > 64 for performance reasons. + * If NR_CPUS is <= 64 then most assembler bitmask + * operators execute faster with a constant range, so + * the operator will continue to use NR_CPUS. + * + * Another consideration is that nr_cpu_ids is initialized + * to NR_CPUS and isn't lowered until the possible cpus are + * discovered (including any disabled cpus). So early uses + * will span the entire range of NR_CPUS. + * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . + * * The available cpumask operations are: * * void cpu_set(cpu, mask) turn on bit 'cpu' in mask @@ -38,12 +52,14 @@ * int cpus_empty(mask) Is mask empty (no bits sets)? * int cpus_full(mask) Is mask full (all bits sets)? 
* int cpus_weight(mask) Hamming weigh - number of set bits + * int cpus_weight_nr(mask) Same using nr_cpu_ids instead of NR_CPUS * * void cpus_shift_right(dst, src, n) Shift right * void cpus_shift_left(dst, src, n) Shift left * * int first_cpu(mask) Number lowest set bit, or NR_CPUS * int next_cpu(cpu, mask) Next cpu past 'cpu', or NR_CPUS + * int next_cpu_nr(cpu, mask) Next cpu past 'cpu', or nr_cpu_ids * * cpumask_t cpumask_of_cpu(cpu) Return cpumask with bit 'cpu' set * CPU_MASK_ALL Initializer - all bits set @@ -59,7 +75,8 @@ * void cpus_onto(dst, orig, relmap) *dst = orig relative to relmap * void cpus_fold(dst, orig, sz) dst bits = orig bits mod sz * - * for_each_cpu_mask(cpu, mask) for-loop cpu over mask + * for_each_cpu_mask(cpu, mask) for-loop cpu over mask using NR_CPUS + * for_each_cpu_mask_nr(cpu, mask) for-loop cpu over mask using nr_cpu_ids * * int num_online_cpus() Number of online CPUs * int num_possible_cpus() Number of all possible CPUs @@ -216,15 +233,6 @@ static inline void __cpus_shift_left(cpumask_t *dstp, bitmap_shift_left(dstp->bits, srcp->bits, n, nbits); } -#ifdef CONFIG_SMP -int __first_cpu(const cpumask_t *srcp); -#define first_cpu(src) __first_cpu(&(src)) -int __next_cpu(int n, const cpumask_t *srcp); -#define next_cpu(n, src) __next_cpu((n), &(src)) -#else -#define first_cpu(src) ({ (void)(src); 0; }) -#define next_cpu(n, src) ({ (void)(src); 1; }) -#endif #ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP extern cpumask_t *cpumask_of_cpu_map; @@ -343,15 +351,48 @@ static inline void __cpus_fold(cpumask_t *dstp, const cpumask_t *origp, bitmap_fold(dstp->bits, origp->bits, sz, nbits); } -#if NR_CPUS > 1 +#if NR_CPUS == 1 + +#define nr_cpu_ids 1 +#define first_cpu(src) ({ (void)(src); 0; }) +#define next_cpu(n, src) ({ (void)(src); 1; }) +#define any_online_cpu(mask) 0 +#define for_each_cpu_mask(cpu, mask) \ + for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) + +#else /* NR_CPUS > 1 */ + +extern int nr_cpu_ids; +int __first_cpu(const cpumask_t *srcp); +int __next_cpu(int n, const cpumask_t *srcp); +int __any_online_cpu(const cpumask_t *mask); + +#define first_cpu(src) __first_cpu(&(src)) +#define next_cpu(n, src) __next_cpu((n), &(src)) +#define any_online_cpu(mask) __any_online_cpu(&(mask)) #define for_each_cpu_mask(cpu, mask) \ for ((cpu) = first_cpu(mask); \ (cpu) < NR_CPUS; \ (cpu) = next_cpu((cpu), (mask))) -#else /* NR_CPUS == 1 */ -#define for_each_cpu_mask(cpu, mask) \ - for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) -#endif /* NR_CPUS */ +#endif + +#if NR_CPUS <= 64 + +#define next_cpu_nr(n, src) next_cpu(n, src) +#define cpus_weight_nr(cpumask) cpus_weight(cpumask) +#define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask) + +#else /* NR_CPUS > 64 */ + +int __next_cpu_nr(int n, const cpumask_t *srcp); +#define next_cpu_nr(n, src) __next_cpu_nr((n), &(src)) +#define cpus_weight_nr(cpumask) __cpus_weight(&(cpumask), nr_cpu_ids) +#define for_each_cpu_mask_nr(cpu, mask) \ + for ((cpu) = first_cpu(mask); \ + (cpu) < nr_cpu_ids; \ + (cpu) = next_cpu_nr((cpu), (mask))) + +#endif /* NR_CPUS > 64 */ /* * The following particular system cpumasks and operations manage @@ -414,9 +455,9 @@ extern cpumask_t cpu_online_map; extern cpumask_t cpu_present_map; #if NR_CPUS > 1 -#define num_online_cpus() cpus_weight(cpu_online_map) -#define num_possible_cpus() cpus_weight(cpu_possible_map) -#define num_present_cpus() cpus_weight(cpu_present_map) +#define num_online_cpus() cpus_weight_nr(cpu_online_map) +#define num_possible_cpus() cpus_weight_nr(cpu_possible_map) +#define 
num_present_cpus() cpus_weight_nr(cpu_present_map) #define cpu_online(cpu) cpu_isset((cpu), cpu_online_map) #define cpu_possible(cpu) cpu_isset((cpu), cpu_possible_map) #define cpu_present(cpu) cpu_isset((cpu), cpu_present_map) @@ -431,17 +472,8 @@ extern cpumask_t cpu_present_map; #define cpu_is_offline(cpu) unlikely(!cpu_online(cpu)) -#ifdef CONFIG_SMP -extern int nr_cpu_ids; -#define any_online_cpu(mask) __any_online_cpu(&(mask)) -int __any_online_cpu(const cpumask_t *mask); -#else -#define nr_cpu_ids 1 -#define any_online_cpu(mask) 0 -#endif - -#define for_each_possible_cpu(cpu) for_each_cpu_mask((cpu), cpu_possible_map) -#define for_each_online_cpu(cpu) for_each_cpu_mask((cpu), cpu_online_map) -#define for_each_present_cpu(cpu) for_each_cpu_mask((cpu), cpu_present_map) +#define for_each_possible_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_possible_map) +#define for_each_online_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_online_map) +#define for_each_present_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_present_map) #endif /* __LINUX_CPUMASK_H */ diff --git a/lib/cpumask.c b/lib/cpumask.c index bb4f76d3c3e..5f97dc25ef9 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c @@ -15,6 +15,15 @@ int __next_cpu(int n, const cpumask_t *srcp) } EXPORT_SYMBOL(__next_cpu); +#if NR_CPUS > 64 +int __next_cpu_nr(int n, const cpumask_t *srcp) +{ + return min_t(int, nr_cpu_ids, + find_next_bit(srcp->bits, nr_cpu_ids, n+1)); +} +EXPORT_SYMBOL(__next_cpu_nr); +#endif + int __any_online_cpu(const cpumask_t *mask) { int cpu; From 141ad0688adb53094d6f75b39b4b3b0625de0e07 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Mon, 12 May 2008 21:21:13 +0200 Subject: [PATCH 0005/1435] acpi: use performance variant for_each_cpu_mask_nr Change references from for_each_cpu_mask to for_each_cpu_mask_nr where appropriate Reviewed-by: Paul Jackson Reviewed-by: Christoph Lameter Signed-off-by: Mike Travis Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- drivers/acpi/processor_throttling.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c index bb06738860c..28509fbba6f 100644 --- a/drivers/acpi/processor_throttling.c +++ b/drivers/acpi/processor_throttling.c @@ -1013,7 +1013,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state) * affected cpu in order to get one proper T-state. * The notifier event is THROTTLING_PRECHANGE. */ - for_each_cpu_mask(i, online_throttling_cpus) { + for_each_cpu_mask_nr(i, online_throttling_cpus) { t_state.cpu = i; acpi_processor_throttling_notifier(THROTTLING_PRECHANGE, &t_state); @@ -1034,7 +1034,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state) * it is necessary to set T-state for every affected * cpus. */ - for_each_cpu_mask(i, online_throttling_cpus) { + for_each_cpu_mask_nr(i, online_throttling_cpus) { match_pr = processors[i]; /* * If the pointer is invalid, we will report the @@ -1068,7 +1068,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state) * affected cpu to update the T-states. 
* The notifier event is THROTTLING_POSTCHANGE */ - for_each_cpu_mask(i, online_throttling_cpus) { + for_each_cpu_mask_nr(i, online_throttling_cpus) { t_state.cpu = i; acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE, &t_state); From 068b12772a64c2440ef2f64ac5d780688c06576f Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Mon, 12 May 2008 21:21:13 +0200 Subject: [PATCH 0006/1435] cpufreq: use performance variant for_each_cpu_mask_nr Change references from for_each_cpu_mask to for_each_cpu_mask_nr where appropriate Reviewed-by: Paul Jackson Reviewed-by: Christoph Lameter Signed-off-by: Mike Travis Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- drivers/cpufreq/cpufreq.c | 14 +++++++------- drivers/cpufreq/cpufreq_conservative.c | 2 +- drivers/cpufreq/cpufreq_ondemand.c | 4 ++-- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 7fce038fa57..4937f2f97d8 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -589,7 +589,7 @@ static ssize_t show_cpus(cpumask_t mask, char *buf) ssize_t i = 0; unsigned int cpu; - for_each_cpu_mask(cpu, mask) { + for_each_cpu_mask_nr(cpu, mask) { if (i) i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " "); i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu); @@ -835,7 +835,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) } #endif - for_each_cpu_mask(j, policy->cpus) { + for_each_cpu_mask_nr(j, policy->cpus) { if (cpu == j) continue; @@ -898,14 +898,14 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) } spin_lock_irqsave(&cpufreq_driver_lock, flags); - for_each_cpu_mask(j, policy->cpus) { + for_each_cpu_mask_nr(j, policy->cpus) { cpufreq_cpu_data[j] = policy; per_cpu(policy_cpu, j) = policy->cpu; } spin_unlock_irqrestore(&cpufreq_driver_lock, flags); /* symlink affected CPUs */ - for_each_cpu_mask(j, policy->cpus) { + for_each_cpu_mask_nr(j, policy->cpus) { if (j == cpu) continue; if (!cpu_online(j)) @@ -945,7 +945,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) err_out_unregister: spin_lock_irqsave(&cpufreq_driver_lock, flags); - for_each_cpu_mask(j, policy->cpus) + for_each_cpu_mask_nr(j, policy->cpus) cpufreq_cpu_data[j] = NULL; spin_unlock_irqrestore(&cpufreq_driver_lock, flags); @@ -1028,7 +1028,7 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) * links afterwards. 
*/ if (unlikely(cpus_weight(data->cpus) > 1)) { - for_each_cpu_mask(j, data->cpus) { + for_each_cpu_mask_nr(j, data->cpus) { if (j == cpu) continue; cpufreq_cpu_data[j] = NULL; @@ -1038,7 +1038,7 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) spin_unlock_irqrestore(&cpufreq_driver_lock, flags); if (unlikely(cpus_weight(data->cpus) > 1)) { - for_each_cpu_mask(j, data->cpus) { + for_each_cpu_mask_nr(j, data->cpus) { if (j == cpu) continue; dprintk("removing link for cpu %u\n", j); diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 5d3a04ba6ad..fe565ee4375 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -497,7 +497,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, return rc; } - for_each_cpu_mask(j, policy->cpus) { + for_each_cpu_mask_nr(j, policy->cpus) { struct cpu_dbs_info_s *j_dbs_info; j_dbs_info = &per_cpu(cpu_dbs_info, j); j_dbs_info->cur_policy = policy; diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index d2af20dda38..33855cb3cf1 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -367,7 +367,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) /* Get Idle Time */ idle_ticks = UINT_MAX; - for_each_cpu_mask(j, policy->cpus) { + for_each_cpu_mask_nr(j, policy->cpus) { cputime64_t total_idle_ticks; unsigned int tmp_idle_ticks; struct cpu_dbs_info_s *j_dbs_info; @@ -521,7 +521,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, return rc; } - for_each_cpu_mask(j, policy->cpus) { + for_each_cpu_mask_nr(j, policy->cpus) { struct cpu_dbs_info_s *j_dbs_info; j_dbs_info = &per_cpu(cpu_dbs_info, j); j_dbs_info->cur_policy = policy; From 363ab6f1424cdea63e5d182312d60e19077b892a Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Mon, 12 May 2008 21:21:13 +0200 Subject: [PATCH 0007/1435] core: use performance variant for_each_cpu_mask_nr Change references from for_each_cpu_mask to for_each_cpu_mask_nr where appropriate Reviewed-by: Paul Jackson Reviewed-by: Christoph Lameter Signed-off-by: Mike Travis Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- kernel/cpu.c | 2 +- kernel/rcuclassic.c | 2 +- kernel/rcupreempt.c | 10 +++++----- kernel/sched.c | 36 ++++++++++++++++++------------------ kernel/sched_fair.c | 2 +- kernel/sched_rt.c | 6 +++--- kernel/taskstats.c | 4 ++-- kernel/workqueue.c | 6 +++--- 8 files changed, 34 insertions(+), 34 deletions(-) diff --git a/kernel/cpu.c b/kernel/cpu.c index c77bc3a1c72..50ae922c602 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -390,7 +390,7 @@ void __ref enable_nonboot_cpus(void) goto out; printk("Enabling non-boot CPUs ...\n"); - for_each_cpu_mask(cpu, frozen_cpus) { + for_each_cpu_mask_nr(cpu, frozen_cpus) { error = _cpu_up(cpu, 1); if (!error) { printk("CPU%d is up\n", cpu); diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c index f4ffbd0f306..251358de70b 100644 --- a/kernel/rcuclassic.c +++ b/kernel/rcuclassic.c @@ -92,7 +92,7 @@ static void force_quiescent_state(struct rcu_data *rdp, */ cpumask = rcp->cpumask; cpu_clear(rdp->cpu, cpumask); - for_each_cpu_mask(cpu, cpumask) + for_each_cpu_mask_nr(cpu, cpumask) smp_send_reschedule(cpu); } } diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c index e1cdf196a51..18af270125c 100644 --- a/kernel/rcupreempt.c +++ b/kernel/rcupreempt.c @@ -657,7 +657,7 @@ rcu_try_flip_idle(void) /* Now ask each CPU for acknowledgement of the flip. 
*/ - for_each_cpu_mask(cpu, rcu_cpu_online_map) { + for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) { per_cpu(rcu_flip_flag, cpu) = rcu_flipped; dyntick_save_progress_counter(cpu); } @@ -675,7 +675,7 @@ rcu_try_flip_waitack(void) int cpu; RCU_TRACE_ME(rcupreempt_trace_try_flip_a1); - for_each_cpu_mask(cpu, rcu_cpu_online_map) + for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) if (rcu_try_flip_waitack_needed(cpu) && per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) { RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1); @@ -707,7 +707,7 @@ rcu_try_flip_waitzero(void) /* Check to see if the sum of the "last" counters is zero. */ RCU_TRACE_ME(rcupreempt_trace_try_flip_z1); - for_each_cpu_mask(cpu, rcu_cpu_online_map) + for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx]; if (sum != 0) { RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1); @@ -722,7 +722,7 @@ rcu_try_flip_waitzero(void) smp_mb(); /* ^^^^^^^^^^^^ */ /* Call for a memory barrier from each CPU. */ - for_each_cpu_mask(cpu, rcu_cpu_online_map) { + for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) { per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed; dyntick_save_progress_counter(cpu); } @@ -742,7 +742,7 @@ rcu_try_flip_waitmb(void) int cpu; RCU_TRACE_ME(rcupreempt_trace_try_flip_m1); - for_each_cpu_mask(cpu, rcu_cpu_online_map) + for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) if (rcu_try_flip_waitmb_needed(cpu) && per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) { RCU_TRACE_ME(rcupreempt_trace_try_flip_me1); diff --git a/kernel/sched.c b/kernel/sched.c index 1ed8011db82..814d6e17f1e 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -2271,7 +2271,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu) /* Tally up the load of all CPUs in the group */ avg_load = 0; - for_each_cpu_mask(i, group->cpumask) { + for_each_cpu_mask_nr(i, group->cpumask) { /* Bias balancing toward cpus of our domain */ if (local_group) load = source_load(i, load_idx); @@ -2313,7 +2313,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu, /* Traverse only the allowed CPUs */ cpus_and(*tmp, group->cpumask, p->cpus_allowed); - for_each_cpu_mask(i, *tmp) { + for_each_cpu_mask_nr(i, *tmp) { load = weighted_cpuload(i); if (load < min_load || (load == min_load && i == this_cpu)) { @@ -3296,7 +3296,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, max_cpu_load = 0; min_cpu_load = ~0UL; - for_each_cpu_mask(i, group->cpumask) { + for_each_cpu_mask_nr(i, group->cpumask) { struct rq *rq; if (!cpu_isset(i, *cpus)) @@ -3560,7 +3560,7 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle, unsigned long max_load = 0; int i; - for_each_cpu_mask(i, group->cpumask) { + for_each_cpu_mask_nr(i, group->cpumask) { unsigned long wl; if (!cpu_isset(i, *cpus)) @@ -4100,7 +4100,7 @@ static void run_rebalance_domains(struct softirq_action *h) int balance_cpu; cpu_clear(this_cpu, cpus); - for_each_cpu_mask(balance_cpu, cpus) { + for_each_cpu_mask_nr(balance_cpu, cpus) { /* * If this cpu gets work to do, stop the load balancing * work being done for other cpus. 
Next load @@ -6832,7 +6832,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map, cpus_clear(*covered); - for_each_cpu_mask(i, *span) { + for_each_cpu_mask_nr(i, *span) { struct sched_group *sg; int group = group_fn(i, cpu_map, &sg, tmpmask); int j; @@ -6843,7 +6843,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map, cpus_clear(sg->cpumask); sg->__cpu_power = 0; - for_each_cpu_mask(j, *span) { + for_each_cpu_mask_nr(j, *span) { if (group_fn(j, cpu_map, NULL, tmpmask) != group) continue; @@ -7043,7 +7043,7 @@ static void init_numa_sched_groups_power(struct sched_group *group_head) if (!sg) return; do { - for_each_cpu_mask(j, sg->cpumask) { + for_each_cpu_mask_nr(j, sg->cpumask) { struct sched_domain *sd; sd = &per_cpu(phys_domains, j); @@ -7068,7 +7068,7 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask) { int cpu, i; - for_each_cpu_mask(cpu, *cpu_map) { + for_each_cpu_mask_nr(cpu, *cpu_map) { struct sched_group **sched_group_nodes = sched_group_nodes_bycpu[cpu]; @@ -7302,7 +7302,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map, /* * Set up domains for cpus specified by the cpu_map. */ - for_each_cpu_mask(i, *cpu_map) { + for_each_cpu_mask_nr(i, *cpu_map) { struct sched_domain *sd = NULL, *p; SCHED_CPUMASK_VAR(nodemask, allmasks); @@ -7374,7 +7374,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map, #ifdef CONFIG_SCHED_SMT /* Set up CPU (sibling) groups */ - for_each_cpu_mask(i, *cpu_map) { + for_each_cpu_mask_nr(i, *cpu_map) { SCHED_CPUMASK_VAR(this_sibling_map, allmasks); SCHED_CPUMASK_VAR(send_covered, allmasks); @@ -7391,7 +7391,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map, #ifdef CONFIG_SCHED_MC /* Set up multi-core groups */ - for_each_cpu_mask(i, *cpu_map) { + for_each_cpu_mask_nr(i, *cpu_map) { SCHED_CPUMASK_VAR(this_core_map, allmasks); SCHED_CPUMASK_VAR(send_covered, allmasks); @@ -7458,7 +7458,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map, goto error; } sched_group_nodes[i] = sg; - for_each_cpu_mask(j, *nodemask) { + for_each_cpu_mask_nr(j, *nodemask) { struct sched_domain *sd; sd = &per_cpu(node_domains, j); @@ -7504,21 +7504,21 @@ static int __build_sched_domains(const cpumask_t *cpu_map, /* Calculate CPU power for physical packages and nodes */ #ifdef CONFIG_SCHED_SMT - for_each_cpu_mask(i, *cpu_map) { + for_each_cpu_mask_nr(i, *cpu_map) { struct sched_domain *sd = &per_cpu(cpu_domains, i); init_sched_groups_power(i, sd); } #endif #ifdef CONFIG_SCHED_MC - for_each_cpu_mask(i, *cpu_map) { + for_each_cpu_mask_nr(i, *cpu_map) { struct sched_domain *sd = &per_cpu(core_domains, i); init_sched_groups_power(i, sd); } #endif - for_each_cpu_mask(i, *cpu_map) { + for_each_cpu_mask_nr(i, *cpu_map) { struct sched_domain *sd = &per_cpu(phys_domains, i); init_sched_groups_power(i, sd); @@ -7538,7 +7538,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map, #endif /* Attach the domains */ - for_each_cpu_mask(i, *cpu_map) { + for_each_cpu_mask_nr(i, *cpu_map) { struct sched_domain *sd; #ifdef CONFIG_SCHED_SMT sd = &per_cpu(cpu_domains, i); @@ -7621,7 +7621,7 @@ static void detach_destroy_domains(const cpumask_t *cpu_map) unregister_sched_domain_sysctl(); - for_each_cpu_mask(i, *cpu_map) + for_each_cpu_mask_nr(i, *cpu_map) cpu_attach_domain(NULL, &def_root_domain, i); synchronize_sched(); arch_destroy_sched_domains(cpu_map, &tmpmask); diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index e24ecd39c4b..e398318f101 100644 --- 
a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -1022,7 +1022,7 @@ static int wake_idle(int cpu, struct task_struct *p) || ((sd->flags & SD_WAKE_IDLE_FAR) && !task_hot(p, task_rq(p)->clock, sd))) { cpus_and(tmp, sd->span, p->cpus_allowed); - for_each_cpu_mask(i, tmp) { + for_each_cpu_mask_nr(i, tmp) { if (idle_cpu(i)) { if (i != task_cpu(p)) { schedstat_inc(p, diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index 060e87b0cb1..d73386c6e36 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c @@ -231,7 +231,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun) return 1; span = sched_rt_period_mask(); - for_each_cpu_mask(i, span) { + for_each_cpu_mask_nr(i, span) { int enqueue = 0; struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i); struct rq *rq = rq_of_rt_rq(rt_rq); @@ -272,7 +272,7 @@ static int balance_runtime(struct rt_rq *rt_rq) spin_lock(&rt_b->rt_runtime_lock); rt_period = ktime_to_ns(rt_b->rt_period); - for_each_cpu_mask(i, rd->span) { + for_each_cpu_mask_nr(i, rd->span) { struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); s64 diff; @@ -1000,7 +1000,7 @@ static int pull_rt_task(struct rq *this_rq) next = pick_next_task_rt(this_rq); - for_each_cpu_mask(cpu, this_rq->rd->rto_mask) { + for_each_cpu_mask_nr(cpu, this_rq->rd->rto_mask) { if (this_cpu == cpu) continue; diff --git a/kernel/taskstats.c b/kernel/taskstats.c index 4a23517169a..06b17547f4e 100644 --- a/kernel/taskstats.c +++ b/kernel/taskstats.c @@ -301,7 +301,7 @@ static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd) return -EINVAL; if (isadd == REGISTER) { - for_each_cpu_mask(cpu, mask) { + for_each_cpu_mask_nr(cpu, mask) { s = kmalloc_node(sizeof(struct listener), GFP_KERNEL, cpu_to_node(cpu)); if (!s) @@ -320,7 +320,7 @@ static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd) /* Deregister or cleanup */ cleanup: - for_each_cpu_mask(cpu, mask) { + for_each_cpu_mask_nr(cpu, mask) { listeners = &per_cpu(listener_array, cpu); down_write(&listeners->sem); list_for_each_entry_safe(s, tmp, &listeners->list, list) { diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 29fc39f1029..28c2b2c96ac 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -397,7 +397,7 @@ void flush_workqueue(struct workqueue_struct *wq) might_sleep(); lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_); lock_release(&wq->lockdep_map, 1, _THIS_IP_); - for_each_cpu_mask(cpu, *cpu_map) + for_each_cpu_mask_nr(cpu, *cpu_map) flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu)); } EXPORT_SYMBOL_GPL(flush_workqueue); @@ -477,7 +477,7 @@ static void wait_on_work(struct work_struct *work) wq = cwq->wq; cpu_map = wq_cpu_map(wq); - for_each_cpu_mask(cpu, *cpu_map) + for_each_cpu_mask_nr(cpu, *cpu_map) wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work); } @@ -813,7 +813,7 @@ void destroy_workqueue(struct workqueue_struct *wq) list_del(&wq->list); spin_unlock(&workqueue_lock); - for_each_cpu_mask(cpu, *cpu_map) + for_each_cpu_mask_nr(cpu, *cpu_map) cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu)); put_online_cpus(); From 6d6a4360876f1e758e215570ccb04518db7cec3a Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Mon, 12 May 2008 21:21:13 +0200 Subject: [PATCH 0008/1435] mm: use performance variant for_each_cpu_mask_nr Change references from for_each_cpu_mask to for_each_cpu_mask_nr where appropriate Reviewed-by: Paul Jackson Reviewed-by: Christoph Lameter Signed-off-by: Mike Travis Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- mm/allocpercpu.c | 4 ++-- mm/vmstat.c | 2 +- 2 
files changed, 3 insertions(+), 3 deletions(-) diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c index f4026bae6ee..b5411c2c47d 100644 --- a/mm/allocpercpu.c +++ b/mm/allocpercpu.c @@ -35,7 +35,7 @@ EXPORT_SYMBOL_GPL(percpu_depopulate); void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask) { int cpu; - for_each_cpu_mask(cpu, *mask) + for_each_cpu_mask_nr(cpu, *mask) percpu_depopulate(__pdata, cpu); } EXPORT_SYMBOL_GPL(__percpu_depopulate_mask); @@ -86,7 +86,7 @@ int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp, int cpu; cpus_clear(populated); - for_each_cpu_mask(cpu, *mask) + for_each_cpu_mask_nr(cpu, *mask) if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) { __percpu_depopulate_mask(__pdata, &populated); return -ENOMEM; diff --git a/mm/vmstat.c b/mm/vmstat.c index db9eabb2c5b..c3d4a781802 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -26,7 +26,7 @@ static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask) memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long)); - for_each_cpu_mask(cpu, *cpumask) { + for_each_cpu_mask_nr(cpu, *cpumask) { struct vm_event_state *this = &per_cpu(vm_event_states, cpu); for (i = 0; i < NR_VM_EVENT_ITEMS; i++) From 0e12f848b337fc034ceb3c0d03d75f8de1b8cc96 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Mon, 12 May 2008 21:21:13 +0200 Subject: [PATCH 0009/1435] net: use performance variant for_each_cpu_mask_nr Change references from for_each_cpu_mask to for_each_cpu_mask_nr where appropriate Reviewed-by: Paul Jackson Reviewed-by: Christoph Lameter Signed-off-by: Mike Travis Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- net/core/dev.c | 4 ++-- net/iucv/iucv.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/net/core/dev.c b/net/core/dev.c index 58296307787..ee61b987f4d 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2239,7 +2239,7 @@ out: */ if (!cpus_empty(net_dma.channel_mask)) { int chan_idx; - for_each_cpu_mask(chan_idx, net_dma.channel_mask) { + for_each_cpu_mask_nr(chan_idx, net_dma.channel_mask) { struct dma_chan *chan = net_dma.channels[chan_idx]; if (chan) dma_async_memcpy_issue_pending(chan); @@ -4300,7 +4300,7 @@ static void net_dma_rebalance(struct net_dma *net_dma) i = 0; cpu = first_cpu(cpu_online_map); - for_each_cpu_mask(chan_idx, net_dma->channel_mask) { + for_each_cpu_mask_nr(chan_idx, net_dma->channel_mask) { chan = net_dma->channels[chan_idx]; n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask)) diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index 91897076213..8de51107059 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c @@ -497,7 +497,7 @@ static void iucv_setmask_up(void) /* Disable all cpu but the first in cpu_irq_cpumask. 
*/ cpumask = iucv_irq_cpumask; cpu_clear(first_cpu(iucv_irq_cpumask), cpumask); - for_each_cpu_mask(cpu, cpumask) + for_each_cpu_mask_nr(cpu, cpumask) smp_call_function_single(cpu, iucv_block_cpu, NULL, 0, 1); } From 334ef7a7ab8f80b689a2be95d5e62d2167900865 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Mon, 12 May 2008 21:21:13 +0200 Subject: [PATCH 0010/1435] x86: use performance variant for_each_cpu_mask_nr Change references from for_each_cpu_mask to for_each_cpu_mask_nr where appropriate Reviewed-by: Paul Jackson Reviewed-by: Christoph Lameter Signed-off-by: Mike Travis Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner commit 2d474871e2fb092eb46a0930aba5442e10eb96cc Author: Mike Travis Date: Mon May 12 21:21:13 2008 +0200 --- arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 6 +++--- arch/x86/kernel/cpu/cpufreq/p4-clockmod.c | 6 +++--- arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 8 ++++---- arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c | 10 +++++----- arch/x86/kernel/cpu/cpufreq/speedstep-ich.c | 4 ++-- arch/x86/kernel/cpu/intel_cacheinfo.c | 2 +- arch/x86/kernel/cpu/mcheck/mce_amd_64.c | 4 ++-- arch/x86/kernel/io_apic_64.c | 8 ++++---- arch/x86/kernel/smpboot.c | 8 ++++---- arch/x86/xen/smp.c | 4 ++-- include/asm-x86/ipi.h | 2 +- 11 files changed, 31 insertions(+), 31 deletions(-) diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index b0c8208df9f..dd097b83583 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c @@ -202,7 +202,7 @@ static void drv_write(struct drv_cmd *cmd) cpumask_t saved_mask = current->cpus_allowed; unsigned int i; - for_each_cpu_mask(i, cmd->mask) { + for_each_cpu_mask_nr(i, cmd->mask) { set_cpus_allowed_ptr(current, &cpumask_of_cpu(i)); do_drv_write(cmd); } @@ -451,7 +451,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, freqs.old = perf->states[perf->state].core_frequency * 1000; freqs.new = data->freq_table[next_state].frequency; - for_each_cpu_mask(i, cmd.mask) { + for_each_cpu_mask_nr(i, cmd.mask) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); } @@ -466,7 +466,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, } } - for_each_cpu_mask(i, cmd.mask) { + for_each_cpu_mask_nr(i, cmd.mask) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); } diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c index 199e4e05e5d..f1685fb91fb 100644 --- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c +++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c @@ -122,7 +122,7 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy, return 0; /* notifiers */ - for_each_cpu_mask(i, policy->cpus) { + for_each_cpu_mask_nr(i, policy->cpus) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); } @@ -130,11 +130,11 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy, /* run on each logical CPU, see section 13.15.3 of IA32 Intel Architecture Software * Developer's Manual, Volume 3 */ - for_each_cpu_mask(i, policy->cpus) + for_each_cpu_mask_nr(i, policy->cpus) cpufreq_p4_setdc(i, p4clockmod_table[newstate].index); /* notifiers */ - for_each_cpu_mask(i, policy->cpus) { + for_each_cpu_mask_nr(i, policy->cpus) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); } diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index 46d4034d9f3..06d6eea5e07 100644 --- 
a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c @@ -966,7 +966,7 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, unsigned i freqs.old = find_khz_freq_from_fid(data->currfid); freqs.new = find_khz_freq_from_fid(fid); - for_each_cpu_mask(i, *(data->available_cores)) { + for_each_cpu_mask_nr(i, *(data->available_cores)) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); } @@ -974,7 +974,7 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, unsigned i res = transition_fid_vid(data, fid, vid); freqs.new = find_khz_freq_from_fid(data->currfid); - for_each_cpu_mask(i, *(data->available_cores)) { + for_each_cpu_mask_nr(i, *(data->available_cores)) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); } @@ -997,7 +997,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i freqs.old = find_khz_freq_from_pstate(data->powernow_table, data->currpstate); freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate); - for_each_cpu_mask(i, *(data->available_cores)) { + for_each_cpu_mask_nr(i, *(data->available_cores)) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); } @@ -1005,7 +1005,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i res = transition_pstate(data, pstate); freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate); - for_each_cpu_mask(i, *(data->available_cores)) { + for_each_cpu_mask_nr(i, *(data->available_cores)) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); } diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c index 908dd347c67..8b0dd6f2a1a 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c @@ -476,7 +476,7 @@ static int centrino_target (struct cpufreq_policy *policy, saved_mask = current->cpus_allowed; first_cpu = 1; cpus_clear(covered_cpus); - for_each_cpu_mask(j, online_policy_cpus) { + for_each_cpu_mask_nr(j, online_policy_cpus) { /* * Support for SMP systems. 
* Make sure we are running on CPU that wants to change freq @@ -517,7 +517,7 @@ static int centrino_target (struct cpufreq_policy *policy, dprintk("target=%dkHz old=%d new=%d msr=%04x\n", target_freq, freqs.old, freqs.new, msr); - for_each_cpu_mask(k, online_policy_cpus) { + for_each_cpu_mask_nr(k, online_policy_cpus) { freqs.cpu = k; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); @@ -540,7 +540,7 @@ static int centrino_target (struct cpufreq_policy *policy, preempt_enable(); } - for_each_cpu_mask(k, online_policy_cpus) { + for_each_cpu_mask_nr(k, online_policy_cpus) { freqs.cpu = k; cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); } @@ -554,7 +554,7 @@ static int centrino_target (struct cpufreq_policy *policy, */ if (!cpus_empty(covered_cpus)) { - for_each_cpu_mask(j, covered_cpus) { + for_each_cpu_mask_nr(j, covered_cpus) { set_cpus_allowed_ptr(current, &cpumask_of_cpu(j)); wrmsr(MSR_IA32_PERF_CTL, oldmsr, h); @@ -564,7 +564,7 @@ static int centrino_target (struct cpufreq_policy *policy, tmp = freqs.new; freqs.new = freqs.old; freqs.old = tmp; - for_each_cpu_mask(j, online_policy_cpus) { + for_each_cpu_mask_nr(j, online_policy_cpus) { freqs.cpu = j; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c index 1b50244b1fd..191f7263c61 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c @@ -279,7 +279,7 @@ static int speedstep_target (struct cpufreq_policy *policy, cpus_allowed = current->cpus_allowed; - for_each_cpu_mask(i, policy->cpus) { + for_each_cpu_mask_nr(i, policy->cpus) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); } @@ -292,7 +292,7 @@ static int speedstep_target (struct cpufreq_policy *policy, /* allow to be run on all CPUs */ set_cpus_allowed_ptr(current, &cpus_allowed); - for_each_cpu_mask(i, policy->cpus) { + for_each_cpu_mask_nr(i, policy->cpus) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); } diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index 26d615dcb14..bfade3301c3 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c @@ -488,7 +488,7 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) int sibling; this_leaf = CPUID4_INFO_IDX(cpu, index); - for_each_cpu_mask(sibling, this_leaf->shared_cpu_map) { + for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) { sibling_leaf = CPUID4_INFO_IDX(sibling, index); cpu_clear(cpu, sibling_leaf->shared_cpu_map); } diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c index 7c9a813e119..88736cadbaa 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c @@ -527,7 +527,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) if (err) goto out_free; - for_each_cpu_mask(i, b->cpus) { + for_each_cpu_mask_nr(i, b->cpus) { if (i == cpu) continue; @@ -617,7 +617,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank) #endif /* remove all sibling symlinks before unregistering */ - for_each_cpu_mask(i, b->cpus) { + for_each_cpu_mask_nr(i, b->cpus) { if (i == cpu) continue; diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c index ef1a8dfcc52..e2838cbd2ff 100644 --- a/arch/x86/kernel/io_apic_64.c +++ 
b/arch/x86/kernel/io_apic_64.c @@ -718,7 +718,7 @@ static int __assign_irq_vector(int irq, cpumask_t mask) return 0; } - for_each_cpu_mask(cpu, mask) { + for_each_cpu_mask_nr(cpu, mask) { cpumask_t domain, new_mask; int new_cpu; int vector, offset; @@ -739,7 +739,7 @@ next: continue; if (vector == IA32_SYSCALL_VECTOR) goto next; - for_each_cpu_mask(new_cpu, new_mask) + for_each_cpu_mask_nr(new_cpu, new_mask) if (per_cpu(vector_irq, new_cpu)[vector] != -1) goto next; /* Found one! */ @@ -749,7 +749,7 @@ next: cfg->move_in_progress = 1; cfg->old_domain = cfg->domain; } - for_each_cpu_mask(new_cpu, new_mask) + for_each_cpu_mask_nr(new_cpu, new_mask) per_cpu(vector_irq, new_cpu)[vector] = irq; cfg->vector = vector; cfg->domain = domain; @@ -781,7 +781,7 @@ static void __clear_irq_vector(int irq) vector = cfg->vector; cpus_and(mask, cfg->domain, cpu_online_map); - for_each_cpu_mask(cpu, mask) + for_each_cpu_mask_nr(cpu, mask) per_cpu(vector_irq, cpu)[vector] = -1; cfg->vector = 0; diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 38988491c62..fff8ebaa554 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -487,7 +487,7 @@ void __cpuinit set_cpu_sibling_map(int cpu) cpu_set(cpu, cpu_sibling_setup_map); if (smp_num_siblings > 1) { - for_each_cpu_mask(i, cpu_sibling_setup_map) { + for_each_cpu_mask_nr(i, cpu_sibling_setup_map) { if (c->phys_proc_id == cpu_data(i).phys_proc_id && c->cpu_core_id == cpu_data(i).cpu_core_id) { cpu_set(i, per_cpu(cpu_sibling_map, cpu)); @@ -510,7 +510,7 @@ void __cpuinit set_cpu_sibling_map(int cpu) return; } - for_each_cpu_mask(i, cpu_sibling_setup_map) { + for_each_cpu_mask_nr(i, cpu_sibling_setup_map) { if (per_cpu(cpu_llc_id, cpu) != BAD_APICID && per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) { cpu_set(i, c->llc_shared_map); @@ -1298,7 +1298,7 @@ static void remove_siblinginfo(int cpu) int sibling; struct cpuinfo_x86 *c = &cpu_data(cpu); - for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) { + for_each_cpu_mask_nr(sibling, per_cpu(cpu_core_map, cpu)) { cpu_clear(cpu, per_cpu(cpu_core_map, sibling)); /*/ * last thread sibling in this cpu core going down @@ -1307,7 +1307,7 @@ static void remove_siblinginfo(int cpu) cpu_data(sibling).booted_cores--; } - for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu)) + for_each_cpu_mask_nr(sibling, per_cpu(cpu_sibling_map, cpu)) cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling)); cpus_clear(per_cpu(cpu_sibling_map, cpu)); cpus_clear(per_cpu(cpu_core_map, cpu)); diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 94e69000f98..7a70638797e 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -345,7 +345,7 @@ static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector) cpus_and(mask, mask, cpu_online_map); - for_each_cpu_mask(cpu, mask) + for_each_cpu_mask_nr(cpu, mask) xen_send_IPI_one(cpu, vector); } @@ -413,7 +413,7 @@ int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *), /* Make sure other vcpus get a chance to run if they need to. 
*/ yield = false; - for_each_cpu_mask(cpu, mask) + for_each_cpu_mask_nr(cpu, mask) if (xen_vcpu_stolen(cpu)) yield = true; diff --git a/include/asm-x86/ipi.h b/include/asm-x86/ipi.h index ecc80f341f3..5f7310aa3ef 100644 --- a/include/asm-x86/ipi.h +++ b/include/asm-x86/ipi.h @@ -121,7 +121,7 @@ static inline void send_IPI_mask_sequence(cpumask_t mask, int vector) * - mbligh */ local_irq_save(flags); - for_each_cpu_mask(query_cpu, mask) { + for_each_cpu_mask_nr(query_cpu, mask) { __send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu), vector, APIC_DEST_PHYSICAL); } From 5d7bfd0c4d463d288422032c9903d0452dee141d Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Mon, 12 May 2008 21:21:13 +0200 Subject: [PATCH 0011/1435] infiniband: use performance variant for_each_cpu_mask_nr Change references from for_each_cpu_mask to for_each_cpu_mask_nr where appropriate Reviewed-by: Paul Jackson Reviewed-by: Christoph Lameter Signed-off-by: Mike Travis Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- drivers/infiniband/hw/ehca/ehca_irq.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c index ce1ab0571be..43180b952c1 100644 --- a/drivers/infiniband/hw/ehca/ehca_irq.c +++ b/drivers/infiniband/hw/ehca/ehca_irq.c @@ -641,8 +641,8 @@ static inline int find_next_online_cpu(struct ehca_comp_pool *pool) ehca_dmp(&cpu_online_map, sizeof(cpumask_t), ""); spin_lock_irqsave(&pool->last_cpu_lock, flags); - cpu = next_cpu(pool->last_cpu, cpu_online_map); - if (cpu == NR_CPUS) + cpu = next_cpu_nr(pool->last_cpu, cpu_online_map); + if (cpu >= nr_cpu_ids) cpu = first_cpu(cpu_online_map); pool->last_cpu = cpu; spin_unlock_irqrestore(&pool->last_cpu_lock, flags); From cad0e458d17c643c20c1d38f45a1d26125e6a622 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Mon, 12 May 2008 21:21:13 +0200 Subject: [PATCH 0012/1435] clocksource/events: use performance variant for_each_cpu_mask_nr Change references from for_each_cpu_mask to for_each_cpu_mask_nr where appropriate Reviewed-by: Paul Jackson Reviewed-by: Christoph Lameter Signed-off-by: Mike Travis Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- kernel/time/clocksource.c | 4 ++-- kernel/time/tick-broadcast.c | 3 +-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index dadde5361f3..60ceabd53f2 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -145,9 +145,9 @@ static void clocksource_watchdog(unsigned long data) * Cycle through CPUs to check if the CPUs stay * synchronized to each other. 
*/ - int next_cpu = next_cpu(raw_smp_processor_id(), cpu_online_map); + int next_cpu = next_cpu_nr(raw_smp_processor_id(), cpu_online_map); - if (next_cpu >= NR_CPUS) + if (next_cpu >= nr_cpu_ids) next_cpu = first_cpu(cpu_online_map); watchdog_timer.expires += WATCHDOG_INTERVAL; add_timer_on(&watchdog_timer, next_cpu); diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 57a1f02e5ec..2d0a9634625 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -397,8 +397,7 @@ again: mask = CPU_MASK_NONE; now = ktime_get(); /* Find all expired events */ - for (cpu = first_cpu(tick_broadcast_oneshot_mask); cpu != NR_CPUS; - cpu = next_cpu(cpu, tick_broadcast_oneshot_mask)) { + for_each_cpu_mask_nr(cpu, tick_broadcast_oneshot_mask) { td = &per_cpu(tick_cpu_device, cpu); if (td->evtdev->next_event.tv64 <= now.tv64) cpu_set(cpu, mask); From 3f9b48a7584851997702cdc3f58e7811b5546397 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Mon, 12 May 2008 21:21:13 +0200 Subject: [PATCH 0013/1435] net: Pass reference to cpumask variable in net/sunrpc/svc.c * Pass reference to cpumask variable instead of using stack. For inclusion into sched-devel/latest tree. Based on: git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git + sched-devel/latest .../mingo/linux-2.6-sched-devel.git Signed-off-by: Mike Travis Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- net/sunrpc/svc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 01c7e311b90..d43cf8ddff6 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -603,7 +603,7 @@ __svc_create_thread(svc_thread_fn func, struct svc_serv *serv, error = kernel_thread((int (*)(void *)) func, rqstp, 0); if (have_oldmask) - set_cpus_allowed(current, oldmask); + set_cpus_allowed_ptr(current, &oldmask); if (error < 0) goto out_thread; From 7baac8b91f9871ba8cb09af84de4ae1d86d07812 Mon Sep 17 00:00:00 2001 From: Alexander van Heukelum Date: Tue, 13 May 2008 11:28:21 +0200 Subject: [PATCH 0014/1435] cpumask: make for_each_cpu_mask a bit smaller The for_each_cpu_mask loop is used quite often in the kernel. It makes use of two functions: first_cpu and next_cpu. This patch changes for_each_cpu_mask to use only the latter. Because next_cpu finds the next eligible cpu _after_ the given one, the iteration variable has to be initialized to -1 and next_cpu has to be called with this value before the first iteration. 
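Hand-expanding the macros makes the trick visible (an illustrative sketch that mirrors the hunk below, not additional patch content; work() is a stand-in loop body):

	/* before: two helpers -- first_cpu() to start, next_cpu() to advance */
	for ((cpu) = first_cpu(mask);
	     (cpu) < NR_CPUS;
	     (cpu) = next_cpu((cpu), (mask)))
		work(cpu);

	/* after: next_cpu() alone -- seeded with -1, its first call returns
	 * the first set bit, and the comma expression keeps the bounds
	 * check after each advance */
	for ((cpu) = -1;
	     (cpu) = next_cpu((cpu), (mask)),
	     (cpu) < NR_CPUS; )
		work(cpu);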
An x86_64 defconfig kernel (from sched/latest) is about 2500 bytes smaller with this patch applied:

   text    data     bss     dec     hex filename
6222517  917952  749932 7890401  7865e1 vmlinux.orig
6219922  917952  749932 7887806  785bbe vmlinux

The same size reduction is seen for defconfig+MAXSMP:

   text    data     bss      dec     hex filename
6241772 2563968 1492716 10298456  9d2458 vmlinux.orig
6239211 2563968 1492716 10295895  9d1a57 vmlinux

Signed-off-by: Alexander van Heukelum Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- include/linux/cpumask.h | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index b49472d1af8..80226e77614 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -370,10 +370,10 @@ int __any_online_cpu(const cpumask_t *mask); #define first_cpu(src) __first_cpu(&(src)) #define next_cpu(n, src) __next_cpu((n), &(src)) #define any_online_cpu(mask) __any_online_cpu(&(mask)) -#define for_each_cpu_mask(cpu, mask) \ - for ((cpu) = first_cpu(mask); \ - (cpu) < NR_CPUS; \ - (cpu) = next_cpu((cpu), (mask))) +#define for_each_cpu_mask(cpu, mask) \ + for ((cpu) = -1; \ + (cpu) = next_cpu((cpu), (mask)), \ + (cpu) < NR_CPUS; ) #endif #if NR_CPUS <= 64 @@ -387,10 +387,10 @@ int __any_online_cpu(const cpumask_t *mask); int __next_cpu_nr(int n, const cpumask_t *srcp); #define next_cpu_nr(n, src) __next_cpu_nr((n), &(src)) #define cpus_weight_nr(cpumask) __cpus_weight(&(cpumask), nr_cpu_ids) -#define for_each_cpu_mask_nr(cpu, mask) \ - for ((cpu) = first_cpu(mask); \ - (cpu) < nr_cpu_ids; \ - (cpu) = next_cpu_nr((cpu), (mask))) +#define for_each_cpu_mask_nr(cpu, mask) \ + for ((cpu) = -1; \ + (cpu) = next_cpu_nr((cpu), (mask)), \ + (cpu) < nr_cpu_ids; ) #endif /* NR_CPUS > 64 */ From 9c44bc03fff44ff04237a7d92e35304a0e50c331 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 12 May 2008 21:21:04 +0200 Subject: [PATCH 0015/1435] softlockup: allow panic on lockup allow users to configure the softlockup detector to generate a panic instead of a warning message. high-availability systems might opt for this strict method (combined with panic_timeout= boot option/sysctl), instead of generating softlockup warnings ad infinitum. also, automated tests work better if the system reboots reliably (into a safe kernel) in case of a lockup. The full spectrum of configurability is supported: boot option, sysctl option and Kconfig option. it's default-disabled. Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- Documentation/kernel-parameters.txt | 3 +++ include/linux/sched.h | 3 ++- kernel/softlockup.c | 21 +++++++++++++++++++++ kernel/sysctl.c | 11 +++++++++++ lib/Kconfig.debug | 26 +++++++++++++++++++++++++- 5 files changed, 62 insertions(+), 2 deletions(-) diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index e07c432c731..042588fa12e 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -1971,6 +1971,9 @@ and is between 256 and 4096 characters. It is defined in the file snd-ymfpci= [HW,ALSA] + softlockup_panic= + [KNL] Should the soft-lockup detector generate panics.
+ sonypi.*= [HW] Sony Programmable I/O Control Device driver See Documentation/sonypi.txt diff --git a/include/linux/sched.h b/include/linux/sched.h index 5395a6176f4..71f5972dc48 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -294,7 +294,8 @@ extern void softlockup_tick(void); extern void spawn_softlockup_task(void); extern void touch_softlockup_watchdog(void); extern void touch_all_softlockup_watchdogs(void); -extern unsigned long softlockup_thresh; +extern unsigned int softlockup_panic; +extern unsigned long softlockup_thresh; extern unsigned long sysctl_hung_task_check_count; extern unsigned long sysctl_hung_task_timeout_secs; extern unsigned long sysctl_hung_task_warnings; diff --git a/kernel/softlockup.c b/kernel/softlockup.c index 01b6522fd92..78e0ad21cb0 100644 --- a/kernel/softlockup.c +++ b/kernel/softlockup.c @@ -27,6 +27,21 @@ static DEFINE_PER_CPU(struct task_struct *, watchdog_task); static int __read_mostly did_panic; unsigned long __read_mostly softlockup_thresh = 60; +/* + * Should we panic (and reboot, if panic_timeout= is set) when a + * soft-lockup occurs: + */ +unsigned int __read_mostly softlockup_panic = + CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE; + +static int __init softlockup_panic_setup(char *str) +{ + softlockup_panic = simple_strtoul(str, NULL, 0); + + return 1; +} +__setup("softlockup_panic=", softlockup_panic_setup); + static int softlock_panic(struct notifier_block *this, unsigned long event, void *ptr) { @@ -120,6 +135,9 @@ void softlockup_tick(void) else dump_stack(); spin_unlock(&print_lock); + + if (softlockup_panic) + panic("softlockup: hung tasks"); } /* @@ -172,6 +190,9 @@ static void check_hung_task(struct task_struct *t, unsigned long now) t->last_switch_timestamp = now; touch_nmi_watchdog(); + + if (softlockup_panic) + panic("softlockup: blocked tasks"); } /* diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 29116652dca..2d3b388c402 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -727,6 +727,17 @@ static struct ctl_table kern_table[] = { }, #endif #ifdef CONFIG_DETECT_SOFTLOCKUP + { + .ctl_name = CTL_UNNUMBERED, + .procname = "softlockup_panic", + .data = &softlockup_panic, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec_minmax, + .strategy = &sysctl_intvec, + .extra1 = &zero, + .extra2 = &one, + }, { .ctl_name = CTL_UNNUMBERED, .procname = "softlockup_thresh", diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index d2099f41aa1..509ae35a9ef 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -147,7 +147,7 @@ config DETECT_SOFTLOCKUP help Say Y here to enable the kernel to detect "soft lockups", which are bugs that cause the kernel to loop in kernel - mode for more than 10 seconds, without giving other tasks a + mode for more than 60 seconds, without giving other tasks a chance to run. When a soft-lockup is detected, the kernel will print the @@ -159,6 +159,30 @@ config DETECT_SOFTLOCKUP can be detected via the NMI-watchdog, on platforms that support it.) +config BOOTPARAM_SOFTLOCKUP_PANIC + bool "Panic (Reboot) On Soft Lockups" + depends on DETECT_SOFTLOCKUP + help + Say Y here to enable the kernel to panic on "soft lockups", + which are bugs that cause the kernel to loop in kernel + mode for more than 60 seconds, without giving other tasks a + chance to run. + + The panic can be used in combination with panic_timeout, + to cause the system to reboot automatically after a + lockup has been detected.
This feature is useful for + high-availability systems that have uptime guarantees and + where a lockup must be resolved ASAP. + + Say N if unsure. + +config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE + int + depends on DETECT_SOFTLOCKUP + range 0 1 + default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC + default 1 if BOOTPARAM_SOFTLOCKUP_PANIC + config SCHED_DEBUG bool "Collect scheduler debugging info" depends on DEBUG_KERNEL && PROC_FS From 9383d9679056e6cc4e7ff70f31da945a268238f4 Mon Sep 17 00:00:00 2001 From: Dimitri Sivanich Date: Mon, 12 May 2008 21:21:14 +0200 Subject: [PATCH 0016/1435] softlockup: fix softlockup_thresh unaligned access and disable detection at runtime Fix unaligned access errors when setting softlockup_thresh on 64 bit platforms. Allow softlockup detection to be disabled by setting softlockup_thresh <= 0. Detect that boot time softlockup detection has been disabled earlier in softlockup_tick. Signed-off-by: Dimitri Sivanich Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- include/linux/sched.h | 2 +- kernel/softlockup.c | 12 ++++++++++-- kernel/sysctl.c | 9 +++++---- 3 files changed, 16 insertions(+), 7 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index 71f5972dc48..ea26221644e 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -295,10 +295,10 @@ extern void spawn_softlockup_task(void); extern void touch_softlockup_watchdog(void); extern void touch_all_softlockup_watchdogs(void); extern unsigned int softlockup_panic; -extern unsigned long softlockup_thresh; extern unsigned long sysctl_hung_task_check_count; extern unsigned long sysctl_hung_task_timeout_secs; extern unsigned long sysctl_hung_task_warnings; +extern int softlockup_thresh; #else static inline void softlockup_tick(void) { diff --git a/kernel/softlockup.c b/kernel/softlockup.c index 78e0ad21cb0..a3a0b239b7f 100644 --- a/kernel/softlockup.c +++ b/kernel/softlockup.c @@ -25,7 +25,7 @@ static DEFINE_PER_CPU(unsigned long, print_timestamp); static DEFINE_PER_CPU(struct task_struct *, watchdog_task); static int __read_mostly did_panic; -unsigned long __read_mostly softlockup_thresh = 60; +int __read_mostly softlockup_thresh = 60; /* * Should we panic (and reboot, if panic_timeout= is set) when a @@ -94,6 +94,14 @@ void softlockup_tick(void) struct pt_regs *regs = get_irq_regs(); unsigned long now; + /* Is detection switched off? 
*/ + if (!per_cpu(watchdog_task, this_cpu) || softlockup_thresh <= 0) { + /* Be sure we don't false trigger if switched back on */ + if (touch_timestamp) + per_cpu(touch_timestamp, this_cpu) = 0; + return; + } + if (touch_timestamp == 0) { touch_softlockup_watchdog(); return; @@ -104,7 +112,7 @@ void softlockup_tick(void) /* report at most once a second */ if ((print_timestamp >= touch_timestamp && print_timestamp < (touch_timestamp + 1)) || - did_panic || !per_cpu(watchdog_task, this_cpu)) { + did_panic) { return; } diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 2d3b388c402..31c19a79738 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -84,12 +84,13 @@ extern int latencytop_enabled; extern int sysctl_nr_open_min, sysctl_nr_open_max; /* Constants used for minimum and maximum */ -#if defined(CONFIG_DETECT_SOFTLOCKUP) || defined(CONFIG_HIGHMEM) +#ifdef CONFIG_HIGHMEM static int one = 1; #endif #ifdef CONFIG_DETECT_SOFTLOCKUP static int sixty = 60; +static int neg_one = -1; #endif #ifdef CONFIG_MMU @@ -742,11 +743,11 @@ static struct ctl_table kern_table[] = { .ctl_name = CTL_UNNUMBERED, .procname = "softlockup_thresh", .data = &softlockup_thresh, - .maxlen = sizeof(unsigned long), + .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_doulongvec_minmax, + .proc_handler = &proc_dointvec_minmax, .strategy = &sysctl_intvec, - .extra1 = &one, + .extra1 = &neg_one, .extra2 = &sixty, }, { From 1c4cd6dd1d0fd3057bb6b8c87460049497889d1b Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 12 May 2008 21:21:14 +0200 Subject: [PATCH 0017/1435] softlockup: fix softlockup_thresh fix Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- kernel/sysctl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 31c19a79738..a829dc8d7a9 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -84,7 +84,7 @@ extern int latencytop_enabled; extern int sysctl_nr_open_min, sysctl_nr_open_max; /* Constants used for minimum and maximum */ -#ifdef CONFIG_HIGHMEM +#if defined(CONFIG_HIGHMEM) || defined(CONFIG_DETECT_SOFTLOCKUP) static int one = 1; #endif From 02ff375590ac4140d88afc76505df1ad45c6af59 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 12 May 2008 15:43:53 +0200 Subject: [PATCH 0018/1435] softlockup: fix false positives on nohz if CPU is 100% idle for more than 60 seconds Fix (probably theoretical only) rq->clock update bug: in tick_nohz_update_jiffies() [which is called on all irq entry on all cpus where the irq entry hits an idle cpu] we call touch_softlockup_watchdog() before we update jiffies. That works fine most of the time when idle timeouts are within 60 seconds. But when an idle timeout is beyond 60 seconds, jiffies is updated with a jump of more than 60 seconds, which causes a jump in cpu-clock of more than 60 seconds, triggering a false positive. 
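Before the tags, the ordering argument can be made concrete. A toy user-space model of the wakeup path, with plain integers for seconds (the real code uses per-CPU timestamps and cpu_clock()):

#include <assert.h>

static long clock_now;	/* stands in for jiffies/cpu_clock() */
static long touched;	/* stands in for touch_timestamp */

static void touch_watchdog(void) { touched = clock_now; }
static int watchdog_fires(void) { return clock_now - touched > 60; }

int main(void)
{
	/* A CPU idles at t=100 and the wakeup interrupt lands at t=200:
	 * a 100s nohz sleep, longer than the 60s threshold. */

	/* Old (buggy) order in tick_nohz_update_jiffies(): */
	clock_now = 100;
	touch_watchdog();		/* stamp taken before the jiffies jump */
	clock_now = 200;		/* tick_do_update_jiffies64() */
	assert(watchdog_fires());	/* 100s delta: false positive */

	/* Fixed order, jiffies updated first: */
	touch_watchdog();		/* stamp taken after the jump */
	assert(!watchdog_fires());
	return 0;
}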
Reported-by: David Miller Signed-off-by: Ingo Molnar --- kernel/time/tick-sched.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index b854a895591..28abad66fc8 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -133,8 +133,6 @@ void tick_nohz_update_jiffies(void) if (!ts->tick_stopped) return; - touch_softlockup_watchdog(); - cpu_clear(cpu, nohz_cpu_mask); now = ktime_get(); ts->idle_waketime = now; @@ -142,6 +140,8 @@ void tick_nohz_update_jiffies(void) local_irq_save(flags); tick_do_update_jiffies64(now); local_irq_restore(flags); + + touch_softlockup_watchdog(); } void tick_nohz_stop_idle(int cpu) From 8c2238eaaf0f774ca0f8d9daad7a616429bbb7f1 Mon Sep 17 00:00:00 2001 From: Jason Wessel Date: Tue, 27 May 2008 12:23:29 -0500 Subject: [PATCH 0019/1435] softlockup: fix NMI hangs due to lock race - 2.6.26-rc regression The touch_nmi_watchdog() routine on x86 ultimately calls touch_softlockup_watchdog(). The problem is that to touch the softlockup watchdog, the cpu_clock code has to be called, which could involve multiple cpu locks and can lead to a hard hang if one of the locks is held by a processor that is not going to return anytime soon (such as could be the case with kgdb or perhaps even with some other kind of exception). This patch causes the public version of touch_softlockup_watchdog() to defer the cpu clock access to a later point. The test case for this problem is to use the following kernel config options: CONFIG_KGDB_TESTS=y CONFIG_KGDB_TESTS_ON_BOOT=y CONFIG_KGDB_TESTS_BOOT_STRING="V1F100I100000" It should be noted that the kgdb test suite and these options were not available until 2.6.26-rc2, so it was necessary to patch the kgdb test suite during the bisection. I would consider this patch a regression fix because the problem first appeared in commit 27ec4407790d075c325e1f4da0a19c56953cce23 when some logic was added to try to periodically sync the clocks. It was possible to work around this particular problem by simply not performing the sync anytime the system was in a critical context. This was OK until commit 3e51f33fcc7f55e6df25d15b55ed10c8b4da84cd, which added config option CONFIG_HAVE_UNSTABLE_SCHED_CLOCK and some multi-cpu locks to sync the clocks. It became clear that accessing this code from an NMI was the source of the lockups. Avoiding access to the low-level clock code from code inside the NMI processing also fixed the problem with the 27ec44... commit.
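The shape of the fix deserves a note: NMI context must not take the locks that cpu_clock() may take, so the public touch routine is reduced to a plain store of a sentinel, and the actual clock read is deferred to tick context. A condensed sketch of that two-step, with the per-CPU machinery and the real function signatures simplified:

/* Sketch only; the kernel keeps touch_timestamp per-CPU. */
static unsigned long touch_timestamp;

void touch_softlockup_watchdog(void)	/* NMI-safe: one plain store */
{
	touch_timestamp = 0;		/* sentinel meaning "re-stamp me" */
}

static void __touch_softlockup_watchdog(unsigned long now)
{
	touch_timestamp = now;		/* needs the clock: tick context only */
}

void softlockup_tick_fragment(unsigned long now)	/* invented name */
{
	if (touch_timestamp == 0) {	/* resolve the deferred touch */
		__touch_softlockup_watchdog(now);
		return;
	}
	/* ... compare now - touch_timestamp against softlockup_thresh ... */
}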
Signed-off-by: Jason Wessel Signed-off-by: Ingo Molnar --- kernel/softlockup.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/kernel/softlockup.c b/kernel/softlockup.c index a3a0b239b7f..6b682d86bdd 100644 --- a/kernel/softlockup.c +++ b/kernel/softlockup.c @@ -64,12 +64,17 @@ static unsigned long get_timestamp(int this_cpu) return cpu_clock(this_cpu) >> 30LL; /* 2^30 ~= 10^9 */ } -void touch_softlockup_watchdog(void) +static void __touch_softlockup_watchdog(void) { int this_cpu = raw_smp_processor_id(); __raw_get_cpu_var(touch_timestamp) = get_timestamp(this_cpu); } + +void touch_softlockup_watchdog(void) +{ + __raw_get_cpu_var(touch_timestamp) = 0; +} EXPORT_SYMBOL(touch_softlockup_watchdog); void touch_all_softlockup_watchdogs(void) @@ -103,7 +108,7 @@ void softlockup_tick(void) } if (touch_timestamp == 0) { - touch_softlockup_watchdog(); + __touch_softlockup_watchdog(); return; } @@ -118,7 +123,7 @@ void softlockup_tick(void) /* do not print during early bootup: */ if (unlikely(system_state != SYSTEM_RUNNING)) { - touch_softlockup_watchdog(); + __touch_softlockup_watchdog(); return; } @@ -243,7 +248,7 @@ static int watchdog(void *__bind_cpu) sched_setscheduler(current, SCHED_FIFO, ¶m); /* initialize timestamp */ - touch_softlockup_watchdog(); + __touch_softlockup_watchdog(); set_current_state(TASK_INTERRUPTIBLE); /* @@ -252,7 +257,7 @@ static int watchdog(void *__bind_cpu) * debug-printout triggers in softlockup_tick(). */ while (!kthread_should_stop()) { - touch_softlockup_watchdog(); + __touch_softlockup_watchdog(); schedule(); if (kthread_should_stop()) From a29ccf6f823a84d89e1c7aaaf221cf7282022024 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Tue, 3 Jun 2008 14:59:40 +0100 Subject: [PATCH 0020/1435] Make console charset translation optional By turning off the new CONSOLE_TRANSLATIONS option and dropping the associated code and tables from the kernel, we can save about 7KiB. Taken from linux-tiny project by Tim Bird and mangled further by dwmw2. Signed-off-by: Tim Bird Signed-off-by: David Woodhouse --- drivers/char/Kconfig | 8 ++++++++ drivers/char/Makefile | 4 ++-- drivers/char/vt.c | 2 +- include/linux/consolemap.h | 14 ++++++++++++++ include/linux/vt_kern.h | 19 +++++++++++++++++++ 5 files changed, 44 insertions(+), 3 deletions(-) diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 595a925c62a..b7f7371dee7 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -36,6 +36,14 @@ config VT If unsure, say Y, or else you won't be able to do much with your new shiny Linux system :-) +config CONSOLE_TRANSLATIONS + depends on VT + default y + bool "Enable character translations in console" if EMBEDDED + ---help--- + This enables support for font mapping and Unicode translation + on virtual consoles. 
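The mechanics below follow the standard Kconfig compile-out idiom: the Makefile gains a line keyed to the new symbol, and the headers degrade every entry point to a type-compatible, side-effect-free stub so call sites build unchanged either way. In miniature (hypothetical CONFIG_FEATURE_FOO, not the consolemap API itself):

/* feature.h: sketch of the compile-out idiom */
#ifdef CONFIG_FEATURE_FOO
int foo_transform(int c);	/* real implementation in foo.c */
void foo_init(void);
#else
#define foo_transform(c)	((int)(c))	 /* identity fallback */
#define foo_init()		do { } while (0) /* no-op statement */
#endif

The do { } while (0) form is what lets the no-op stub appear anywhere a function-call statement could, including unbraced if branches.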
+ config VT_CONSOLE bool "Support for console on virtual terminal" if EMBEDDED depends on VT diff --git a/drivers/char/Makefile b/drivers/char/Makefile index 4c1c584e9eb..6ef173cab14 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile @@ -12,8 +12,8 @@ obj-y += mem.o random.o tty_io.o n_tty.o tty_ioctl.o obj-$(CONFIG_LEGACY_PTYS) += pty.o obj-$(CONFIG_UNIX98_PTYS) += pty.o obj-y += misc.o -obj-$(CONFIG_VT) += vt_ioctl.o vc_screen.o consolemap.o \ - consolemap_deftbl.o selection.o keyboard.o +obj-$(CONFIG_VT) += vt_ioctl.o vc_screen.o selection.o keyboard.o +obj-$(CONFIG_CONSOLE_TRANSLATIONS) += consolemap.o consolemap_deftbl.o obj-$(CONFIG_HW_CONSOLE) += vt.o defkeymap.o obj-$(CONFIG_AUDIT) += tty_audit.o obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o diff --git a/drivers/char/vt.c b/drivers/char/vt.c index fa1ffbf2c62..18b7fb06dac 100644 --- a/drivers/char/vt.c +++ b/drivers/char/vt.c @@ -2208,7 +2208,7 @@ rescan_last_byte: c = 0xfffd; tc = c; } else { /* no utf or alternate charset mode */ - tc = vc->vc_translate[vc->vc_toggle_meta ? (c | 0x80) : c]; + tc = vc_translate(vc, c); } param.c = tc; diff --git a/include/linux/consolemap.h b/include/linux/consolemap.h index e2bf7e5db39..c4811da1338 100644 --- a/include/linux/consolemap.h +++ b/include/linux/consolemap.h @@ -3,6 +3,9 @@ * * Interface between console.c, selection.c and consolemap.c */ +#ifndef __LINUX_CONSOLEMAP_H__ +#define __LINUX_CONSOLEMAP_H__ + #define LAT1_MAP 0 #define GRAF_MAP 1 #define IBMPC_MAP 2 @@ -10,6 +13,7 @@ #include +#ifdef CONFIG_CONSOLE_TRANSLATIONS struct vc_data; extern u16 inverse_translate(struct vc_data *conp, int glyph, int use_unicode); @@ -18,3 +22,13 @@ extern int conv_uni_to_pc(struct vc_data *conp, long ucs); extern u32 conv_8bit_to_uni(unsigned char c); extern int conv_uni_to_8bit(u32 uni); void console_map_init(void); +#else +#define inverse_translate(conp, glyph, uni) ((uint16_t)glyph) +#define set_translate(m, vc) ((unsigned short *)NULL) +#define conv_uni_to_pc(conp, ucs) ((int) (ucs > 0xff ? -1: ucs)) +#define conv_8bit_to_uni(c) ((uint32_t)(c)) +#define conv_uni_to_8bit(c) ((int) ((c) & 0xff)) +#define console_map_init(c) do { ; } while (0) +#endif /* CONFIG_CONSOLE_TRANSLATIONS */ + +#endif /* __LINUX_CONSOLEMAP_H__ */ diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h index 9448ffbdcbf..14c0e91be9b 100644 --- a/include/linux/vt_kern.h +++ b/include/linux/vt_kern.h @@ -12,6 +12,7 @@ #include #include #include +#include /* * Presently, a lot of graphics programs do not restore the contents of @@ -54,6 +55,7 @@ void redraw_screen(struct vc_data *vc, int is_switch); struct tty_struct; int tioclinux(struct tty_struct *tty, unsigned long arg); +#ifdef CONFIG_CONSOLE_TRANSLATIONS /* consolemap.c */ struct unimapinit; @@ -71,6 +73,23 @@ void con_free_unimap(struct vc_data *vc); void con_protect_unimap(struct vc_data *vc, int rdonly); int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc); +#define vc_translate(vc, c) ((vc)->vc_translate[(c) | \ + ((vc)->vc_toggle_meta ?
0x80 : 0)]) +#else +#define con_set_trans_old(arg) (0) +#define con_get_trans_old(arg) (-EINVAL) +#define con_set_trans_new(arg) (0) +#define con_get_trans_new(arg) (-EINVAL) +#define con_clear_unimap(vc, ui) (0) +#define con_set_unimap(vc, ct, list) (0) +#define con_set_default_unimap(vc) (0) +#define con_copy_unimap(d, s) (0) +#define con_get_unimap(vc, ct, uct, list) (-EINVAL) +#define con_free_unimap(vc) do { ; } while (0) + +#define vc_translate(vc, c) (c) +#endif + /* vt.c */ int vt_waitactive(int vt); void change_console(struct vc_data *new_vc); From 688c91755dc3d3c03d8c67c1df13c02be258768e Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Mon, 16 Jun 2008 15:51:08 -0700 Subject: [PATCH 0021/1435] softlockup: print a module list on being stuck Most places in the kernel that go BUG: print a module list (which is very useful for doing statistics and finding patterns), however the softlockup detector does not do this yet. This patch adds the one line change to fix this gap. Signed-off-by: Arjan van de Ven Signed-off-by: Ingo Molnar --- kernel/softlockup.c | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel/softlockup.c b/kernel/softlockup.c index 6b682d86bdd..f2bf5decb10 100644 --- a/kernel/softlockup.c +++ b/kernel/softlockup.c @@ -143,6 +143,7 @@ void softlockup_tick(void) printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %lus! [%s:%d]\n", this_cpu, now - touch_timestamp, current->comm, task_pid_nr(current)); + print_modules(); if (regs) show_regs(regs); else From 8d5be7f4e8515af461cbc8f07687ccc81507d508 Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Wed, 25 Jun 2008 08:50:10 +0200 Subject: [PATCH 0022/1435] softlockup: show irqtrace This patch adds some information about when interrupts were last enabled and disabled to the output of the softlockup detector. Signed-off-by: Vegard Nossum Cc: Peter Zijlstra Cc: Johannes Weiner Cc: Arjan van de Ven Signed-off-by: Ingo Molnar --- kernel/softlockup.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/kernel/softlockup.c b/kernel/softlockup.c index f2bf5decb10..97977ecc317 100644 --- a/kernel/softlockup.c +++ b/kernel/softlockup.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -144,6 +145,7 @@ void softlockup_tick(void) this_cpu, now - touch_timestamp, current->comm, task_pid_nr(current)); print_modules(); + print_irqtrace_events(current); if (regs) show_regs(regs); else From dd7a1e5615b1719c0fdffee1ea5a7820ac8141a6 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Fri, 27 Jun 2008 15:07:21 +0200 Subject: [PATCH 0023/1435] softlockup: fix watchdog task wakeup frequency Updating the timestamp more often is pointless as we print the warnings only if we exceed the threshold. And the check for hung tasks relies on the last timestamp, so it will keep working correctly, too. Signed-off-by: Johannes Weiner Cc: Peter Zijlstra Signed-off-by: Ingo Molnar --- kernel/softlockup.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/kernel/softlockup.c b/kernel/softlockup.c index 97977ecc317..d53ab702a7d 100644 --- a/kernel/softlockup.c +++ b/kernel/softlockup.c @@ -130,8 +130,11 @@ void softlockup_tick(void) now = get_timestamp(this_cpu); - /* Wake up the high-prio watchdog task every second: */ - if (now > (touch_timestamp + 1)) + /* + * Wake up the high-prio watchdog task twice per + * threshold timespan.
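+ * (With the default softlockup_thresh of 60, that means at most one wakeup every 30 seconds.)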
+ */ + if (now > touch_timestamp + softlockup_thresh/2) wake_up_process(per_cpu(watchdog_task, this_cpu)); /* Warn about unreasonable delays: */ From 3e2f69fdd1b00166e7d589bce56b2d36a9e74374 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Tue, 1 Jul 2008 09:12:04 +0200 Subject: [PATCH 0024/1435] softlockup: fix watchdog task wakeup frequency The print_timestamp can never be bigger than the touch_timestamp, at maximum it can be equal. And if it is, the second check for touch_timestamp + 1 bigger print_timestamp is always true, too. The check for equality is sufficient as we proceed in one-second-steps and are at least one second away from the last print-out if we have another timestamp. Signed-off-by: Johannes Weiner Cc: Peter Zijlstra Signed-off-by: Ingo Molnar --- kernel/softlockup.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/kernel/softlockup.c b/kernel/softlockup.c index d53ab702a7d..7bd8d1aadd5 100644 --- a/kernel/softlockup.c +++ b/kernel/softlockup.c @@ -116,11 +116,8 @@ void softlockup_tick(void) print_timestamp = per_cpu(print_timestamp, this_cpu); /* report at most once a second */ - if ((print_timestamp >= touch_timestamp && - print_timestamp < (touch_timestamp + 1)) || - did_panic) { + if (print_timestamp == touch_timestamp || did_panic) return; - } /* do not print during early bootup: */ if (unlikely(system_state != SYSTEM_RUNNING)) { From 9982fbface82893e77d211fbabfbd229da6bdde6 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 6 Jul 2008 14:24:08 +0200 Subject: [PATCH 0025/1435] Revert "cpumask: introduce new APIs" This reverts commit acb7669c125676e63cf96582455509216c39745e. the wrappers are not needed anymore. Signed-off-by: Ingo Molnar --- include/linux/cpumask.h | 4 ---- 1 file changed, 4 deletions(-) diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 47418b1b410..80226e77614 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -394,10 +394,6 @@ int __next_cpu_nr(int n, const cpumask_t *srcp); #endif /* NR_CPUS > 64 */ -#define next_cpu_nr(n, src) next_cpu(n, src) -#define cpus_weight_nr(cpumask) cpus_weight(cpumask) -#define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask) - /* * The following particular system cpumasks and operations manage * possible, present and online cpus. Each of them is a fixed size From 75f10b465af9efd59906e9079ed9cbda7e0f7a43 Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Thu, 22 May 2008 16:20:18 +0100 Subject: [PATCH 0026/1435] [ARM] 5047/2: Support resetting by asserting GPIO pin This adds support for resetting via assertion of GPIO pin. This e.g. is used on Sharp Zaurus SL-6000. 
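Board code consumes this in two steps: register the pin once at init time, then route restarts through the new 'g' mode; the Spitz and Tosa conversions two patches below do exactly that. As a minimal hypothetical board hook (MYBOARD_* names invented for illustration):

/* Sketch: wiring GPIO-assisted reset into a PXA board file. */
#define MYBOARD_GPIO_ON_RESET	19	/* invented pin number */

static void myboard_restart(char mode)
{
	arm_machine_restart('g');	/* take the do_gpio_reset() path */
}

static void __init myboard_init(void)
{
	/* Keep the default watchdog ('h') path if the pin is unavailable. */
	if (init_gpio_reset(MYBOARD_GPIO_ON_RESET) == 0)
		arm_pm_restart = myboard_restart;
}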
Signed-off-by: Dmitry Baryshkov Acked-by: Eric Miao Signed-off-by: Russell King --- arch/arm/mach-pxa/Makefile | 2 +- arch/arm/mach-pxa/reset.c | 95 +++++++++++++++++++++++++++++ include/asm-arm/arch-pxa/hardware.h | 5 ++ include/asm-arm/arch-pxa/system.h | 17 +----- 4 files changed, 102 insertions(+), 17 deletions(-) create mode 100644 arch/arm/mach-pxa/reset.c diff --git a/arch/arm/mach-pxa/Makefile b/arch/arm/mach-pxa/Makefile index 0e6d05bb81a..c9e66fbe622 100644 --- a/arch/arm/mach-pxa/Makefile +++ b/arch/arm/mach-pxa/Makefile @@ -4,7 +4,7 @@ # Common support (must be linked before board specific support) obj-y += clock.o devices.o generic.o irq.o dma.o \ - time.o gpio.o + time.o gpio.o reset.o obj-$(CONFIG_PM) += pm.o sleep.o standby.o obj-$(CONFIG_CPU_FREQ) += cpu-pxa.o diff --git a/arch/arm/mach-pxa/reset.c b/arch/arm/mach-pxa/reset.c new file mode 100644 index 00000000000..551f313c94d --- /dev/null +++ b/arch/arm/mach-pxa/reset.c @@ -0,0 +1,95 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include + +#include + +static void do_hw_reset(void); + +static int reset_gpio = -1; + +int init_gpio_reset(int gpio) +{ + int rc; + + rc = gpio_request(gpio, "reset generator"); + if (rc) { + printk(KERN_ERR "Can't request reset_gpio\n"); + goto out; + } + + rc = gpio_direction_input(gpio); + if (rc) { + printk(KERN_ERR "Can't configure reset_gpio for input\n"); + gpio_free(gpio); + goto out; + } + +out: + if (!rc) + reset_gpio = gpio; + + return rc; +} + +/* + * Trigger GPIO reset. + * This covers various types of logic connecting gpio pin + * to RESET pins (nRESET or GPIO_RESET): + */ +static void do_gpio_reset(void) +{ + BUG_ON(reset_gpio == -1); + + /* drive it low */ + gpio_direction_output(reset_gpio, 0); + mdelay(2); + /* rising edge or drive high */ + gpio_set_value(reset_gpio, 1); + mdelay(2); + /* falling edge */ + gpio_set_value(reset_gpio, 0); + + /* give it some time */ + mdelay(10); + + WARN_ON(1); + /* fallback */ + do_hw_reset(); +} + +static void do_hw_reset(void) +{ + /* Initialize the watchdog and let it fire */ + OWER = OWER_WME; + OSSR = OSSR_M3; + OSMR3 = OSCR + 368640; /* ... 
in 100 ms */ +} + +void arch_reset(char mode) +{ + if (cpu_is_pxa2xx()) + RCSR = RCSR_HWR | RCSR_WDR | RCSR_SMR | RCSR_GPR; + + switch (mode) { + case 's': + /* Jump into ROM at address 0 */ + cpu_reset(0); + break; + case 'h': + do_hw_reset(); + break; + case 'g': + do_gpio_reset(); + break; + } +} + diff --git a/include/asm-arm/arch-pxa/hardware.h b/include/asm-arm/arch-pxa/hardware.h index e25558faa5a..5547ec797ad 100644 --- a/include/asm-arm/arch-pxa/hardware.h +++ b/include/asm-arm/arch-pxa/hardware.h @@ -205,6 +205,11 @@ static inline void __deprecated pxa_set_cken(int clock, int enable) */ extern unsigned int get_memclk_frequency_10khz(void); +/* + * register GPIO as reset generator + */ +extern int init_gpio_reset(int gpio); + #endif #if defined(CONFIG_MACH_ARMCORE) && defined(CONFIG_PCI) diff --git a/include/asm-arm/arch-pxa/system.h b/include/asm-arm/arch-pxa/system.h index 9aa6c2e939e..14894ae3fa6 100644 --- a/include/asm-arm/arch-pxa/system.h +++ b/include/asm-arm/arch-pxa/system.h @@ -20,19 +20,4 @@ static inline void arch_idle(void) } -static inline void arch_reset(char mode) -{ - if (cpu_is_pxa2xx()) - RCSR = RCSR_HWR | RCSR_WDR | RCSR_SMR | RCSR_GPR; - - if (mode == 's') { - /* Jump into ROM at address 0 */ - cpu_reset(0); - } else { - /* Initialize the watchdog and let it fire */ - OWER = OWER_WME; - OSSR = OSSR_M3; - OSMR3 = OSCR + 368640; /* ... in 100 ms */ - } -} - +void arch_reset(char mode); From 4d1fe075e5a84aaae00614cb23096336fda6374f Mon Sep 17 00:00:00 2001 From: Philipp Zabel Date: Thu, 29 May 2008 21:18:07 +0100 Subject: [PATCH 0027/1435] [ARM] 5069/1: pxa: include pxa2xx-regs.h in reset.c for RSCR The RCSR definition was moved into pxa2xx-regs.h. Signed-off-by: Philipp Zabel Acked-by: Dmitry Baryshkov Signed-off-by: Russell King --- arch/arm/mach-pxa/reset.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm/mach-pxa/reset.c b/arch/arm/mach-pxa/reset.c index 551f313c94d..9d39dea57ce 100644 --- a/arch/arm/mach-pxa/reset.c +++ b/arch/arm/mach-pxa/reset.c @@ -11,6 +11,7 @@ #include #include +#include static void do_hw_reset(void); From 86159a98adbb33682a4658c9b1eae95d9c9e25ed Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Thu, 22 May 2008 16:21:48 +0100 Subject: [PATCH 0028/1435] [ARM] 5048/2: Clean up tosa and spitz resetting Use new reset_gpio to reset tosa and spitz PDAs. 
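A note on the constant in do_hw_reset() above: the PXA OS timer (OSCR) runs at 3.6864 MHz, so arming the match register with OSMR3 = OSCR + 368640 schedules the watchdog reset 368640 / 3686400 = 0.1 s ahead, which is the "... in 100 ms" the comment refers to.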
Signed-off-by: Dmitry Baryshkov Acked-by: Eric Miao Signed-off-by: Russell King --- arch/arm/mach-pxa/spitz.c | 7 ++----- arch/arm/mach-pxa/tosa.c | 9 ++++----- 2 files changed, 6 insertions(+), 10 deletions(-) diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c index dace3820f1e..d58c3e906a9 100644 --- a/arch/arm/mach-pxa/spitz.c +++ b/arch/arm/mach-pxa/spitz.c @@ -530,11 +530,7 @@ static struct platform_device *devices[] __initdata = { static void spitz_poweroff(void) { - pxa_gpio_mode(SPITZ_GPIO_ON_RESET | GPIO_OUT); - GPSR(SPITZ_GPIO_ON_RESET) = GPIO_bit(SPITZ_GPIO_ON_RESET); - - mdelay(1000); - arm_machine_restart('h'); + arm_machine_restart('g'); } static void spitz_restart(char mode) @@ -548,6 +544,7 @@ static void spitz_restart(char mode) static void __init common_init(void) { + init_gpio_reset(SPITZ_GPIO_ON_RESET); pm_power_off = spitz_poweroff; arm_pm_restart = spitz_restart; diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c index c2cbd66db81..9ae2271f93c 100644 --- a/arch/arm/mach-pxa/tosa.c +++ b/arch/arm/mach-pxa/tosa.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -467,11 +468,7 @@ static struct platform_device *devices[] __initdata = { static void tosa_poweroff(void) { - pxa_gpio_mode(TOSA_GPIO_ON_RESET | GPIO_OUT); - GPSR(TOSA_GPIO_ON_RESET) = GPIO_bit(TOSA_GPIO_ON_RESET); - - mdelay(1000); - arm_machine_restart('h'); + arm_machine_restart('g'); } static void tosa_restart(char mode) @@ -489,6 +486,8 @@ static void __init tosa_init(void) gpio_set_wake(MFP_PIN_GPIO1, 1); /* We can't pass to gpio-keys since it will drop the Reset altfunc */ + init_gpio_reset(TOSA_GPIO_ON_RESET); + pm_power_off = tosa_poweroff; arm_pm_restart = tosa_restart; From c867155cde9905dfbcb16fcadb1840b06f8869d4 Mon Sep 17 00:00:00 2001 From: eric miao Date: Mon, 26 May 2008 03:28:50 +0100 Subject: [PATCH 0029/1435] [ARM] 5064/1: pxa: explicitly specify tosa keyboard GPIOs' low power states to DRIVE_LOW Signed-off-by: Eric Miao Acked-by: Dmitry Baryshkov Signed-off-by: Russell King --- arch/arm/mach-pxa/tosa.c | 36 ++++++++++++++++---------------- drivers/input/keyboard/tosakbd.c | 2 -- 2 files changed, 18 insertions(+), 20 deletions(-) diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c index 9ae2271f93c..8bf947dcb2e 100644 --- a/arch/arm/mach-pxa/tosa.c +++ b/arch/arm/mach-pxa/tosa.c @@ -130,24 +130,24 @@ static unsigned long tosa_pin_config[] = { GPIO47_STUART_TXD, /* Keybd */ - GPIO58_GPIO, - GPIO59_GPIO, - GPIO60_GPIO, - GPIO61_GPIO, - GPIO62_GPIO, - GPIO63_GPIO, - GPIO64_GPIO, - GPIO65_GPIO, - GPIO66_GPIO, - GPIO67_GPIO, - GPIO68_GPIO, - GPIO69_GPIO, - GPIO70_GPIO, - GPIO71_GPIO, - GPIO72_GPIO, - GPIO73_GPIO, - GPIO74_GPIO, - GPIO75_GPIO, + GPIO58_GPIO | MFP_LPM_DRIVE_LOW, + GPIO59_GPIO | MFP_LPM_DRIVE_LOW, + GPIO60_GPIO | MFP_LPM_DRIVE_LOW, + GPIO61_GPIO | MFP_LPM_DRIVE_LOW, + GPIO62_GPIO | MFP_LPM_DRIVE_LOW, + GPIO63_GPIO | MFP_LPM_DRIVE_LOW, + GPIO64_GPIO | MFP_LPM_DRIVE_LOW, + GPIO65_GPIO | MFP_LPM_DRIVE_LOW, + GPIO66_GPIO | MFP_LPM_DRIVE_LOW, + GPIO67_GPIO | MFP_LPM_DRIVE_LOW, + GPIO68_GPIO | MFP_LPM_DRIVE_LOW, + GPIO69_GPIO | MFP_LPM_DRIVE_LOW, + GPIO70_GPIO | MFP_LPM_DRIVE_LOW, + GPIO71_GPIO | MFP_LPM_DRIVE_LOW, + GPIO72_GPIO | MFP_LPM_DRIVE_LOW, + GPIO73_GPIO | MFP_LPM_DRIVE_LOW, + GPIO74_GPIO | MFP_LPM_DRIVE_LOW, + GPIO75_GPIO | MFP_LPM_DRIVE_LOW, /* SPI */ GPIO81_SSP2_CLK_OUT, diff --git a/drivers/input/keyboard/tosakbd.c b/drivers/input/keyboard/tosakbd.c index 94e444b4ee1..b12b7ee4b6a 100644 --- 
a/drivers/input/keyboard/tosakbd.c +++ b/drivers/input/keyboard/tosakbd.c @@ -215,8 +215,6 @@ static int tosakbd_suspend(struct platform_device *dev, pm_message_t state) unsigned long flags; spin_lock_irqsave(&tosakbd->lock, flags); - PGSR1 = (PGSR1 & ~TOSA_GPIO_LOW_STROBE_BIT); - PGSR2 = (PGSR2 & ~TOSA_GPIO_HIGH_STROBE_BIT); tosakbd->suspended = 1; spin_unlock_irqrestore(&tosakbd->lock, flags); From c4d5f8d43a5423a6bbba4eec1fab9c46e8d53c5d Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Mon, 9 Jun 2008 13:23:50 +0100 Subject: [PATCH 0030/1435] [ARM] 5081/1: tosa: fix SD GPIOs This changes SD-related GPIO names to be more informative, and allocates two more SD-related GPIOs. Signed-off-by: Dmitry Baryshkov Signed-off-by: Russell King --- arch/arm/mach-pxa/tosa.c | 32 +++++++++++++++++++++++++++++--- 1 file changed, 29 insertions(+), 3 deletions(-) diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c index 8bf947dcb2e..ba95cd66b98 100644 --- a/arch/arm/mach-pxa/tosa.c +++ b/arch/arm/mach-pxa/tosa.c @@ -87,7 +87,7 @@ static unsigned long tosa_pin_config[] = { GPIO6_MMC_CLK, GPIO8_MMC_CS0, GPIO9_GPIO, /* Detect */ - // GPIO10 nSD_INT + GPIO10_GPIO, /* nSD_INT */ /* CF */ GPIO13_GPIO, /* CD_IRQ */ @@ -250,6 +250,15 @@ static int tosa_mci_init(struct device *dev, irq_handler_t tosa_detect_int, void tosa_mci_platform_data.detect_delay = msecs_to_jiffies(250); + err = gpio_request(TOSA_GPIO_nSD_DETECT, "MMC/SD card detect"); + if (err) { + printk(KERN_ERR "tosa_mci_init: can't request nSD_DETECT gpio\n"); + goto err_gpio_detect; + } + err = gpio_direction_input(TOSA_GPIO_nSD_DETECT); + if (err) + goto err_gpio_detect_dir; + err = request_irq(TOSA_IRQ_GPIO_nSD_DETECT, tosa_detect_int, IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, "MMC/SD card detect", data); @@ -258,7 +267,7 @@ static int tosa_mci_init(struct device *dev, irq_handler_t tosa_detect_int, void goto err_irq; } - err = gpio_request(TOSA_GPIO_SD_WP, "sd_wp"); + err = gpio_request(TOSA_GPIO_SD_WP, "SD Write Protect"); if (err) { printk(KERN_ERR "tosa_mci_init: can't request SD_WP gpio\n"); goto err_gpio_wp; @@ -267,7 +276,7 @@ static int tosa_mci_init(struct device *dev, irq_handler_t tosa_detect_int, void if (err) goto err_gpio_wp_dir; - err = gpio_request(TOSA_GPIO_PWR_ON, "sd_pwr"); + err = gpio_request(TOSA_GPIO_PWR_ON, "SD Power"); if (err) { printk(KERN_ERR "tosa_mci_init: can't request SD_PWR gpio\n"); goto err_gpio_pwr; @@ -276,8 +285,20 @@ static int tosa_mci_init(struct device *dev, irq_handler_t tosa_detect_int, void if (err) goto err_gpio_pwr_dir; + err = gpio_request(TOSA_GPIO_nSD_INT, "SD Int"); + if (err) { + printk(KERN_ERR "tosa_mci_init: can't request SD_PWR gpio\n"); + goto err_gpio_int; + } + err = gpio_direction_input(TOSA_GPIO_nSD_INT); + if (err) + goto err_gpio_int_dir; + return 0; +err_gpio_int_dir: + gpio_free(TOSA_GPIO_nSD_INT); +err_gpio_int: err_gpio_pwr_dir: gpio_free(TOSA_GPIO_PWR_ON); err_gpio_pwr: @@ -286,6 +307,9 @@ err_gpio_wp_dir: err_gpio_wp: free_irq(TOSA_IRQ_GPIO_nSD_DETECT, data); err_irq: +err_gpio_detect_dir: + gpio_free(TOSA_GPIO_nSD_DETECT); +err_gpio_detect: return err; } @@ -307,9 +331,11 @@ static int tosa_mci_get_ro(struct device *dev) static void tosa_mci_exit(struct device *dev, void *data) { + gpio_free(TOSA_GPIO_nSD_INT); gpio_free(TOSA_GPIO_PWR_ON); gpio_free(TOSA_GPIO_SD_WP); free_irq(TOSA_IRQ_GPIO_nSD_DETECT, data); + gpio_free(TOSA_GPIO_nSD_DETECT); } static struct pxamci_platform_data tosa_mci_platform_data = { From 
debba222b20c7e98d7875d9c70552b3094038ab2 Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Wed, 11 Jun 2008 13:27:24 +0100 Subject: [PATCH 0031/1435] [ARM] 5089/1: tosa: cleanup includes Signed-off-by: Dmitry Baryshkov Signed-off-by: Russell King --- arch/arm/mach-pxa/tosa.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c index ba95cd66b98..9892464d7ab 100644 --- a/arch/arm/mach-pxa/tosa.c +++ b/arch/arm/mach-pxa/tosa.c @@ -24,15 +24,9 @@ #include #include #include -#include #include -#include #include -#include -#include -#include -#include #include #include #include @@ -41,8 +35,6 @@ #include #include -#include -#include #include #include From b032fccca80cbbedaa80e5a72a202a43f08aa97e Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Thu, 12 Jun 2008 11:42:07 +0100 Subject: [PATCH 0032/1435] [ARM] 5083/2: Tosa: fix IrDA transciver powerup. On tosa the tranciver LED isn't powered down if the GPIO47 (STUART_TX) isn't configured as low-level. Power it down if IrDA is off to save a bit of power. Signed-off-by: Dmitry Baryshkov Signed-off-by: Russell King --- arch/arm/mach-pxa/tosa.c | 58 ++++++++++++++++++++++++++------- include/asm-arm/arch-pxa/tosa.h | 1 + 2 files changed, 47 insertions(+), 12 deletions(-) diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c index 9892464d7ab..7a89f764acf 100644 --- a/arch/arm/mach-pxa/tosa.c +++ b/arch/arm/mach-pxa/tosa.c @@ -117,10 +117,6 @@ static unsigned long tosa_pin_config[] = { GPIO44_BTUART_CTS, GPIO45_BTUART_RTS, - /* IrDA */ - GPIO46_STUART_RXD, - GPIO47_STUART_TXD, - /* Keybd */ GPIO58_GPIO | MFP_LPM_DRIVE_LOW, GPIO59_GPIO | MFP_LPM_DRIVE_LOW, @@ -147,6 +143,17 @@ static unsigned long tosa_pin_config[] = { GPIO83_SSP2_TXD, }; +static unsigned long tosa_pin_irda_off[] = { + GPIO46_STUART_RXD, + GPIO47_GPIO | MFP_LPM_DRIVE_LOW, +}; + +static unsigned long tosa_pin_irda_on[] = { + GPIO46_STUART_RXD, + GPIO47_STUART_TXD, +}; + + /* * SCOOP Device */ @@ -341,29 +348,55 @@ static struct pxamci_platform_data tosa_mci_platform_data = { /* * Irda */ +static void tosa_irda_transceiver_mode(struct device *dev, int mode) +{ + if (mode & IR_OFF) { + gpio_set_value(TOSA_GPIO_IR_POWERDWN, 0); + pxa2xx_mfp_config(ARRAY_AND_SIZE(tosa_pin_irda_off)); + gpio_direction_output(TOSA_GPIO_IRDA_TX, 0); + } else { + pxa2xx_mfp_config(ARRAY_AND_SIZE(tosa_pin_irda_on)); + gpio_set_value(TOSA_GPIO_IR_POWERDWN, 1); + } +} + static int tosa_irda_startup(struct device *dev) { int ret; + ret = gpio_request(TOSA_GPIO_IRDA_TX, "IrDA TX"); + if (ret) + goto err_tx; + ret = gpio_direction_output(TOSA_GPIO_IRDA_TX, 0); + if (ret) + goto err_tx_dir; + ret = gpio_request(TOSA_GPIO_IR_POWERDWN, "IrDA powerdown"); if (ret) - return ret; + goto err_pwr; ret = gpio_direction_output(TOSA_GPIO_IR_POWERDWN, 0); if (ret) - gpio_free(TOSA_GPIO_IR_POWERDWN); + goto err_pwr_dir; + tosa_irda_transceiver_mode(dev, IR_SIRMODE | IR_OFF); + + return 0; + +err_pwr_dir: + gpio_free(TOSA_GPIO_IR_POWERDWN); +err_pwr: +err_tx_dir: + gpio_free(TOSA_GPIO_IRDA_TX); +err_tx: return ret; - } +} static void tosa_irda_shutdown(struct device *dev) { + tosa_irda_transceiver_mode(dev, IR_SIRMODE | IR_OFF); gpio_free(TOSA_GPIO_IR_POWERDWN); -} - -static void tosa_irda_transceiver_mode(struct device *dev, int mode) -{ - gpio_set_value(TOSA_GPIO_IR_POWERDWN, !(mode & IR_OFF)); + gpio_free(TOSA_GPIO_IRDA_TX); } static struct pxaficp_platform_data tosa_ficp_platform_data = { @@ -501,6 +534,7 @@ static void tosa_restart(char mode) static 
void __init tosa_init(void) { pxa2xx_mfp_config(ARRAY_AND_SIZE(tosa_pin_config)); + pxa2xx_mfp_config(ARRAY_AND_SIZE(tosa_pin_irda_off)); gpio_set_wake(MFP_PIN_GPIO1, 1); /* We can't pass to gpio-keys since it will drop the Reset altfunc */ diff --git a/include/asm-arm/arch-pxa/tosa.h b/include/asm-arm/arch-pxa/tosa.h index c5b6fde6907..18822621615 100644 --- a/include/asm-arm/arch-pxa/tosa.h +++ b/include/asm-arm/arch-pxa/tosa.h @@ -99,6 +99,7 @@ #define TOSA_GPIO_TP_INT (32) /* Touch Panel pen down interrupt */ #define TOSA_GPIO_JC_CF_IRQ (36) /* CF slot1 Ready */ #define TOSA_GPIO_BAT_LOCKED (38) /* Battery locked */ +#define TOSA_GPIO_IRDA_TX (47) #define TOSA_GPIO_TG_SPI_SCLK (81) #define TOSA_GPIO_TG_SPI_CS (82) #define TOSA_GPIO_TG_SPI_MOSI (83) From d6315949ac5527efd00d48283a9e33361c86e8e9 Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Sun, 22 Jun 2008 12:01:58 +0100 Subject: [PATCH 0033/1435] [ARM] 5096/2: Support Toshiba TC6393XB Mobile I/O Controller. Add support for Toshiba TC6393XB companion chip. Currently only GPIO and part of IRQ features of the device are supported. Signed-off-by: Dmitry Baryshkov Signed-off-by: Russell King --- drivers/mfd/Kconfig | 6 + drivers/mfd/Makefile | 2 + drivers/mfd/tc6393xb.c | 537 +++++++++++++++++++++++++++++++++++ include/linux/mfd/tc6393xb.h | 47 +++ 4 files changed, 592 insertions(+) create mode 100644 drivers/mfd/tc6393xb.c create mode 100644 include/linux/mfd/tc6393xb.h diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 2566479937c..1a1ac262fc8 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -38,6 +38,12 @@ config HTC_PASIC3 HTC Magician devices, respectively. Actual functionality is handled by the leds-pasic3 and ds1wm drivers. +config MFD_TC6393XB + bool "Support Toshiba TC6393XB" + depends on HAVE_GPIO_LIB + help + Support for Toshiba Mobile IO Controller TC6393XB + endmenu menu "Multimedia Capabilities Port drivers" diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index eef4e26807d..b4168442d53 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -8,6 +8,8 @@ obj-$(CONFIG_MFD_ASIC3) += asic3.o obj-$(CONFIG_HTC_EGPIO) += htc-egpio.o obj-$(CONFIG_HTC_PASIC3) += htc-pasic3.o +obj-$(CONFIG_MFD_TC6393XB) += tc6393xb.o + obj-$(CONFIG_MCP) += mcp-core.o obj-$(CONFIG_MCP_SA11X0) += mcp-sa11x0.o obj-$(CONFIG_MCP_UCB1200) += ucb1x00-core.o diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c new file mode 100644 index 00000000000..4d7192edefe --- /dev/null +++ b/drivers/mfd/tc6393xb.c @@ -0,0 +1,537 @@ +/* + * Toshiba TC6393XB SoC support + * + * Copyright(c) 2005-2006 Chris Humbert + * Copyright(c) 2005 Dirk Opfer + * Copyright(c) 2005 Ian Molton + * Copyright(c) 2007 Dmitry Baryshkov + * + * Based on code written by Sharp/Lineo for 2.4 kernels + * Based on locomo.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define SCR_REVID 0x08 /* b Revision ID */ +#define SCR_ISR 0x50 /* b Interrupt Status */ +#define SCR_IMR 0x52 /* b Interrupt Mask */ +#define SCR_IRR 0x54 /* b Interrupt Routing */ +#define SCR_GPER 0x60 /* w GP Enable */ +#define SCR_GPI_SR(i) (0x64 + (i)) /* b3 GPI Status */ +#define SCR_GPI_IMR(i) (0x68 + (i)) /* b3 GPI INT Mask */ +#define SCR_GPI_EDER(i) (0x6c + (i)) /* b3 GPI Edge Detect Enable */ +#define SCR_GPI_LIR(i) (0x70 + (i)) /* b3 GPI Level Invert */ +#define SCR_GPO_DSR(i) (0x78 + (i)) /* b3 GPO Data Set */ +#define SCR_GPO_DOECR(i) (0x7c + (i)) /* b3 GPO Data OE Control */ +#define SCR_GP_IARCR(i) (0x80 + (i)) /* b3 GP Internal Active Register Control */ +#define SCR_GP_IARLCR(i) (0x84 + (i)) /* b3 GP INTERNAL Active Register Level Control */ +#define SCR_GPI_BCR(i) (0x88 + (i)) /* b3 GPI Buffer Control */ +#define SCR_GPA_IARCR 0x8c /* w GPa Internal Active Register Control */ +#define SCR_GPA_IARLCR 0x90 /* w GPa Internal Active Register Level Control */ +#define SCR_GPA_BCR 0x94 /* w GPa Buffer Control */ +#define SCR_CCR 0x98 /* w Clock Control */ +#define SCR_PLL2CR 0x9a /* w PLL2 Control */ +#define SCR_PLL1CR 0x9c /* l PLL1 Control */ +#define SCR_DIARCR 0xa0 /* b Device Internal Active Register Control */ +#define SCR_DBOCR 0xa1 /* b Device Buffer Off Control */ +#define SCR_FER 0xe0 /* b Function Enable */ +#define SCR_MCR 0xe4 /* w Mode Control */ +#define SCR_CONFIG 0xfc /* b Configuration Control */ +#define SCR_DEBUG 0xff /* b Debug */ + +#define SCR_CCR_CK32K BIT(0) +#define SCR_CCR_USBCK BIT(1) +#define SCR_CCR_UNK1 BIT(4) +#define SCR_CCR_MCLK_MASK (7 << 8) +#define SCR_CCR_MCLK_OFF (0 << 8) +#define SCR_CCR_MCLK_12 (1 << 8) +#define SCR_CCR_MCLK_24 (2 << 8) +#define SCR_CCR_MCLK_48 (3 << 8) +#define SCR_CCR_HCLK_MASK (3 << 12) +#define SCR_CCR_HCLK_24 (0 << 12) +#define SCR_CCR_HCLK_48 (1 << 12) + +#define SCR_FER_USBEN BIT(0) /* USB host enable */ +#define SCR_FER_LCDCVEN BIT(1) /* polysilicon TFT enable */ +#define SCR_FER_SLCDEN BIT(2) /* SLCD enable */ + +#define SCR_MCR_RDY_MASK (3 << 0) +#define SCR_MCR_RDY_OPENDRAIN (0 << 0) +#define SCR_MCR_RDY_TRISTATE (1 << 0) +#define SCR_MCR_RDY_PUSHPULL (2 << 0) +#define SCR_MCR_RDY_UNK BIT(2) +#define SCR_MCR_RDY_EN BIT(3) +#define SCR_MCR_INT_MASK (3 << 4) +#define SCR_MCR_INT_OPENDRAIN (0 << 4) +#define SCR_MCR_INT_TRISTATE (1 << 4) +#define SCR_MCR_INT_PUSHPULL (2 << 4) +#define SCR_MCR_INT_UNK BIT(6) +#define SCR_MCR_INT_EN BIT(7) +/* bits 8 - 16 are unknown */ + +#define TC_GPIO_BIT(i) (1 << (i & 0x7)) + +/*--------------------------------------------------------------------------*/ + +struct tc6393xb { + void __iomem *scr; + + struct gpio_chip gpio; + + struct clk *clk; /* 3,6 Mhz */ + + spinlock_t lock; /* protects RMW cycles */ + + struct { + u8 fer; + u16 ccr; + u8 gpi_bcr[3]; + u8 gpo_dsr[3]; + u8 gpo_doecr[3]; + } suspend_state; + + struct resource rscr; + struct resource *iomem; + int irq; + int irq_base; +}; + +/*--------------------------------------------------------------------------*/ + +static int tc6393xb_gpio_get(struct gpio_chip *chip, + unsigned offset) +{ + struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio); + + /* XXX: does dsr also represent inputs? 
*/ + return ioread8(tc6393xb->scr + SCR_GPO_DSR(offset / 8)) + & TC_GPIO_BIT(offset); +} + +static void __tc6393xb_gpio_set(struct gpio_chip *chip, + unsigned offset, int value) +{ + struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio); + u8 dsr; + + dsr = ioread8(tc6393xb->scr + SCR_GPO_DSR(offset / 8)); + if (value) + dsr |= TC_GPIO_BIT(offset); + else + dsr &= ~TC_GPIO_BIT(offset); + + iowrite8(dsr, tc6393xb->scr + SCR_GPO_DSR(offset / 8)); +} + +static void tc6393xb_gpio_set(struct gpio_chip *chip, + unsigned offset, int value) +{ + struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio); + unsigned long flags; + + spin_lock_irqsave(&tc6393xb->lock, flags); + + __tc6393xb_gpio_set(chip, offset, value); + + spin_unlock_irqrestore(&tc6393xb->lock, flags); +} + +static int tc6393xb_gpio_direction_input(struct gpio_chip *chip, + unsigned offset) +{ + struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio); + unsigned long flags; + u8 doecr; + + spin_lock_irqsave(&tc6393xb->lock, flags); + + doecr = ioread8(tc6393xb->scr + SCR_GPO_DOECR(offset / 8)); + doecr &= ~TC_GPIO_BIT(offset); + iowrite8(doecr, tc6393xb->scr + SCR_GPO_DOECR(offset / 8)); + + spin_unlock_irqrestore(&tc6393xb->lock, flags); + + return 0; +} + +static int tc6393xb_gpio_direction_output(struct gpio_chip *chip, + unsigned offset, int value) +{ + struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio); + unsigned long flags; + u8 doecr; + + spin_lock_irqsave(&tc6393xb->lock, flags); + + __tc6393xb_gpio_set(chip, offset, value); + + doecr = ioread8(tc6393xb->scr + SCR_GPO_DOECR(offset / 8)); + doecr |= TC_GPIO_BIT(offset); + iowrite8(doecr, tc6393xb->scr + SCR_GPO_DOECR(offset / 8)); + + spin_unlock_irqrestore(&tc6393xb->lock, flags); + + return 0; +} + +static int tc6393xb_register_gpio(struct tc6393xb *tc6393xb, int gpio_base) +{ + tc6393xb->gpio.label = "tc6393xb"; + tc6393xb->gpio.base = gpio_base; + tc6393xb->gpio.ngpio = 16; + tc6393xb->gpio.set = tc6393xb_gpio_set; + tc6393xb->gpio.get = tc6393xb_gpio_get; + tc6393xb->gpio.direction_input = tc6393xb_gpio_direction_input; + tc6393xb->gpio.direction_output = tc6393xb_gpio_direction_output; + + return gpiochip_add(&tc6393xb->gpio); +} + +/*--------------------------------------------------------------------------*/ + +static void +tc6393xb_irq(unsigned int irq, struct irq_desc *desc) +{ + struct tc6393xb *tc6393xb = get_irq_data(irq); + unsigned int isr; + unsigned int i, irq_base; + + irq_base = tc6393xb->irq_base; + + while ((isr = ioread8(tc6393xb->scr + SCR_ISR) & + ~ioread8(tc6393xb->scr + SCR_IMR))) + for (i = 0; i < TC6393XB_NR_IRQS; i++) { + if (isr & (1 << i)) + generic_handle_irq(irq_base + i); + } +} + +static void tc6393xb_irq_ack(unsigned int irq) +{ +} + +static void tc6393xb_irq_mask(unsigned int irq) +{ + struct tc6393xb *tc6393xb = get_irq_chip_data(irq); + unsigned long flags; + u8 imr; + + spin_lock_irqsave(&tc6393xb->lock, flags); + imr = ioread8(tc6393xb->scr + SCR_IMR); + imr |= 1 << (irq - tc6393xb->irq_base); + iowrite8(imr, tc6393xb->scr + SCR_IMR); + spin_unlock_irqrestore(&tc6393xb->lock, flags); +} + +static void tc6393xb_irq_unmask(unsigned int irq) +{ + struct tc6393xb *tc6393xb = get_irq_chip_data(irq); + unsigned long flags; + u8 imr; + + spin_lock_irqsave(&tc6393xb->lock, flags); + imr = ioread8(tc6393xb->scr + SCR_IMR); + imr &= ~(1 << (irq - tc6393xb->irq_base)); + iowrite8(imr, tc6393xb->scr + SCR_IMR); + spin_unlock_irqrestore(&tc6393xb->lock, flags); +} + +static struct 
irq_chip tc6393xb_chip = { + .name = "tc6393xb", + .ack = tc6393xb_irq_ack, + .mask = tc6393xb_irq_mask, + .unmask = tc6393xb_irq_unmask, +}; + +static void tc6393xb_attach_irq(struct platform_device *dev) +{ + struct tc6393xb *tc6393xb = platform_get_drvdata(dev); + unsigned int irq, irq_base; + + irq_base = tc6393xb->irq_base; + + for (irq = irq_base; irq < irq_base + TC6393XB_NR_IRQS; irq++) { + set_irq_chip(irq, &tc6393xb_chip); + set_irq_chip_data(irq, tc6393xb); + set_irq_handler(irq, handle_edge_irq); + set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); + } + + set_irq_type(tc6393xb->irq, IRQT_FALLING); + set_irq_data(tc6393xb->irq, tc6393xb); + set_irq_chained_handler(tc6393xb->irq, tc6393xb_irq); +} + +static void tc6393xb_detach_irq(struct platform_device *dev) +{ + struct tc6393xb *tc6393xb = platform_get_drvdata(dev); + unsigned int irq, irq_base; + + set_irq_chained_handler(tc6393xb->irq, NULL); + set_irq_data(tc6393xb->irq, NULL); + + irq_base = tc6393xb->irq_base; + + for (irq = irq_base; irq < irq_base + TC6393XB_NR_IRQS; irq++) { + set_irq_flags(irq, 0); + set_irq_chip(irq, NULL); + set_irq_chip_data(irq, NULL); + } +} + +/*--------------------------------------------------------------------------*/ + +static int tc6393xb_hw_init(struct platform_device *dev) +{ + struct tc6393xb_platform_data *tcpd = dev->dev.platform_data; + struct tc6393xb *tc6393xb = platform_get_drvdata(dev); + int i; + + iowrite8(tc6393xb->suspend_state.fer, tc6393xb->scr + SCR_FER); + iowrite16(tcpd->scr_pll2cr, tc6393xb->scr + SCR_PLL2CR); + iowrite16(tc6393xb->suspend_state.ccr, tc6393xb->scr + SCR_CCR); + iowrite16(SCR_MCR_RDY_OPENDRAIN | SCR_MCR_RDY_UNK | SCR_MCR_RDY_EN | + SCR_MCR_INT_OPENDRAIN | SCR_MCR_INT_UNK | SCR_MCR_INT_EN | + BIT(15), tc6393xb->scr + SCR_MCR); + iowrite16(tcpd->scr_gper, tc6393xb->scr + SCR_GPER); + iowrite8(0, tc6393xb->scr + SCR_IRR); + iowrite8(0xbf, tc6393xb->scr + SCR_IMR); + + for (i = 0; i < 3; i++) { + iowrite8(tc6393xb->suspend_state.gpo_dsr[i], + tc6393xb->scr + SCR_GPO_DSR(i)); + iowrite8(tc6393xb->suspend_state.gpo_doecr[i], + tc6393xb->scr + SCR_GPO_DOECR(i)); + iowrite8(tc6393xb->suspend_state.gpi_bcr[i], + tc6393xb->scr + SCR_GPI_BCR(i)); + } + + return 0; +} + +static int __devinit tc6393xb_probe(struct platform_device *dev) +{ + struct tc6393xb_platform_data *tcpd = dev->dev.platform_data; + struct tc6393xb *tc6393xb; + struct resource *iomem; + struct resource *rscr; + int retval, temp; + int i; + + iomem = platform_get_resource(dev, IORESOURCE_MEM, 0); + if (!iomem) + return -EINVAL; + + tc6393xb = kzalloc(sizeof *tc6393xb, GFP_KERNEL); + if (!tc6393xb) { + retval = -ENOMEM; + goto err_kzalloc; + } + + spin_lock_init(&tc6393xb->lock); + + platform_set_drvdata(dev, tc6393xb); + tc6393xb->iomem = iomem; + tc6393xb->irq = platform_get_irq(dev, 0); + tc6393xb->irq_base = tcpd->irq_base; + + tc6393xb->clk = clk_get(&dev->dev, "GPIO27_CLK" /* "CK3P6MI" */); + if (IS_ERR(tc6393xb->clk)) { + retval = PTR_ERR(tc6393xb->clk); + goto err_clk_get; + } + + rscr = &tc6393xb->rscr; + rscr->name = "tc6393xb-core"; + rscr->start = iomem->start; + rscr->end = iomem->start + 0xff; + rscr->flags = IORESOURCE_MEM; + + retval = request_resource(iomem, rscr); + if (retval) + goto err_request_scr; + + tc6393xb->scr = ioremap(rscr->start, rscr->end - rscr->start + 1); + if (!tc6393xb->scr) { + retval = -ENOMEM; + goto err_ioremap; + } + + retval = clk_enable(tc6393xb->clk); + if (retval) + goto err_clk_enable; + + retval = tcpd->enable(dev); + if (retval) + goto err_enable; + + 
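+	/*
+	 * Seed suspend_state with the platform's initial GPO data and
+	 * direction values; tc6393xb_hw_init() below programs the chip
+	 * from this snapshot, so cold init and resume share one path.
+	 */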
tc6393xb->suspend_state.fer = 0; + for (i = 0; i < 3; i++) { + tc6393xb->suspend_state.gpo_dsr[i] = + (tcpd->scr_gpo_dsr >> (8 * i)) & 0xff; + tc6393xb->suspend_state.gpo_doecr[i] = + (tcpd->scr_gpo_doecr >> (8 * i)) & 0xff; + } + /* + * It may be necessary to change this back to + * platform-dependant code + */ + tc6393xb->suspend_state.ccr = SCR_CCR_UNK1 | + SCR_CCR_HCLK_48; + + retval = tc6393xb_hw_init(dev); + if (retval) + goto err_hw_init; + + printk(KERN_INFO "Toshiba tc6393xb revision %d at 0x%08lx, irq %d\n", + ioread8(tc6393xb->scr + SCR_REVID), + (unsigned long) iomem->start, tc6393xb->irq); + + tc6393xb->gpio.base = -1; + + if (tcpd->gpio_base >= 0) { + retval = tc6393xb_register_gpio(tc6393xb, tcpd->gpio_base); + if (retval) + goto err_gpio_add; + } + + if (tc6393xb->irq) + tc6393xb_attach_irq(dev); + + return 0; + + if (tc6393xb->irq) + tc6393xb_detach_irq(dev); + +err_gpio_add: + if (tc6393xb->gpio.base != -1) + temp = gpiochip_remove(&tc6393xb->gpio); +err_hw_init: + tcpd->disable(dev); +err_clk_enable: + clk_disable(tc6393xb->clk); +err_enable: + iounmap(tc6393xb->scr); +err_ioremap: + release_resource(&tc6393xb->rscr); +err_request_scr: + clk_put(tc6393xb->clk); +err_clk_get: + kfree(tc6393xb); +err_kzalloc: + return retval; +} + +static int __devexit tc6393xb_remove(struct platform_device *dev) +{ + struct tc6393xb_platform_data *tcpd = dev->dev.platform_data; + struct tc6393xb *tc6393xb = platform_get_drvdata(dev); + int ret; + + if (tc6393xb->irq) + tc6393xb_detach_irq(dev); + + if (tc6393xb->gpio.base != -1) { + ret = gpiochip_remove(&tc6393xb->gpio); + if (ret) { + dev_err(&dev->dev, "Can't remove gpio chip: %d\n", ret); + return ret; + } + } + + ret = tcpd->disable(dev); + + clk_disable(tc6393xb->clk); + + iounmap(tc6393xb->scr); + + release_resource(&tc6393xb->rscr); + + platform_set_drvdata(dev, NULL); + + clk_put(tc6393xb->clk); + + kfree(tc6393xb); + + return ret; +} + +#ifdef CONFIG_PM +static int tc6393xb_suspend(struct platform_device *dev, pm_message_t state) +{ + struct tc6393xb_platform_data *tcpd = dev->dev.platform_data; + struct tc6393xb *tc6393xb = platform_get_drvdata(dev); + int i; + + + tc6393xb->suspend_state.ccr = ioread16(tc6393xb->scr + SCR_CCR); + tc6393xb->suspend_state.fer = ioread8(tc6393xb->scr + SCR_FER); + + for (i = 0; i < 3; i++) { + tc6393xb->suspend_state.gpo_dsr[i] = + ioread8(tc6393xb->scr + SCR_GPO_DSR(i)); + tc6393xb->suspend_state.gpo_doecr[i] = + ioread8(tc6393xb->scr + SCR_GPO_DOECR(i)); + tc6393xb->suspend_state.gpi_bcr[i] = + ioread8(tc6393xb->scr + SCR_GPI_BCR(i)); + } + + return tcpd->suspend(dev); +} + +static int tc6393xb_resume(struct platform_device *dev) +{ + struct tc6393xb_platform_data *tcpd = dev->dev.platform_data; + int ret = tcpd->resume(dev); + + if (ret) + return ret; + + return tc6393xb_hw_init(dev); +} +#else +#define tc6393xb_suspend NULL +#define tc6393xb_resume NULL +#endif + +static struct platform_driver tc6393xb_driver = { + .probe = tc6393xb_probe, + .remove = __devexit_p(tc6393xb_remove), + .suspend = tc6393xb_suspend, + .resume = tc6393xb_resume, + + .driver = { + .name = "tc6393xb", + .owner = THIS_MODULE, + }, +}; + +static int __init tc6393xb_init(void) +{ + return platform_driver_register(&tc6393xb_driver); +} + +static void __exit tc6393xb_exit(void) +{ + platform_driver_unregister(&tc6393xb_driver); +} + +subsys_initcall(tc6393xb_init); +module_exit(tc6393xb_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Ian Molton, Dmitry Baryshkov and Dirk Opfer"); +MODULE_DESCRIPTION("tc6393xb Toshiba 
Mobile IO Controller"); +MODULE_ALIAS("platform:tc6393xb"); diff --git a/include/linux/mfd/tc6393xb.h b/include/linux/mfd/tc6393xb.h new file mode 100644 index 00000000000..0e3dd4ca523 --- /dev/null +++ b/include/linux/mfd/tc6393xb.h @@ -0,0 +1,47 @@ +/* + * Toshiba TC6393XB SoC support + * + * Copyright(c) 2005-2006 Chris Humbert + * Copyright(c) 2005 Dirk Opfer + * Copyright(c) 2005 Ian Molton + * Copyright(c) 2007 Dmitry Baryshkov + * + * Based on code written by Sharp/Lineo for 2.4 kernels + * Based on locomo.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef TC6393XB_H +#define TC6393XB_H + +/* Also one should provide the CK3P6MI clock */ +struct tc6393xb_platform_data { + u16 scr_pll2cr; /* PLL2 Control */ + u16 scr_gper; /* GP Enable */ + u32 scr_gpo_doecr; /* GPO Data OE Control */ + u32 scr_gpo_dsr; /* GPO Data Set */ + + int (*enable)(struct platform_device *dev); + int (*disable)(struct platform_device *dev); + int (*suspend)(struct platform_device *dev); + int (*resume)(struct platform_device *dev); + + int irq_base; /* a base for cascaded irq */ + int gpio_base; +}; + +/* + * Relative to irq_base + */ +#define IRQ_TC6393_NAND 0 +#define IRQ_TC6393_MMC 1 +#define IRQ_TC6393_OHCI 2 +#define IRQ_TC6393_SERIAL 3 +#define IRQ_TC6393_FB 4 + +#define TC6393XB_NR_IRQS 8 + +#endif From bf0116e54e185fd63025f2b975f0f1616ffe41f1 Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Sat, 14 Jun 2008 11:42:02 +0100 Subject: [PATCH 0034/1435] [ARM] 5097/1: Tosa: support TC6393XB device Add definitions for Toshiba TC6393XB companion chip and register the tc6393xb device. Signed-off-by: Dmitry Baryshkov Signed-off-by: Russell King --- arch/arm/mach-pxa/tosa.c | 127 ++++++++++++++++++++++++++++++++ include/asm-arm/arch-pxa/irqs.h | 1 + include/asm-arm/arch-pxa/tosa.h | 44 +++++++---- 3 files changed, 158 insertions(+), 14 deletions(-) diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c index 7a89f764acf..2fe0998ac3f 100644 --- a/arch/arm/mach-pxa/tosa.c +++ b/arch/arm/mach-pxa/tosa.c @@ -18,7 +18,10 @@ #include #include #include +#include +#include #include +#include #include #include #include @@ -509,9 +512,127 @@ static struct platform_device tosaled_device = { }, }; +/* + * Toshiba Mobile IO Controller + */ +static struct resource tc6393xb_resources[] = { + [0] = { + .start = TOSA_LCDC_PHYS, + .end = TOSA_LCDC_PHYS + 0x3ffffff, + .flags = IORESOURCE_MEM, + }, + + [1] = { + .start = TOSA_IRQ_GPIO_TC6393XB_INT, + .end = TOSA_IRQ_GPIO_TC6393XB_INT, + .flags = IORESOURCE_IRQ, + }, +}; + + +static int tosa_tc6393xb_enable(struct platform_device *dev) +{ + int rc; + + rc = gpio_request(TOSA_GPIO_TC6393XB_REST_IN, "tc6393xb #pclr"); + if (rc) + goto err_req_pclr; + rc = gpio_request(TOSA_GPIO_TC6393XB_SUSPEND, "tc6393xb #suspend"); + if (rc) + goto err_req_suspend; + rc = gpio_request(TOSA_GPIO_TC6393XB_L3V_ON, "l3v"); + if (rc) + goto err_req_l3v; + rc = gpio_direction_output(TOSA_GPIO_TC6393XB_L3V_ON, 0); + if (rc) + goto err_dir_l3v; + rc = gpio_direction_output(TOSA_GPIO_TC6393XB_SUSPEND, 0); + if (rc) + goto err_dir_suspend; + rc = gpio_direction_output(TOSA_GPIO_TC6393XB_REST_IN, 0); + if (rc) + goto err_dir_pclr; + + mdelay(1); + + gpio_set_value(TOSA_GPIO_TC6393XB_SUSPEND, 1); + + mdelay(10); + + gpio_set_value(TOSA_GPIO_TC6393XB_REST_IN, 1); + gpio_set_value(TOSA_GPIO_TC6393XB_L3V_ON, 1); + + return 0; +err_dir_pclr: 
+err_dir_suspend: +err_dir_l3v: + gpio_free(TOSA_GPIO_TC6393XB_L3V_ON); +err_req_l3v: + gpio_free(TOSA_GPIO_TC6393XB_SUSPEND); +err_req_suspend: + gpio_free(TOSA_GPIO_TC6393XB_REST_IN); +err_req_pclr: + return rc; +} + +static int tosa_tc6393xb_disable(struct platform_device *dev) +{ + gpio_free(TOSA_GPIO_TC6393XB_L3V_ON); + gpio_free(TOSA_GPIO_TC6393XB_SUSPEND); + gpio_free(TOSA_GPIO_TC6393XB_REST_IN); + + return 0; +} + +static int tosa_tc6393xb_resume(struct platform_device *dev) +{ + gpio_set_value(TOSA_GPIO_TC6393XB_SUSPEND, 1); + mdelay(10); + gpio_set_value(TOSA_GPIO_TC6393XB_L3V_ON, 1); + mdelay(10); + + return 0; +} + +static int tosa_tc6393xb_suspend(struct platform_device *dev) +{ + gpio_set_value(TOSA_GPIO_TC6393XB_L3V_ON, 0); + gpio_set_value(TOSA_GPIO_TC6393XB_SUSPEND, 0); + return 0; +} + +static struct tc6393xb_platform_data tosa_tc6393xb_setup = { + .scr_pll2cr = 0x0cc1, + .scr_gper = 0x3300, + .scr_gpo_dsr = + TOSA_TC6393XB_GPIO_BIT(TOSA_GPIO_CARD_VCC_ON), + .scr_gpo_doecr = + TOSA_TC6393XB_GPIO_BIT(TOSA_GPIO_CARD_VCC_ON), + + .irq_base = IRQ_BOARD_START, + .gpio_base = TOSA_TC6393XB_GPIO_BASE, + + .enable = tosa_tc6393xb_enable, + .disable = tosa_tc6393xb_disable, + .suspend = tosa_tc6393xb_suspend, + .resume = tosa_tc6393xb_resume, +}; + + +static struct platform_device tc6393xb_device = { + .name = "tc6393xb", + .id = -1, + .dev = { + .platform_data = &tosa_tc6393xb_setup, + }, + .num_resources = ARRAY_SIZE(tc6393xb_resources), + .resource = tc6393xb_resources, +}; + static struct platform_device *devices[] __initdata = { &tosascoop_device, &tosascoop_jc_device, + &tc6393xb_device, &tosakbd_device, &tosa_gpio_keys_device, &tosaled_device, @@ -533,6 +654,8 @@ static void tosa_restart(char mode) static void __init tosa_init(void) { + int dummy; + pxa2xx_mfp_config(ARRAY_AND_SIZE(tosa_pin_config)); pxa2xx_mfp_config(ARRAY_AND_SIZE(tosa_pin_irda_off)); gpio_set_wake(MFP_PIN_GPIO1, 1); @@ -548,6 +671,10 @@ static void __init tosa_init(void) /* enable batt_fault */ PMCR = 0x01; + dummy = gpiochip_reserve(TOSA_SCOOP_GPIO_BASE, 12); + dummy = gpiochip_reserve(TOSA_SCOOP_JC_GPIO_BASE, 12); + dummy = gpiochip_reserve(TOSA_TC6393XB_GPIO_BASE, 16); + pxa_set_mci_info(&tosa_mci_platform_data); pxa_set_udc_info(&udc_info); pxa_set_ficp_info(&tosa_ficp_platform_data); diff --git a/include/asm-arm/arch-pxa/irqs.h b/include/asm-arm/arch-pxa/irqs.h index b6c8fe37768..fa05e76f64e 100644 --- a/include/asm-arm/arch-pxa/irqs.h +++ b/include/asm-arm/arch-pxa/irqs.h @@ -180,6 +180,7 @@ #define NR_IRQS (IRQ_LOCOMO_SPI_TEND + 1) #elif defined(CONFIG_ARCH_LUBBOCK) || \ defined(CONFIG_MACH_LOGICPD_PXA270) || \ + defined(CONFIG_MACH_TOSA) || \ defined(CONFIG_MACH_MAINSTONE) || \ defined(CONFIG_MACH_PCM027) || \ defined(CONFIG_MACH_MAGICIAN) diff --git a/include/asm-arm/arch-pxa/tosa.h b/include/asm-arm/arch-pxa/tosa.h index 18822621615..a16c103b725 100644 --- a/include/asm-arm/arch-pxa/tosa.h +++ b/include/asm-arm/arch-pxa/tosa.h @@ -25,7 +25,7 @@ */ #define TOSA_SCOOP_GPIO_BASE NR_BUILTIN_GPIO #define TOSA_SCOOP_PXA_VCORE1 SCOOP_GPCR_PA11 -#define TOSA_SCOOP_TC6393_REST_IN SCOOP_GPCR_PA12 +#define TOSA_GPIO_TC6393XB_REST_IN (TOSA_SCOOP_GPIO_BASE + 1) #define TOSA_GPIO_IR_POWERDWN (TOSA_SCOOP_GPIO_BASE + 2) #define TOSA_GPIO_SD_WP (TOSA_SCOOP_GPIO_BASE + 3) #define TOSA_GPIO_PWR_ON (TOSA_SCOOP_GPIO_BASE + 4) @@ -35,11 +35,9 @@ #define TOSA_SCOOP_AC_IN_OL SCOOP_GPCR_PA19 /* GPIO Direction 1 : output mode / 0:input mode */ -#define TOSA_SCOOP_IO_DIR ( TOSA_SCOOP_PXA_VCORE1 | 
TOSA_SCOOP_TC6393_REST_IN | \ +#define TOSA_SCOOP_IO_DIR (TOSA_SCOOP_PXA_VCORE1 | \ TOSA_SCOOP_AUD_PWR_ON |\ - TOSA_SCOOP_BT_RESET | TOSA_SCOOP_BT_PWR_EN ) -/* GPIO out put level when init 1: Hi */ -#define TOSA_SCOOP_IO_OUT ( TOSA_SCOOP_TC6393_REST_IN ) + TOSA_SCOOP_BT_RESET | TOSA_SCOOP_BT_PWR_EN) /* * SCOOP2 jacket GPIOs @@ -49,16 +47,34 @@ #define TOSA_GPIO_NOTE_LED (TOSA_SCOOP_JC_GPIO_BASE + 1) #define TOSA_GPIO_CHRG_ERR_LED (TOSA_SCOOP_JC_GPIO_BASE + 2) #define TOSA_GPIO_USB_PULLUP (TOSA_SCOOP_JC_GPIO_BASE + 3) -#define TOSA_SCOOP_JC_TC6393_SUSPEND SCOOP_GPCR_PA15 -#define TOSA_SCOOP_JC_TC3693_L3V_ON SCOOP_GPCR_PA16 +#define TOSA_GPIO_TC6393XB_SUSPEND (TOSA_SCOOP_JC_GPIO_BASE + 4) +#define TOSA_GPIO_TC6393XB_L3V_ON (TOSA_SCOOP_JC_GPIO_BASE + 5) #define TOSA_SCOOP_JC_WLAN_DETECT SCOOP_GPCR_PA17 #define TOSA_GPIO_WLAN_LED (TOSA_SCOOP_JC_GPIO_BASE + 7) #define TOSA_SCOOP_JC_CARD_LIMIT_SEL SCOOP_GPCR_PA19 /* GPIO Direction 1 : output mode / 0:input mode */ -#define TOSA_SCOOP_JC_IO_DIR ( \ - TOSA_SCOOP_JC_TC6393_SUSPEND | TOSA_SCOOP_JC_TC3693_L3V_ON | \ - TOSA_SCOOP_JC_CARD_LIMIT_SEL ) +#define TOSA_SCOOP_JC_IO_DIR (TOSA_SCOOP_JC_CARD_LIMIT_SEL) + +/* + * TC6393XB GPIOs + */ +#define TOSA_TC6393XB_GPIO_BASE (NR_BUILTIN_GPIO + 2 * 12) +#define TOSA_TC6393XB_GPIO(i) (TOSA_TC6393XB_GPIO_BASE + (i)) +#define TOSA_TC6393XB_GPIO_BIT(gpio) (1 << (gpio - TOSA_TC6393XB_GPIO_BASE)) + +#define TOSA_GPIO_TG_ON (TOSA_TC6393XB_GPIO_BASE + 0) +#define TOSA_GPIO_L_MUTE (TOSA_TC6393XB_GPIO_BASE + 1) +#define TOSA_GPIO_BL_C20MA (TOSA_TC6393XB_GPIO_BASE + 3) +#define TOSA_GPIO_CARD_VCC_ON (TOSA_TC6393XB_GPIO_BASE + 4) +#define TOSA_GPIO_CHARGE_OFF (TOSA_TC6393XB_GPIO_BASE + 6) +#define TOSA_GPIO_CHARGE_OFF_JC (TOSA_TC6393XB_GPIO_BASE + 7) +#define TOSA_GPIO_BAT0_V_ON (TOSA_TC6393XB_GPIO_BASE + 9) +#define TOSA_GPIO_BAT1_V_ON (TOSA_TC6393XB_GPIO_BASE + 10) +#define TOSA_GPIO_BU_CHRG_ON (TOSA_TC6393XB_GPIO_BASE + 11) +#define TOSA_GPIO_BAT_SW_ON (TOSA_TC6393XB_GPIO_BASE + 12) +#define TOSA_GPIO_BAT0_TH_ON (TOSA_TC6393XB_GPIO_BASE + 14) +#define TOSA_GPIO_BAT1_TH_ON (TOSA_TC6393XB_GPIO_BASE + 15) /* * Timing Generator @@ -84,13 +100,13 @@ #define TOSA_GPIO_JACKET_DETECT (7) #define TOSA_GPIO_nSD_DETECT (9) #define TOSA_GPIO_nSD_INT (10) -#define TOSA_GPIO_TC6393_CLK (11) +#define TOSA_GPIO_TC6393XB_CLK (11) #define TOSA_GPIO_BAT1_CRG (12) #define TOSA_GPIO_CF_CD (13) #define TOSA_GPIO_BAT0_CRG (14) -#define TOSA_GPIO_TC6393_INT (15) +#define TOSA_GPIO_TC6393XB_INT (15) #define TOSA_GPIO_BAT0_LOW (17) -#define TOSA_GPIO_TC6393_RDY (18) +#define TOSA_GPIO_TC6393XB_RDY (18) #define TOSA_GPIO_ON_RESET (19) #define TOSA_GPIO_EAR_IN (20) #define TOSA_GPIO_CF_IRQ (21) /* CF slot0 Ready */ @@ -138,7 +154,7 @@ #define TOSA_IRQ_GPIO_BAT1_CRG IRQ_GPIO(TOSA_GPIO_BAT1_CRG) #define TOSA_IRQ_GPIO_CF_CD IRQ_GPIO(TOSA_GPIO_CF_CD) #define TOSA_IRQ_GPIO_BAT0_CRG IRQ_GPIO(TOSA_GPIO_BAT0_CRG) -#define TOSA_IRQ_GPIO_TC6393_INT IRQ_GPIO(TOSA_GPIO_TC6393_INT) +#define TOSA_IRQ_GPIO_TC6393XB_INT IRQ_GPIO(TOSA_GPIO_TC6393XB_INT) #define TOSA_IRQ_GPIO_BAT0_LOW IRQ_GPIO(TOSA_GPIO_BAT0_LOW) #define TOSA_IRQ_GPIO_EAR_IN IRQ_GPIO(TOSA_GPIO_EAR_IN) #define TOSA_IRQ_GPIO_CF_IRQ IRQ_GPIO(TOSA_GPIO_CF_IRQ) From 4440cbd69fc71bab8ed932e237b2a453b365ab9b Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Sat, 14 Jun 2008 11:42:57 +0100 Subject: [PATCH 0035/1435] [ARM] 5098/1: fix sound/soc/pxa/tosa.c to new gpio api The sound/soc/pxa/tosa.c contains dependencies on parts that never ever hit mainline. 
Replace them with current support for tc6393xb. Signed-off-by: Dmitry Baryshkov Acked-by: Mark Brown Signed-off-by: Russell King --- sound/soc/pxa/Kconfig | 1 + sound/soc/pxa/tosa.c | 29 ++++++++++++++++++++--------- 2 files changed, 21 insertions(+), 9 deletions(-) diff --git a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig index 484f883459e..329b33f37ef 100644 --- a/sound/soc/pxa/Kconfig +++ b/sound/soc/pxa/Kconfig @@ -48,6 +48,7 @@ config SND_PXA2XX_SOC_POODLE config SND_PXA2XX_SOC_TOSA tristate "SoC AC97 Audio support for Tosa" depends on SND_PXA2XX_SOC && MACH_TOSA + depends on MFD_TC6393XB select SND_PXA2XX_SOC_AC97 select SND_SOC_WM9712 help diff --git a/sound/soc/pxa/tosa.c b/sound/soc/pxa/tosa.c index 7346d7e5d06..c1462c4d139 100644 --- a/sound/soc/pxa/tosa.c +++ b/sound/soc/pxa/tosa.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -31,7 +32,7 @@ #include #include -#include +#include #include #include #include @@ -138,10 +139,7 @@ static int tosa_set_spk(struct snd_kcontrol *kcontrol, static int tosa_hp_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *k, int event) { - if (SND_SOC_DAPM_EVENT_ON(event)) - set_tc6393_gpio(&tc6393_device.dev,TOSA_TC6393_L_MUTE); - else - reset_tc6393_gpio(&tc6393_device.dev,TOSA_TC6393_L_MUTE); + gpio_set_value(TOSA_GPIO_L_MUTE, SND_SOC_DAPM_EVENT_ON(event) ? 1 :0); return 0; } @@ -261,16 +259,28 @@ static int __init tosa_init(void) if (!machine_is_tosa()) return -ENODEV; + ret = gpio_request(TOSA_GPIO_L_MUTE, "Headphone Jack"); + if (ret) + return ret; + gpio_direction_output(TOSA_GPIO_L_MUTE, 0); + tosa_snd_device = platform_device_alloc("soc-audio", -1); - if (!tosa_snd_device) - return -ENOMEM; + if (!tosa_snd_device) { + ret = -ENOMEM; + goto err_alloc; + } platform_set_drvdata(tosa_snd_device, &tosa_snd_devdata); tosa_snd_devdata.dev = &tosa_snd_device->dev; ret = platform_device_add(tosa_snd_device); - if (ret) - platform_device_put(tosa_snd_device); + if (!ret) + return 0; + + platform_device_put(tosa_snd_device); + +err_alloc: + gpio_free(TOSA_GPIO_L_MUTE); return ret; } @@ -278,6 +288,7 @@ static int __init tosa_init(void) static void __exit tosa_exit(void) { platform_device_unregister(tosa_snd_device); + gpio_free(TOSA_GPIO_L_MUTE); } module_init(tosa_init); From 53b14ea336ca45f34821ad78a1b45c2315283621 Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Sat, 14 Jun 2008 11:43:36 +0100 Subject: [PATCH 0036/1435] [ARM] 5099/1: Tosa: support AC-in detection. Add support for ac-in via pda_power device. 
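A consumer-side sketch, hypothetical and not part of this patch: pda-power forwards AC state changes to the supplies named in supplied_to, so a battery driver picks them up simply by registering a power_supply under a matching name. The names and bodies below are invented for illustration; only the "main-battery" string and the pda-power behaviour come from the patch itself.

/* Hypothetical sketch: a minimal battery driver that would be notified
 * by the pda-power device added below, because its power_supply name
 * matches an entry in the tosa_ac_supplied_to[] list.
 */
static enum power_supply_property main_battery_props[] = {
	POWER_SUPPLY_PROP_PRESENT,
};

static int main_battery_get_property(struct power_supply *psy,
				     enum power_supply_property psp,
				     union power_supply_propval *val)
{
	if (psp != POWER_SUPPLY_PROP_PRESENT)
		return -EINVAL;
	val->intval = 1;	/* a real driver would read the hardware */
	return 0;
}

static void main_battery_external_power_changed(struct power_supply *psy)
{
	/* called by the power supply core whenever pda-power reports
	 * a change of the AC online state */
}

static struct power_supply main_battery = {
	.name			= "main-battery",
	.type			= POWER_SUPPLY_TYPE_BATTERY,
	.properties		= main_battery_props,
	.num_properties		= ARRAY_SIZE(main_battery_props),
	.get_property		= main_battery_get_property,
	.external_power_changed	= main_battery_external_power_changed,
};

/* registered from a probe() routine with:
 *	power_supply_register(&pdev->dev, &main_battery);
 */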
Signed-off-by: Dmitry Baryshkov Signed-off-by: Russell King --- arch/arm/mach-pxa/tosa.c | 66 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c index 2fe0998ac3f..f3b12d2897a 100644 --- a/arch/arm/mach-pxa/tosa.c +++ b/arch/arm/mach-pxa/tosa.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include @@ -409,6 +410,70 @@ static struct pxaficp_platform_data tosa_ficp_platform_data = { .shutdown = tosa_irda_shutdown, }; +/* + * Tosa AC IN + */ +static int tosa_power_init(struct device *dev) +{ + int ret = gpio_request(TOSA_GPIO_AC_IN, "ac in"); + if (ret) + goto err_gpio_req; + + ret = gpio_direction_input(TOSA_GPIO_AC_IN); + if (ret) + goto err_gpio_in; + + return 0; + +err_gpio_in: + gpio_free(TOSA_GPIO_AC_IN); +err_gpio_req: + return ret; +} + +static void tosa_power_exit(struct device *dev) +{ + gpio_free(TOSA_GPIO_AC_IN); +} + +static int tosa_power_ac_online(void) +{ + return gpio_get_value(TOSA_GPIO_AC_IN) == 0; +} + +static char *tosa_ac_supplied_to[] = { + "main-battery", + "backup-battery", + "jacket-battery", +}; + +static struct pda_power_pdata tosa_power_data = { + .init = tosa_power_init, + .is_ac_online = tosa_power_ac_online, + .exit = tosa_power_exit, + .supplied_to = tosa_ac_supplied_to, + .num_supplicants = ARRAY_SIZE(tosa_ac_supplied_to), +}; + +static struct resource tosa_power_resource[] = { + { + .name = "ac", + .start = gpio_to_irq(TOSA_GPIO_AC_IN), + .end = gpio_to_irq(TOSA_GPIO_AC_IN), + .flags = IORESOURCE_IRQ | + IORESOURCE_IRQ_HIGHEDGE | + IORESOURCE_IRQ_LOWEDGE, + }, +}; + +static struct platform_device tosa_power_device = { + .name = "pda-power", + .id = -1, + .dev.platform_data = &tosa_power_data, + .resource = tosa_power_resource, + .num_resources = ARRAY_SIZE(tosa_power_resource), +}; + /* * Tosa Keyboard */ @@ -633,6 +698,7 @@ static struct platform_device *devices[] __initdata = { &tosascoop_device, &tosascoop_jc_device, &tc6393xb_device, + &tosa_power_device, &tosakbd_device, &tosa_gpio_keys_device, &tosaled_device, From aa613de676986f136fa6f48a4d709b5d264f4f38 Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Fri, 27 Jun 2008 10:37:19 +0100 Subject: [PATCH 0037/1435] [ARM] 5127/1: Core MFD support This patch provides a common subdevice registration system for MFD type chips, using platform devices.
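A usage sketch, hypothetical and not part of this patch: the parent driver lists its subdevices as mfd_cell entries whose MEM resources are offsets into the parent's iomem window and whose IRQ resources are offsets from the cascaded irq base, then registers them all with one call. Cell and resource names here are invented; the API is the one added below.

/* Hypothetical example of the mfd-core API introduced below. */
static const struct resource example_usb_resources[] = {
	{
		.start	= 0x0300,	/* offset within the parent's iomem window */
		.end	= 0x03ff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 2,		/* offset from the cascaded irq_base */
		.end	= 2,
		.flags	= IORESOURCE_IRQ,
	},
};

static const struct mfd_cell example_cells[] = {
	{
		.name		= "example-usb",
		.num_resources	= ARRAY_SIZE(example_usb_resources),
		.resources	= example_usb_resources,
	},
};

/* in the parent's probe(), after fetching its own memory resource:
 *
 *	struct resource *iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
 *
 *	mfd_add_devices(dev, example_cells, ARRAY_SIZE(example_cells),
 *			iomem, pdata->irq_base);
 *
 * each child can later retrieve its cell (and driver_data) with
 * mfd_get_cell() from its platform data.
 */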
Signed-off-by: Ian Molton Signed-off-by: Dmitry Baryshkov Signed-off-by: Russell King --- drivers/mfd/Kconfig | 4 ++ drivers/mfd/Makefile | 2 + drivers/mfd/mfd-core.c | 114 +++++++++++++++++++++++++++++++++++++++ include/linux/mfd/core.h | 55 +++++++++++++++++++ 4 files changed, 175 insertions(+) create mode 100644 drivers/mfd/mfd-core.c create mode 100644 include/linux/mfd/core.h diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 1a1ac262fc8..8ebc0be1095 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -5,6 +5,10 @@ menu "Multifunction device drivers" depends on HAS_IOMEM +config MFD_CORE + tristate + default n + config MFD_SM501 tristate "Support for Silicon Motion SM501" ---help--- diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index b4168442d53..33daa2f45dd 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -10,6 +10,8 @@ obj-$(CONFIG_HTC_PASIC3) += htc-pasic3.o obj-$(CONFIG_MFD_TC6393XB) += tc6393xb.o +obj-$(CONFIG_MFD_CORE) += mfd-core.o + obj-$(CONFIG_MCP) += mcp-core.o obj-$(CONFIG_MCP_SA11X0) += mcp-sa11x0.o obj-$(CONFIG_MCP_UCB1200) += ucb1x00-core.o diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c new file mode 100644 index 00000000000..d7d88ce053a --- /dev/null +++ b/drivers/mfd/mfd-core.c @@ -0,0 +1,114 @@ +/* + * drivers/mfd/mfd-core.c + * + * core MFD support + * Copyright (c) 2006 Ian Molton + * Copyright (c) 2007,2008 Dmitry Baryshkov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include +#include +#include + +static int mfd_add_device(struct platform_device *parent, + const struct mfd_cell *cell, + struct resource *mem_base, + int irq_base) +{ + struct resource res[cell->num_resources]; + struct platform_device *pdev; + int ret = -ENOMEM; + int r; + + pdev = platform_device_alloc(cell->name, parent->id); + if (!pdev) + goto fail_alloc; + + pdev->dev.parent = &parent->dev; + + ret = platform_device_add_data(pdev, + cell, sizeof(struct mfd_cell)); + if (ret) + goto fail_device; + + memzero(res, sizeof(res)); + for (r = 0; r < cell->num_resources; r++) { + res[r].name = cell->resources[r].name; + res[r].flags = cell->resources[r].flags; + + /* Find out base to use */ + if (cell->resources[r].flags & IORESOURCE_MEM) { + res[r].parent = mem_base; + res[r].start = mem_base->start + + cell->resources[r].start; + res[r].end = mem_base->start + + cell->resources[r].end; + } else if (cell->resources[r].flags & IORESOURCE_IRQ) { + res[r].start = irq_base + + cell->resources[r].start; + res[r].end = irq_base + + cell->resources[r].end; + } else { + res[r].parent = cell->resources[r].parent; + res[r].start = cell->resources[r].start; + res[r].end = cell->resources[r].end; + } + } + + platform_device_add_resources(pdev, res, cell->num_resources); + + ret = platform_device_add(pdev); + if (ret) + goto fail_device; + + return 0; + +/* platform_device_del(pdev); */ +fail_device: + platform_device_put(pdev); +fail_alloc: + return ret; +} + +int mfd_add_devices( + struct platform_device *parent, + const struct mfd_cell *cells, int n_devs, + struct resource *mem_base, + int irq_base) +{ + int i; + int ret = 0; + + for (i = 0; i < n_devs; i++) { + ret = mfd_add_device(parent, cells + i, mem_base, irq_base); + if (ret) + break; + } + + if (ret) + mfd_remove_devices(parent); + + return ret; +} +EXPORT_SYMBOL(mfd_add_devices); + +static int mfd_remove_devices_fn(struct device 
*dev, void *unused) +{ + platform_device_unregister( + container_of(dev, struct platform_device, dev)); + return 0; +} + +void mfd_remove_devices(struct platform_device *parent) +{ + device_for_each_child(&parent->dev, NULL, mfd_remove_devices_fn); +} +EXPORT_SYMBOL(mfd_remove_devices); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Ian Molton, Dmitry Baryshkov"); diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h new file mode 100644 index 00000000000..bb3dd054592 --- /dev/null +++ b/include/linux/mfd/core.h @@ -0,0 +1,55 @@ +#ifndef MFD_CORE_H +#define MFD_CORE_H +/* + * include/linux/mfd/core.h + * + * core MFD support + * Copyright (c) 2006 Ian Molton + * Copyright (c) 2007 Dmitry Baryshkov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include + +/* + * This struct describes the MFD part ("cell"). + * After registration a copy of this structure becomes the platform data + * of the resulting platform_device. + */ +struct mfd_cell { + const char *name; + + int (*enable)(struct platform_device *dev); + int (*disable)(struct platform_device *dev); + int (*suspend)(struct platform_device *dev); + int (*resume)(struct platform_device *dev); + + void *driver_data; /* driver-specific data */ + + /* + * These resources can be specified relative to the parent device. + * For accessing the device you should use the resources of the + * resulting platform device. + */ + int num_resources; + const struct resource *resources; +}; + +static inline struct mfd_cell * +mfd_get_cell(struct platform_device *pdev) +{ + return (struct mfd_cell *)pdev->dev.platform_data; +} + +extern int mfd_add_devices( + struct platform_device *parent, + const struct mfd_cell *cells, int n_devs, + struct resource *mem_base, + int irq_base); + +extern void mfd_remove_devices(struct platform_device *parent); + +#endif From f024ff10b1ab13da4d626366019fd05c49721af7 Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Fri, 27 Jun 2008 10:37:57 +0100 Subject: [PATCH 0038/1435] [ARM] 5128/1: tc6393xb: tmio-nand support Signed-off-by: Dmitry Baryshkov Signed-off-by: Russell King --- drivers/mfd/Kconfig | 1 + drivers/mfd/tc6393xb.c | 63 ++++++++++++++++++++++++++++++++++++ include/linux/mfd/tc6393xb.h | 2 ++ include/linux/mfd/tmio.h | 17 ++++++++++ 4 files changed, 83 insertions(+) create mode 100644 include/linux/mfd/tmio.h diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 8ebc0be1095..7dff105e8f8 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -45,6 +45,7 @@ config HTC_PASIC3 config MFD_TC6393XB bool "Support Toshiba TC6393XB" depends on HAVE_GPIO_LIB + select MFD_CORE help Support for Toshiba Mobile IO Controller TC6393XB diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c index 4d7192edefe..2d87501b6fd 100644 --- a/drivers/mfd/tc6393xb.c +++ b/drivers/mfd/tc6393xb.c @@ -21,6 +21,8 @@ #include #include #include +#include +#include #include #include @@ -108,6 +110,59 @@ struct tc6393xb { int irq_base; }; +enum { + TC6393XB_CELL_NAND, +}; + +/*--------------------------------------------------------------------------*/ + +static int tc6393xb_nand_enable(struct platform_device *nand) +{ + struct platform_device *dev = to_platform_device(nand->dev.parent); + struct tc6393xb *tc6393xb = platform_get_drvdata(dev); + unsigned long flags; + + spin_lock_irqsave(&tc6393xb->lock, flags); + + /* SMD buffer on */ + dev_dbg(&dev->dev, "SMD buffer on\n"); + iowrite8(0xff,
tc6393xb->scr + SCR_GPI_BCR(1)); + + spin_unlock_irqrestore(&tc6393xb->lock, flags); + + return 0; +} + +static struct resource __devinitdata tc6393xb_nand_resources[] = { + { + .name = TMIO_NAND_CONFIG, + .start = 0x0100, + .end = 0x01ff, + .flags = IORESOURCE_MEM, + }, + { + .name = TMIO_NAND_CONTROL, + .start = 0x1000, + .end = 0x1007, + .flags = IORESOURCE_MEM, + }, + { + .name = TMIO_NAND_IRQ, + .start = IRQ_TC6393_NAND, + .end = IRQ_TC6393_NAND, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct mfd_cell __devinitdata tc6393xb_cells[] = { + [TC6393XB_CELL_NAND] = { + .name = "tmio-nand", + .enable = tc6393xb_nand_enable, + .num_resources = ARRAY_SIZE(tc6393xb_nand_resources), + .resources = tc6393xb_nand_resources, + }, +}; + /*--------------------------------------------------------------------------*/ static int tc6393xb_gpio_get(struct gpio_chip *chip, @@ -410,6 +465,12 @@ static int __devinit tc6393xb_probe(struct platform_device *dev) if (tc6393xb->irq) tc6393xb_attach_irq(dev); + tc6393xb_cells[TC6393XB_CELL_NAND].driver_data = tcpd->nand_data; + + retval = mfd_add_devices(dev, + tc6393xb_cells, ARRAY_SIZE(tc6393xb_cells), + iomem, tcpd->irq_base); + return 0; if (tc6393xb->irq) @@ -440,6 +501,8 @@ static int __devexit tc6393xb_remove(struct platform_device *dev) struct tc6393xb *tc6393xb = platform_get_drvdata(dev); int ret; + mfd_remove_devices(dev); + if (tc6393xb->irq) tc6393xb_detach_irq(dev); diff --git a/include/linux/mfd/tc6393xb.h b/include/linux/mfd/tc6393xb.h index 0e3dd4ca523..7cc824a58f7 100644 --- a/include/linux/mfd/tc6393xb.h +++ b/include/linux/mfd/tc6393xb.h @@ -31,6 +31,8 @@ struct tc6393xb_platform_data { int irq_base; /* a base for cascaded irq */ int gpio_base; + + struct tmio_nand_data *nand_data; }; /* diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h new file mode 100644 index 00000000000..9438d8c9ac1 --- /dev/null +++ b/include/linux/mfd/tmio.h @@ -0,0 +1,17 @@ +#ifndef MFD_TMIO_H +#define MFD_TMIO_H + +/* + * data for the NAND controller + */ +struct tmio_nand_data { + struct nand_bbt_descr *badblock_pattern; + struct mtd_partition *partition; + unsigned int num_partitions; +}; + +#define TMIO_NAND_CONFIG "tmio-nand-config" +#define TMIO_NAND_CONTROL "tmio-nand-control" +#define TMIO_NAND_IRQ "tmio-nand" + +#endif From 5289fecda021828af85fccc8cc2613a85d0fcf52 Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Fri, 27 Jun 2008 10:38:44 +0100 Subject: [PATCH 0039/1435] [ARM] 5129/1: tosa: tmio-nand data Signed-off-by: Dmitry Baryshkov Signed-off-by: Russell King --- arch/arm/mach-pxa/tosa.c | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c index f3b12d2897a..fa903e7ae62 100644 --- a/arch/arm/mach-pxa/tosa.c +++ b/arch/arm/mach-pxa/tosa.c @@ -22,6 +22,9 @@ #include #include #include +#include +#include +#include #include #include #include @@ -666,6 +669,39 @@ static int tosa_tc6393xb_suspend(struct platform_device *dev) return 0; } +static struct mtd_partition tosa_nand_partition[] = { + { + .name = "smf", + .offset = 0, + .size = 7 * 1024 * 1024, + }, + { + .name = "root", + .offset = MTDPART_OFS_APPEND, + .size = 28 * 1024 * 1024, + }, + { + .name = "home", + .offset = MTDPART_OFS_APPEND, + .size = MTDPART_SIZ_FULL, + }, +}; + +static uint8_t scan_ff_pattern[] = { 0xff, 0xff }; + +static struct nand_bbt_descr tosa_tc6393xb_nand_bbt = { + .options = 0, + .offs = 4, + .len = 2, + .pattern = scan_ff_pattern +}; + +static struct tmio_nand_data 
tosa_tc6393xb_nand_config = { + .num_partitions = ARRAY_SIZE(tosa_nand_partition), + .partition = tosa_nand_partition, + .badblock_pattern = &tosa_tc6393xb_nand_bbt, +}; + static struct tc6393xb_platform_data tosa_tc6393xb_setup = { .scr_pll2cr = 0x0cc1, .scr_gper = 0x3300, @@ -681,6 +717,8 @@ static struct tc6393xb_platform_data tosa_tc6393xb_setup = { .disable = tosa_tc6393xb_disable, .suspend = tosa_tc6393xb_suspend, .resume = tosa_tc6393xb_resume, + + .nand_data = &tosa_tc6393xb_nand_config, }; From 16b32fd0a35b30cbd67395cfcbd360354db44f39 Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Sat, 5 Jul 2008 09:02:48 +0100 Subject: [PATCH 0040/1435] [ARM] 5150/1: Tosa: support built-in bluetooth power-up The driver is pretty much generic and will be later shared with a few other devices, like hx4700 ipaq. Signed-off-by: Dmitry Baryshkov Signed-off-by: Russell King --- arch/arm/mach-pxa/Kconfig | 8 ++ arch/arm/mach-pxa/Makefile | 2 + arch/arm/mach-pxa/tosa-bt.c | 150 +++++++++++++++++++++++++++++ arch/arm/mach-pxa/tosa.c | 17 +++- include/asm-arm/arch-pxa/tosa.h | 7 +- include/asm-arm/arch-pxa/tosa_bt.h | 22 +++++ 6 files changed, 201 insertions(+), 5 deletions(-) create mode 100644 arch/arm/mach-pxa/tosa-bt.c create mode 100644 include/asm-arm/arch-pxa/tosa_bt.h diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig index 5da7a682049..6c162f8fdce 100644 --- a/arch/arm/mach-pxa/Kconfig +++ b/arch/arm/mach-pxa/Kconfig @@ -273,4 +273,12 @@ config PXA_SSP tristate help Enable support for PXA2xx SSP ports + +config TOSA_BT + tristate "Control the state of built-in bluetooth chip on Sharp SL-6000" + depends on MACH_TOSA + select RFKILL + help + This is a simple driver that is able to control + the state of built in bluetooth chip on tosa. endif diff --git a/arch/arm/mach-pxa/Makefile b/arch/arm/mach-pxa/Makefile index c9e66fbe622..18e0b249f7c 100644 --- a/arch/arm/mach-pxa/Makefile +++ b/arch/arm/mach-pxa/Makefile @@ -58,3 +58,5 @@ obj-$(CONFIG_LEDS) += $(led-y) ifeq ($(CONFIG_PCI),y) obj-$(CONFIG_MACH_ARMCORE) += cm-x270-pci.o endif + +obj-$(CONFIG_TOSA_BT) += tosa-bt.o diff --git a/arch/arm/mach-pxa/tosa-bt.c b/arch/arm/mach-pxa/tosa-bt.c new file mode 100644 index 00000000000..7d8505466e5 --- /dev/null +++ b/arch/arm/mach-pxa/tosa-bt.c @@ -0,0 +1,150 @@ +/* + * Bluetooth built-in chip control + * + * Copyright (c) 2008 Dmitry Baryshkov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include +#include +#include +#include +#include +#include + +#include + +static void tosa_bt_on(struct tosa_bt_data *data) +{ + gpio_set_value(data->gpio_reset, 0); + gpio_set_value(data->gpio_pwr, 1); + gpio_set_value(data->gpio_reset, 1); + mdelay(20); + gpio_set_value(data->gpio_reset, 0); +} + +static void tosa_bt_off(struct tosa_bt_data *data) +{ + gpio_set_value(data->gpio_reset, 1); + mdelay(10); + gpio_set_value(data->gpio_pwr, 0); + gpio_set_value(data->gpio_reset, 0); +} + +static int tosa_bt_toggle_radio(void *data, enum rfkill_state state) +{ + pr_info("BT_RADIO going: %s\n", + state == RFKILL_STATE_ON ? 
"on" : "off"); + + if (state == RFKILL_STATE_ON) { + pr_info("TOSA_BT: going ON\n"); + tosa_bt_on(data); + } else { + pr_info("TOSA_BT: going OFF\n"); + tosa_bt_off(data); + } + return 0; +} + +static int tosa_bt_probe(struct platform_device *dev) +{ + int rc; + struct rfkill *rfk; + + struct tosa_bt_data *data = dev->dev.platform_data; + + rc = gpio_request(data->gpio_reset, "Bluetooth reset"); + if (rc) + goto err_reset; + rc = gpio_direction_output(data->gpio_reset, 0); + if (rc) + goto err_reset_dir; + rc = gpio_request(data->gpio_pwr, "Bluetooth power"); + if (rc) + goto err_pwr; + rc = gpio_direction_output(data->gpio_pwr, 0); + if (rc) + goto err_pwr_dir; + + rfk = rfkill_allocate(&dev->dev, RFKILL_TYPE_BLUETOOTH); + if (!rfk) { + rc = -ENOMEM; + goto err_rfk_alloc; + } + + rfk->name = "tosa-bt"; + rfk->toggle_radio = tosa_bt_toggle_radio; + rfk->data = data; +#ifdef CONFIG_RFKILL_LEDS + rfk->led_trigger.name = "tosa-bt"; +#endif + + rc = rfkill_register(rfk); + if (rc) + goto err_rfkill; + + platform_set_drvdata(dev, rfk); + + return 0; + +err_rfkill: + if (rfk) + rfkill_free(rfk); + rfk = NULL; +err_rfk_alloc: + tosa_bt_off(data); +err_pwr_dir: + gpio_free(data->gpio_pwr); +err_pwr: +err_reset_dir: + gpio_free(data->gpio_reset); +err_reset: + return rc; +} + +static int __devexit tosa_bt_remove(struct platform_device *dev) +{ + struct tosa_bt_data *data = dev->dev.platform_data; + struct rfkill *rfk = platform_get_drvdata(dev); + + platform_set_drvdata(dev, NULL); + + if (rfk) + rfkill_unregister(rfk); + rfk = NULL; + + tosa_bt_off(data); + + gpio_free(data->gpio_pwr); + gpio_free(data->gpio_reset); + + return 0; +} + +static struct platform_driver tosa_bt_driver = { + .probe = tosa_bt_probe, + .remove = __devexit_p(tosa_bt_remove), + + .driver = { + .name = "tosa-bt", + .owner = THIS_MODULE, + }, +}; + + +static int __init tosa_bt_init(void) +{ + return platform_driver_register(&tosa_bt_driver); +} + +static void __exit tosa_bt_exit(void) +{ + platform_driver_unregister(&tosa_bt_driver); +} + +module_init(tosa_bt_init); +module_exit(tosa_bt_exit); diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c index fa903e7ae62..5a9d329f504 100644 --- a/arch/arm/mach-pxa/tosa.c +++ b/arch/arm/mach-pxa/tosa.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include @@ -40,6 +41,7 @@ #include #include #include +#include #include #include @@ -562,7 +564,7 @@ static struct gpio_led tosa_gpio_leds[] = { }, { .name = "tosa:blue:bluetooth", - .default_trigger = "none", + .default_trigger = "tosa-bt", .gpio = TOSA_GPIO_BT_LED, }, }; @@ -732,6 +734,18 @@ static struct platform_device tc6393xb_device = { .resource = tc6393xb_resources, }; +static struct tosa_bt_data tosa_bt_data = { + .gpio_pwr = TOSA_GPIO_BT_PWR_EN, + .gpio_reset = TOSA_GPIO_BT_RESET, +}; + +static struct platform_device tosa_bt_device = { + .name = "tosa-bt", + .id = -1, + .dev.platform_data = &tosa_bt_data, +}; + + static struct platform_device *devices[] __initdata = { &tosascoop_device, &tosascoop_jc_device, @@ -740,6 +754,7 @@ static struct platform_device *devices[] __initdata = { &tosakbd_device, &tosa_gpio_keys_device, &tosaled_device, + &tosa_bt_device, }; static void tosa_poweroff(void) diff --git a/include/asm-arm/arch-pxa/tosa.h b/include/asm-arm/arch-pxa/tosa.h index a16c103b725..a72803f0461 100644 --- a/include/asm-arm/arch-pxa/tosa.h +++ b/include/asm-arm/arch-pxa/tosa.h @@ -30,14 +30,13 @@ #define TOSA_GPIO_SD_WP (TOSA_SCOOP_GPIO_BASE + 3) #define TOSA_GPIO_PWR_ON (TOSA_SCOOP_GPIO_BASE + 
4) #define TOSA_SCOOP_AUD_PWR_ON SCOOP_GPCR_PA16 -#define TOSA_SCOOP_BT_RESET SCOOP_GPCR_PA17 -#define TOSA_SCOOP_BT_PWR_EN SCOOP_GPCR_PA18 +#define TOSA_GPIO_BT_RESET (TOSA_SCOOP_GPIO_BASE + 6) +#define TOSA_GPIO_BT_PWR_EN (TOSA_SCOOP_GPIO_BASE + 7) #define TOSA_SCOOP_AC_IN_OL SCOOP_GPCR_PA19 /* GPIO Direction 1 : output mode / 0:input mode */ #define TOSA_SCOOP_IO_DIR (TOSA_SCOOP_PXA_VCORE1 | \ - TOSA_SCOOP_AUD_PWR_ON |\ - TOSA_SCOOP_BT_RESET | TOSA_SCOOP_BT_PWR_EN) + TOSA_SCOOP_AUD_PWR_ON) /* * SCOOP2 jacket GPIOs diff --git a/include/asm-arm/arch-pxa/tosa_bt.h b/include/asm-arm/arch-pxa/tosa_bt.h new file mode 100644 index 00000000000..efc3c3d3b75 --- /dev/null +++ b/include/asm-arm/arch-pxa/tosa_bt.h @@ -0,0 +1,22 @@ +/* + * Tosa bluetooth built-in chip control. + * + * Later it may be shared with some other platforms. + * + * Copyright (c) 2008 Dmitry Baryshkov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ +#ifndef TOSA_BT_H +#define TOSA_BT_H + +struct tosa_bt_data { + int gpio_pwr; + int gpio_reset; +}; + +#endif + From 0121702dbaf961b4c996f2579d4286a0a1ed877c Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Mon, 7 Jul 2008 10:09:29 +0100 Subject: [PATCH 0041/1435] [ARM] 5151/1: Tosa: remove double inclusion of linux/delay.h The patch applies to the linux-next tree. Signed-off-by: Alexander Beregalov Acked-by: Dmitry Baryshkov Signed-off-by: Russell King --- arch/arm/mach-pxa/tosa.c | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c index 5a9d329f504..2d49de572ba 100644 --- a/arch/arm/mach-pxa/tosa.c +++ b/arch/arm/mach-pxa/tosa.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include #include From 938870491f0cad030b358af47e28d124b72702d1 Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Mon, 7 Jul 2008 10:50:06 +0100 Subject: [PATCH 0042/1435] [ARM] 5152/1: Add myself to tosa maintainers list Since the beginning of this year I'm providing fixes and updates for the Sharp SL-6000 (tosa). Add myself as one of the maintainers of the machine. 
Cc: Dirk Opfer Signed-off-by: Dmitry Baryshkov Acked-by: Eric Miao Signed-off-by: Russell King --- MAINTAINERS | 2 ++ 1 file changed, 2 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index c68a1189140..616b97dca11 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -567,6 +567,8 @@ L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) S: Maintained ARM/TOSA MACHINE SUPPORT +P: Dmitry Baryshkov +M: dbaryshkov@gmail.com P: Dirk Opfer M: dirk@opfer-online.de S: Maintained From 51ee87f27a1d2c0e08492924f2fb0223c4c704d9 Mon Sep 17 00:00:00 2001 From: Li Yang Date: Thu, 29 May 2008 23:25:45 -0700 Subject: [PATCH 0043/1435] fsldma: fix incorrect exit path for initialization Signed-off-by: Li Yang Acked-by: Zhang Wei Signed-off-by: Dan Williams --- drivers/dma/fsldma.c | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 054eabffc18..724f6fdd0af 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -809,8 +809,7 @@ static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan) if (!src) { dev_err(fsl_chan->dev, "selftest: Cannot alloc memory for test!\n"); - err = -ENOMEM; - goto out; + return -ENOMEM; } dest = src + test_size; @@ -842,7 +841,7 @@ static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan) if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) { dev_err(fsl_chan->dev, "selftest: Time out!\n"); err = -ENODEV; - goto out; + goto free_resources; } /* Test free and re-alloc channel resources */ @@ -927,8 +926,7 @@ static int __devinit of_fsl_dma_chan_probe(struct of_device *dev, if (!new_fsl_chan) { dev_err(&dev->dev, "No free memory for allocating " "dma channels!\n"); - err = -ENOMEM; - goto err; + return -ENOMEM; } /* get dma channel register base */ @@ -936,7 +934,7 @@ static int __devinit of_fsl_dma_chan_probe(struct of_device *dev, if (err) { dev_err(&dev->dev, "Can't get %s property 'reg'\n", dev->node->full_name); - goto err; + goto err_no_reg; } new_fsl_chan->feature = *(u32 *)match->data; @@ -958,7 +956,7 @@ static int __devinit of_fsl_dma_chan_probe(struct of_device *dev, dev_err(&dev->dev, "There is no %d channel!\n", new_fsl_chan->id); err = -EINVAL; - goto err; + goto err_no_chan; } fdev->chan[new_fsl_chan->id] = new_fsl_chan; tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet, @@ -997,23 +995,26 @@ static int __devinit of_fsl_dma_chan_probe(struct of_device *dev, if (err) { dev_err(&dev->dev, "DMA channel %s request_irq error " "with return %d\n", dev->node->full_name, err); - goto err; + goto err_no_irq; } } err = fsl_dma_self_test(new_fsl_chan); if (err) - goto err; + goto err_self_test; dev_info(&dev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id, match->compatible, new_fsl_chan->irq); return 0; -err: - dma_halt(new_fsl_chan); - iounmap(new_fsl_chan->reg_base); + +err_self_test: free_irq(new_fsl_chan->irq, new_fsl_chan); +err_no_irq: list_del(&new_fsl_chan->common.device_node); +err_no_chan: + iounmap(new_fsl_chan->reg_base); +err_no_reg: kfree(new_fsl_chan); return err; } @@ -1054,8 +1055,7 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev, fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL); if (!fdev) { dev_err(&dev->dev, "No enough memory for 'priv'\n"); - err = -ENOMEM; - goto err; + return -ENOMEM; } fdev->dev = &dev->dev; INIT_LIST_HEAD(&fdev->common.channels); @@ -1065,7 +1065,7 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev, if (err) { dev_err(&dev->dev, "Can't get %s property 'reg'\n", 
dev->node->full_name); - goto err; + goto err_no_reg; } dev_info(&dev->dev, "Probe the Freescale DMA driver for %s " @@ -1103,6 +1103,7 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev, err: iounmap(fdev->reg_base); +err_no_reg: kfree(fdev); return err; } From 65bc3ffe8c067e387fe5557bc3ea5071071f6af9 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 5 Jun 2008 23:26:11 -0700 Subject: [PATCH 0044/1435] async_tx: fix async_memset compile error commit 636bdeaa 'dmaengine: ack to flags: make use of the unused bits in the 'ack' field' missed an ->ack conversion in crypto/async_tx/async_memset.c Signed-off-by: Dan Williams --- crypto/async_tx/async_memset.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c index f5ff3906b03..27a97dc90a7 100644 --- a/crypto/async_tx/async_memset.c +++ b/crypto/async_tx/async_memset.c @@ -76,7 +76,7 @@ async_memset(struct page *dest, int val, unsigned int offset, /* if ack is already set then we cannot be sure * we are referring to the correct operation */ - BUG_ON(depend_tx->ack); + BUG_ON(async_tx_test_ack(depend_tx)); if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR) panic("%s: DMA_ERROR waiting for depend_tx\n", __func__); From 1099dc79245719c046e632212ec09d6ec1154ef5 Mon Sep 17 00:00:00 2001 From: Haavard Skinnemoen Date: Tue, 8 Jul 2008 11:58:05 -0700 Subject: [PATCH 0045/1435] dmaengine: Couple DMA channels to their physical DMA device Set the 'parent' field of channel class devices to point to the physical DMA device initialized by the DMA engine driver. This allows drivers to use chan->dev.parent for syncing DMA buffers and adds a 'device' symlink to the real device in /sys/class/dma/dmaXchanY. Signed-off-by: Haavard Skinnemoen Signed-off-by: Dan Williams --- drivers/dma/dmaengine.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 97b329e7679..99c22b42bad 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -378,7 +378,7 @@ int dma_async_device_register(struct dma_device *device) chan->chan_id = chancnt++; chan->dev.class = &dma_devclass; - chan->dev.parent = NULL; + chan->dev.parent = device->dev; snprintf(chan->dev.bus_id, BUS_ID_SIZE, "dma%dchan%d", device->dev_id, chan->chan_id); From 9c402f4e196290692d998b188f9094deb1619e57 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Fri, 27 Jun 2008 01:21:11 -0700 Subject: [PATCH 0046/1435] dmaengine: remove arch dependency from DMADEVICES The dependency is redundant since all drivers set their specific arch dependencies. The NET_DMA option is modified to be enabled only on platforms where it is known to have a positive effect. HAS_DMA is added as an explicit dependency for the DMADEVICES menu. Acked-by: Adrian Bunk Acked-by: Haavard Skinnemoen Signed-off-by: Dan Williams --- drivers/dma/Kconfig | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 6239c3df30a..e4dd0065da3 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -4,13 +4,14 @@ menuconfig DMADEVICES bool "DMA Engine support" - depends on (PCI && X86) || ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX || PPC - depends on !HIGHMEM64G + depends on !HIGHMEM64G && HAS_DMA help DMA engines can do asynchronous data transfers without involving the host CPU. Currently, this framework can be used to offload memory copies in the network stack and - RAID operations in the MD driver. 
+ RAID operations in the MD driver. This menu only presents + DMA Device drivers supported by the configured arch, it may + be empty in some cases. if DMADEVICES @@ -55,10 +56,12 @@ comment "DMA Clients" config NET_DMA bool "Network: TCP receive copy offload" depends on DMA_ENGINE && NET + default (INTEL_IOATDMA || FSL_DMA) help This enables the use of DMA engines in the network stack to offload receive copy-to-user operations, freeing CPU cycles. - Since this is the main user of the DMA engine, it should be enabled; - say Y here. + + Say Y here if you enabled INTEL_IOATDMA or FSL_DMA, otherwise + say N. endif From 7cc5bf9a3a84e5a02e23e5739fb894790b37c101 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Jul 2008 11:58:21 -0700 Subject: [PATCH 0047/1435] dmaengine: track the number of clients using a channel Haavard's dma-slave interface would like to test for exclusive access to a channel. The standard channel refcounting is not sufficient in that it tracks more than just client references, it is also inaccurate as reference counts are percpu until the channel is removed. This change also enables a future fix to deallocate resources when a client declines to use a capable channel. Acked-by: Haavard Skinnemoen Signed-off-by: Dan Williams --- drivers/dma/dmaengine.c | 14 ++++++++++---- include/linux/dmaengine.h | 2 ++ 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 99c22b42bad..10de69eb1a3 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -183,9 +183,10 @@ static void dma_client_chan_alloc(struct dma_client *client) /* we are done once this client rejects * an available resource */ - if (ack == DMA_ACK) + if (ack == DMA_ACK) { dma_chan_get(chan); - else if (ack == DMA_NAK) + chan->client_count++; + } else if (ack == DMA_NAK) return; } } @@ -272,8 +273,10 @@ static void dma_clients_notify_removed(struct dma_chan *chan) /* client was holding resources for this channel so * free it */ - if (ack == DMA_ACK) + if (ack == DMA_ACK) { dma_chan_put(chan); + chan->client_count--; + } } mutex_unlock(&dma_list_mutex); @@ -313,8 +316,10 @@ void dma_async_client_unregister(struct dma_client *client) ack = client->event_callback(client, chan, DMA_RESOURCE_REMOVED); - if (ack == DMA_ACK) + if (ack == DMA_ACK) { dma_chan_put(chan); + chan->client_count--; + } } list_del(&client->global_node); @@ -394,6 +399,7 @@ int dma_async_device_register(struct dma_device *device) kref_get(&device->refcount); kref_get(&device->refcount); kref_init(&chan->refcount); + chan->client_count = 0; chan->slow_ref = 0; INIT_RCU_HEAD(&chan->rcu); } diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index d08a5c5eb92..6432b834322 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -139,6 +139,7 @@ struct dma_chan_percpu { * @rcu: the DMA channel's RCU head * @device_node: used to add this to the device chan list * @local: per-cpu pointer to a struct dma_chan_percpu + * @client-count: how many clients are using this channel */ struct dma_chan { struct dma_device *device; @@ -154,6 +155,7 @@ struct dma_chan { struct list_head device_node; struct dma_chan_percpu *local; + int client_count; }; #define to_dma_chan(p) container_of(p, struct dma_chan, dev) From ebabe2762607147d28aa395ea6df2a0ee7f795a1 Mon Sep 17 00:00:00 2001 From: Kay Sievers Date: Tue, 8 Jul 2008 11:58:28 -0700 Subject: [PATCH 0048/1435] iop-adma: fix platform driver hotplug/coldplug Since 43cc71eed1250755986da4c0f9898f9a635cb3bf, the 
platform modalias is prefixed with "platform:". Add MODULE_ALIAS() to most of the hotpluggable platform drivers, to re-enable auto loading. Cc: Signed-off-by: Kay Sievers Signed-off-by: David Brownell Signed-off-by: Andrew Morton Signed-off-by: Dan Williams --- drivers/dma/iop-adma.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index 0ec0f431e6a..4e6b052c065 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c @@ -1387,6 +1387,8 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan) spin_unlock_bh(&iop_chan->lock); } +MODULE_ALIAS("platform:iop-adma"); + static struct platform_driver iop_adma_driver = { .probe = iop_adma_probe, .remove = iop_adma_remove, From ff7b04796d9866327ea76e1393f1e902ef032f84 Mon Sep 17 00:00:00 2001 From: Saeed Bishara Date: Tue, 8 Jul 2008 11:58:36 -0700 Subject: [PATCH 0049/1435] dmaengine: DMA engine driver for Marvell XOR engine The XOR engine found in Marvell's SoCs and system controllers provides XOR and DMA operation, iSCSI CRC32C calculation, memory initialization, and memory ECC error cleanup operation support. This driver implements the DMA engine API and supports the following capabilities: - memcpy - xor - memset The XOR engine can be used by DMA engine clients implemented in the kernel; one of those clients is the RAID module. In that case, I observed a 20% improvement in raid5 write throughput and a 40% decrease in CPU utilization when doing array construction; those results were obtained on a 5182 running at 500MHz. When the NET DMA client was enabled, performance decreased, so for now it is recommended to keep this client off. Signed-off-by: Saeed Bishara Signed-off-by: Lennert Buytenhek Signed-off-by: Nicolas Pitre Acked-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/Kconfig | 8 + drivers/dma/Makefile | 1 + drivers/dma/mv_xor.c | 1364 +++++++++++++++++++++++++++ drivers/dma/mv_xor.h | 183 ++++ include/asm-arm/plat-orion/mv_xor.h | 28 + 5 files changed, 1584 insertions(+) create mode 100644 drivers/dma/mv_xor.c create mode 100644 drivers/dma/mv_xor.h create mode 100644 include/asm-arm/plat-orion/mv_xor.h diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index e4dd0065da3..5af8b1cfc1e 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -47,6 +47,14 @@ config FSL_DMA MPC8560/40, MPC8555, MPC8548 and MPC8641 processors. The MPC8349, MPC8360 is also supported. +config MV_XOR + bool "Marvell XOR engine support" + depends on PLAT_ORION + select ASYNC_CORE + select DMA_ENGINE + ---help--- + Enable support for the Marvell XOR engine. + config DMA_ENGINE bool diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index c8036d94590..ee272fd329c 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -4,3 +4,4 @@ obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o ioatdma-objs := ioat.o ioat_dma.o ioat_dca.o obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o obj-$(CONFIG_FSL_DMA) += fsldma.o +obj-$(CONFIG_MV_XOR) += mv_xor.o diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c new file mode 100644 index 00000000000..f0c123ce8ae --- /dev/null +++ b/drivers/dma/mv_xor.c @@ -0,0 +1,1364 @@ +/* + * offload engine driver for the Marvell XOR engine + * Copyright (C) 2007, 2008, Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation.
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mv_xor.h" + +static void mv_xor_issue_pending(struct dma_chan *chan); + +#define to_mv_xor_chan(chan) \ + container_of(chan, struct mv_xor_chan, common) + +#define to_mv_xor_device(dev) \ + container_of(dev, struct mv_xor_device, common) + +#define to_mv_xor_slot(tx) \ + container_of(tx, struct mv_xor_desc_slot, async_tx) + +static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags) +{ + struct mv_xor_desc *hw_desc = desc->hw_desc; + + hw_desc->status = (1 << 31); + hw_desc->phy_next_desc = 0; + hw_desc->desc_command = (1 << 31); +} + +static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc) +{ + struct mv_xor_desc *hw_desc = desc->hw_desc; + return hw_desc->phy_dest_addr; +} + +static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc, + int src_idx) +{ + struct mv_xor_desc *hw_desc = desc->hw_desc; + return hw_desc->phy_src_addr[src_idx]; +} + + +static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc, + u32 byte_count) +{ + struct mv_xor_desc *hw_desc = desc->hw_desc; + hw_desc->byte_count = byte_count; +} + +static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc, + u32 next_desc_addr) +{ + struct mv_xor_desc *hw_desc = desc->hw_desc; + BUG_ON(hw_desc->phy_next_desc); + hw_desc->phy_next_desc = next_desc_addr; +} + +static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc) +{ + struct mv_xor_desc *hw_desc = desc->hw_desc; + hw_desc->phy_next_desc = 0; +} + +static void mv_desc_set_block_fill_val(struct mv_xor_desc_slot *desc, u32 val) +{ + desc->value = val; +} + +static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc, + dma_addr_t addr) +{ + struct mv_xor_desc *hw_desc = desc->hw_desc; + hw_desc->phy_dest_addr = addr; +} + +static int mv_chan_memset_slot_count(size_t len) +{ + return 1; +} + +#define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c) + +static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc, + int index, dma_addr_t addr) +{ + struct mv_xor_desc *hw_desc = desc->hw_desc; + hw_desc->phy_src_addr[index] = addr; + if (desc->type == DMA_XOR) + hw_desc->desc_command |= (1 << index); +} + +static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan) +{ + return __raw_readl(XOR_CURR_DESC(chan)); +} + +static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan, + u32 next_desc_addr) +{ + __raw_writel(next_desc_addr, XOR_NEXT_DESC(chan)); +} + +static void mv_chan_set_dest_pointer(struct mv_xor_chan *chan, u32 desc_addr) +{ + __raw_writel(desc_addr, XOR_DEST_POINTER(chan)); +} + +static void mv_chan_set_block_size(struct mv_xor_chan *chan, u32 block_size) +{ + __raw_writel(block_size, XOR_BLOCK_SIZE(chan)); +} + +static void mv_chan_set_value(struct mv_xor_chan *chan, u32 value) +{ + __raw_writel(value, XOR_INIT_VALUE_LOW(chan)); + __raw_writel(value, XOR_INIT_VALUE_HIGH(chan)); +} + +static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan) +{ + u32 val = __raw_readl(XOR_INTR_MASK(chan)); + val |= 
XOR_INTR_MASK_VALUE << (chan->idx * 16); + __raw_writel(val, XOR_INTR_MASK(chan)); +} + +static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan) +{ + u32 intr_cause = __raw_readl(XOR_INTR_CAUSE(chan)); + intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF; + return intr_cause; +} + +static int mv_is_err_intr(u32 intr_cause) +{ + if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9))) + return 1; + + return 0; +} + +static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan) +{ + u32 val = (1 << (1 + (chan->idx * 16))); + dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val); + __raw_writel(val, XOR_INTR_CAUSE(chan)); +} + +static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan) +{ + u32 val = 0xFFFF0000 >> (chan->idx * 16); + __raw_writel(val, XOR_INTR_CAUSE(chan)); +} + +static int mv_can_chain(struct mv_xor_desc_slot *desc) +{ + struct mv_xor_desc_slot *chain_old_tail = list_entry( + desc->chain_node.prev, struct mv_xor_desc_slot, chain_node); + + if (chain_old_tail->type != desc->type) + return 0; + if (desc->type == DMA_MEMSET) + return 0; + + return 1; +} + +static void mv_set_mode(struct mv_xor_chan *chan, + enum dma_transaction_type type) +{ + u32 op_mode; + u32 config = __raw_readl(XOR_CONFIG(chan)); + + switch (type) { + case DMA_XOR: + op_mode = XOR_OPERATION_MODE_XOR; + break; + case DMA_MEMCPY: + op_mode = XOR_OPERATION_MODE_MEMCPY; + break; + case DMA_MEMSET: + op_mode = XOR_OPERATION_MODE_MEMSET; + break; + default: + dev_printk(KERN_ERR, chan->device->common.dev, + "error: unsupported operation %d.\n", + type); + BUG(); + return; + } + + config &= ~0x7; + config |= op_mode; + __raw_writel(config, XOR_CONFIG(chan)); + chan->current_type = type; +} + +static void mv_chan_activate(struct mv_xor_chan *chan) +{ + u32 activation; + + dev_dbg(chan->device->common.dev, " activate chan.\n"); + activation = __raw_readl(XOR_ACTIVATION(chan)); + activation |= 0x1; + __raw_writel(activation, XOR_ACTIVATION(chan)); +} + +static char mv_chan_is_busy(struct mv_xor_chan *chan) +{ + u32 state = __raw_readl(XOR_ACTIVATION(chan)); + + state = (state >> 4) & 0x3; + + return (state == 1) ? 1 : 0; +} + +static int mv_chan_xor_slot_count(size_t len, int src_cnt) +{ + return 1; +} + +/** + * mv_xor_free_slots - flags descriptor slots for reuse + * @slot: Slot to free + * Caller must hold &mv_chan->lock while calling this function + */ +static void mv_xor_free_slots(struct mv_xor_chan *mv_chan, + struct mv_xor_desc_slot *slot) +{ + dev_dbg(mv_chan->device->common.dev, "%s %d slot %p\n", + __func__, __LINE__, slot); + + slot->slots_per_op = 0; + +} + +/* + * mv_xor_start_new_chain - program the engine to operate on new chain headed by + * sw_desc + * Caller must hold &mv_chan->lock while calling this function + */ +static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan, + struct mv_xor_desc_slot *sw_desc) +{ + dev_dbg(mv_chan->device->common.dev, "%s %d: sw_desc %p\n", + __func__, __LINE__, sw_desc); + if (sw_desc->type != mv_chan->current_type) + mv_set_mode(mv_chan, sw_desc->type); + + if (sw_desc->type == DMA_MEMSET) { + /* for memset requests we need to program the engine, no + * descriptors used. 
+ */ + struct mv_xor_desc *hw_desc = sw_desc->hw_desc; + mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr); + mv_chan_set_block_size(mv_chan, sw_desc->unmap_len); + mv_chan_set_value(mv_chan, sw_desc->value); + } else { + /* set the hardware chain */ + mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys); + } + mv_chan->pending += sw_desc->slot_cnt; + mv_xor_issue_pending(&mv_chan->common); +} + +static dma_cookie_t +mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc, + struct mv_xor_chan *mv_chan, dma_cookie_t cookie) +{ + BUG_ON(desc->async_tx.cookie < 0); + + if (desc->async_tx.cookie > 0) { + cookie = desc->async_tx.cookie; + + /* call the callback (must not sleep or submit new + * operations to this channel) + */ + if (desc->async_tx.callback) + desc->async_tx.callback( + desc->async_tx.callback_param); + + /* unmap dma addresses + * (unmap_single vs unmap_page?) + */ + if (desc->group_head && desc->unmap_len) { + struct mv_xor_desc_slot *unmap = desc->group_head; + struct device *dev = + &mv_chan->device->pdev->dev; + u32 len = unmap->unmap_len; + u32 src_cnt = unmap->unmap_src_cnt; + dma_addr_t addr = mv_desc_get_dest_addr(unmap); + + dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE); + while (src_cnt--) { + addr = mv_desc_get_src_addr(unmap, src_cnt); + dma_unmap_page(dev, addr, len, DMA_TO_DEVICE); + } + desc->group_head = NULL; + } + } + + /* run dependent operations */ + async_tx_run_dependencies(&desc->async_tx); + + return cookie; +} + +static int +mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan) +{ + struct mv_xor_desc_slot *iter, *_iter; + + dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__); + list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots, + completed_node) { + + if (async_tx_test_ack(&iter->async_tx)) { + list_del(&iter->completed_node); + mv_xor_free_slots(mv_chan, iter); + } + } + return 0; +} + +static int +mv_xor_clean_slot(struct mv_xor_desc_slot *desc, + struct mv_xor_chan *mv_chan) +{ + dev_dbg(mv_chan->device->common.dev, "%s %d: desc %p flags %d\n", + __func__, __LINE__, desc, desc->async_tx.flags); + list_del(&desc->chain_node); + /* the client is allowed to attach dependent operations + * until 'ack' is set + */ + if (!async_tx_test_ack(&desc->async_tx)) { + /* move this slot to the completed_slots */ + list_add_tail(&desc->completed_node, &mv_chan->completed_slots); + return 0; + } + + mv_xor_free_slots(mv_chan, desc); + return 0; +} + +static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan) +{ + struct mv_xor_desc_slot *iter, *_iter; + dma_cookie_t cookie = 0; + int busy = mv_chan_is_busy(mv_chan); + u32 current_desc = mv_chan_get_current_desc(mv_chan); + int seen_current = 0; + + dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__); + dev_dbg(mv_chan->device->common.dev, "current_desc %x\n", current_desc); + mv_xor_clean_completed_slots(mv_chan); + + /* free completed slots from the chain starting with + * the oldest descriptor + */ + + list_for_each_entry_safe(iter, _iter, &mv_chan->chain, + chain_node) { + prefetch(_iter); + prefetch(&_iter->async_tx); + + /* do not advance past the current descriptor loaded into the + * hardware channel, subsequent descriptors are either in + * process or have not been submitted + */ + if (seen_current) + break; + + /* stop the search if we reach the current descriptor and the + * channel is busy + */ + if (iter->async_tx.phys == current_desc) { + seen_current = 1; + if (busy) + break; + } + + cookie = 
mv_xor_run_tx_complete_actions(iter, mv_chan, cookie); + + if (mv_xor_clean_slot(iter, mv_chan)) + break; + } + + if ((busy == 0) && !list_empty(&mv_chan->chain)) { + struct mv_xor_desc_slot *chain_head; + chain_head = list_entry(mv_chan->chain.next, + struct mv_xor_desc_slot, + chain_node); + + mv_xor_start_new_chain(mv_chan, chain_head); + } + + if (cookie > 0) + mv_chan->completed_cookie = cookie; +} + +static void +mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan) +{ + spin_lock_bh(&mv_chan->lock); + __mv_xor_slot_cleanup(mv_chan); + spin_unlock_bh(&mv_chan->lock); +} + +static void mv_xor_tasklet(unsigned long data) +{ + struct mv_xor_chan *chan = (struct mv_xor_chan *) data; + __mv_xor_slot_cleanup(chan); +} + +static struct mv_xor_desc_slot * +mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots, + int slots_per_op) +{ + struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL; + LIST_HEAD(chain); + int slots_found, retry = 0; + + /* start search from the last allocated descriptor; + * if a contiguous allocation cannot be found, start searching + * from the beginning of the list + */ +retry: + slots_found = 0; + if (retry == 0) + iter = mv_chan->last_used; + else + iter = list_entry(&mv_chan->all_slots, + struct mv_xor_desc_slot, + slot_node); + + list_for_each_entry_safe_continue( + iter, _iter, &mv_chan->all_slots, slot_node) { + prefetch(_iter); + prefetch(&_iter->async_tx); + if (iter->slots_per_op) { + /* give up after finding the first busy slot + * on the second pass through the list + */ + if (retry) + break; + + slots_found = 0; + continue; + } + + /* start the allocation if the slot is correctly aligned */ + if (!slots_found++) + alloc_start = iter; + + if (slots_found == num_slots) { + struct mv_xor_desc_slot *alloc_tail = NULL; + struct mv_xor_desc_slot *last_used = NULL; + iter = alloc_start; + while (num_slots) { + int i; + + /* pre-ack all but the last descriptor */ + async_tx_ack(&iter->async_tx); + + list_add_tail(&iter->chain_node, &chain); + alloc_tail = iter; + iter->async_tx.cookie = 0; + iter->slot_cnt = num_slots; + iter->xor_check_result = NULL; + for (i = 0; i < slots_per_op; i++) { + iter->slots_per_op = slots_per_op - i; + last_used = iter; + iter = list_entry(iter->slot_node.next, + struct mv_xor_desc_slot, + slot_node); + } + num_slots -= slots_per_op; + } + alloc_tail->group_head = alloc_start; + alloc_tail->async_tx.cookie = -EBUSY; + list_splice(&chain, &alloc_tail->async_tx.tx_list); + mv_chan->last_used = last_used; + mv_desc_clear_next_desc(alloc_start); + mv_desc_clear_next_desc(alloc_tail); + return alloc_tail; + } + } + if (!retry++) + goto retry; + + /* try to free some slots if the allocation fails */ + tasklet_schedule(&mv_chan->irq_tasklet); + + return NULL; +} + +static dma_cookie_t +mv_desc_assign_cookie(struct mv_xor_chan *mv_chan, + struct mv_xor_desc_slot *desc) +{ + dma_cookie_t cookie = mv_chan->common.cookie; + + if (++cookie < 0) + cookie = 1; + mv_chan->common.cookie = desc->async_tx.cookie = cookie; + return cookie; +} + +/************************ DMA engine API functions ****************************/ +static dma_cookie_t +mv_xor_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx); + struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan); + struct mv_xor_desc_slot *grp_start, *old_chain_tail; + dma_cookie_t cookie; + int new_hw_chain = 1; + + dev_dbg(mv_chan->device->common.dev, + "%s sw_desc %p: async_tx %p\n", + __func__, sw_desc, &sw_desc->async_tx); + + grp_start =
sw_desc->group_head; + + spin_lock_bh(&mv_chan->lock); + cookie = mv_desc_assign_cookie(mv_chan, sw_desc); + + if (list_empty(&mv_chan->chain)) + list_splice_init(&sw_desc->async_tx.tx_list, &mv_chan->chain); + else { + new_hw_chain = 0; + + old_chain_tail = list_entry(mv_chan->chain.prev, + struct mv_xor_desc_slot, + chain_node); + list_splice_init(&grp_start->async_tx.tx_list, + &old_chain_tail->chain_node); + + if (!mv_can_chain(grp_start)) + goto submit_done; + + dev_dbg(mv_chan->device->common.dev, "Append to last desc %x\n", + old_chain_tail->async_tx.phys); + + /* fix up the hardware chain */ + mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys); + + /* if the channel is not busy */ + if (!mv_chan_is_busy(mv_chan)) { + u32 current_desc = mv_chan_get_current_desc(mv_chan); + /* + * and the current desc is the end of the chain before + * the append, then we need to start the channel + */ + if (current_desc == old_chain_tail->async_tx.phys) + new_hw_chain = 1; + } + } + + if (new_hw_chain) + mv_xor_start_new_chain(mv_chan, grp_start); + +submit_done: + spin_unlock_bh(&mv_chan->lock); + + return cookie; +} + +/* returns the number of allocated descriptors */ +static int mv_xor_alloc_chan_resources(struct dma_chan *chan) +{ + char *hw_desc; + int idx; + struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); + struct mv_xor_desc_slot *slot = NULL; + struct mv_xor_platform_data *plat_data = + mv_chan->device->pdev->dev.platform_data; + int num_descs_in_pool = plat_data->pool_size/MV_XOR_SLOT_SIZE; + + /* Allocate descriptor slots */ + idx = mv_chan->slots_allocated; + while (idx < num_descs_in_pool) { + slot = kzalloc(sizeof(*slot), GFP_KERNEL); + if (!slot) { + printk(KERN_INFO "MV XOR Channel only initialized" + " %d descriptor slots", idx); + break; + } + hw_desc = (char *) mv_chan->device->dma_desc_pool_virt; + slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE]; + + dma_async_tx_descriptor_init(&slot->async_tx, chan); + slot->async_tx.tx_submit = mv_xor_tx_submit; + INIT_LIST_HEAD(&slot->chain_node); + INIT_LIST_HEAD(&slot->slot_node); + INIT_LIST_HEAD(&slot->async_tx.tx_list); + hw_desc = (char *) mv_chan->device->dma_desc_pool; + slot->async_tx.phys = + (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE]; + slot->idx = idx++; + + spin_lock_bh(&mv_chan->lock); + mv_chan->slots_allocated = idx; + list_add_tail(&slot->slot_node, &mv_chan->all_slots); + spin_unlock_bh(&mv_chan->lock); + } + + if (mv_chan->slots_allocated && !mv_chan->last_used) + mv_chan->last_used = list_entry(mv_chan->all_slots.next, + struct mv_xor_desc_slot, + slot_node); + + dev_dbg(mv_chan->device->common.dev, + "allocated %d descriptor slots last_used: %p\n", + mv_chan->slots_allocated, mv_chan->last_used); + + return mv_chan->slots_allocated ?
: -ENOMEM; +} + +static struct dma_async_tx_descriptor * +mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, + size_t len, unsigned long flags) +{ + struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); + struct mv_xor_desc_slot *sw_desc, *grp_start; + int slot_cnt; + + dev_dbg(mv_chan->device->common.dev, + "%s dest: %x src %x len: %u flags: %ld\n", + __func__, dest, src, len, flags); + if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) + return NULL; + + BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT)); + + spin_lock_bh(&mv_chan->lock); + slot_cnt = mv_chan_memcpy_slot_count(len); + sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1); + if (sw_desc) { + sw_desc->type = DMA_MEMCPY; + sw_desc->async_tx.flags = flags; + grp_start = sw_desc->group_head; + mv_desc_init(grp_start, flags); + mv_desc_set_byte_count(grp_start, len); + mv_desc_set_dest_addr(sw_desc->group_head, dest); + mv_desc_set_src_addr(grp_start, 0, src); + sw_desc->unmap_src_cnt = 1; + sw_desc->unmap_len = len; + } + spin_unlock_bh(&mv_chan->lock); + + dev_dbg(mv_chan->device->common.dev, + "%s sw_desc %p async_tx %p\n", + __func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL); + + return sw_desc ? &sw_desc->async_tx : NULL; +} + +static struct dma_async_tx_descriptor * +mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, + size_t len, unsigned long flags) +{ + struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); + struct mv_xor_desc_slot *sw_desc, *grp_start; + int slot_cnt; + + dev_dbg(mv_chan->device->common.dev, + "%s dest: %x len: %u flags: %ld\n", + __func__, dest, len, flags); + if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) + return NULL; + + BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT)); + + spin_lock_bh(&mv_chan->lock); + slot_cnt = mv_chan_memset_slot_count(len); + sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1); + if (sw_desc) { + sw_desc->type = DMA_MEMSET; + sw_desc->async_tx.flags = flags; + grp_start = sw_desc->group_head; + mv_desc_init(grp_start, flags); + mv_desc_set_byte_count(grp_start, len); + mv_desc_set_dest_addr(sw_desc->group_head, dest); + mv_desc_set_block_fill_val(grp_start, value); + sw_desc->unmap_src_cnt = 1; + sw_desc->unmap_len = len; + } + spin_unlock_bh(&mv_chan->lock); + dev_dbg(mv_chan->device->common.dev, + "%s sw_desc %p async_tx %p\n", + __func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL); + return sw_desc ? 
&sw_desc->async_tx : NULL; +} + +static struct dma_async_tx_descriptor * +mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, + unsigned int src_cnt, size_t len, unsigned long flags) +{ + struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); + struct mv_xor_desc_slot *sw_desc, *grp_start; + int slot_cnt; + + if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) + return NULL; + + BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT)); + + dev_dbg(mv_chan->device->common.dev, + "%s src_cnt: %d len: %u dest %x flags: %ld\n", + __func__, src_cnt, len, dest, flags); + + spin_lock_bh(&mv_chan->lock); + slot_cnt = mv_chan_xor_slot_count(len, src_cnt); + sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1); + if (sw_desc) { + sw_desc->type = DMA_XOR; + sw_desc->async_tx.flags = flags; + grp_start = sw_desc->group_head; + mv_desc_init(grp_start, flags); + /* the byte count field is the same as in memcpy desc */ + mv_desc_set_byte_count(grp_start, len); + mv_desc_set_dest_addr(sw_desc->group_head, dest); + sw_desc->unmap_src_cnt = src_cnt; + sw_desc->unmap_len = len; + while (src_cnt--) + mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]); + } + spin_unlock_bh(&mv_chan->lock); + dev_dbg(mv_chan->device->common.dev, + "%s sw_desc %p async_tx %p\n", + __func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL); + return sw_desc ? &sw_desc->async_tx : NULL; +} + +static void mv_xor_free_chan_resources(struct dma_chan *chan) +{ + struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); + struct mv_xor_desc_slot *iter, *_iter; + int in_use_descs = 0; + + mv_xor_slot_cleanup(mv_chan); + + spin_lock_bh(&mv_chan->lock); + list_for_each_entry_safe(iter, _iter, &mv_chan->chain, + chain_node) { + in_use_descs++; + list_del(&iter->chain_node); + } + list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots, + completed_node) { + in_use_descs++; + list_del(&iter->completed_node); + } + list_for_each_entry_safe_reverse( + iter, _iter, &mv_chan->all_slots, slot_node) { + list_del(&iter->slot_node); + kfree(iter); + mv_chan->slots_allocated--; + } + mv_chan->last_used = NULL; + + dev_dbg(mv_chan->device->common.dev, "%s slots_allocated %d\n", + __func__, mv_chan->slots_allocated); + spin_unlock_bh(&mv_chan->lock); + + if (in_use_descs) + dev_err(mv_chan->device->common.dev, + "freeing %d in use descriptors!\n", in_use_descs); +} + +/** + * mv_xor_is_complete - poll the status of an XOR transaction + * @chan: XOR channel handle + * @cookie: XOR transaction identifier + */ +static enum dma_status mv_xor_is_complete(struct dma_chan *chan, + dma_cookie_t cookie, + dma_cookie_t *done, + dma_cookie_t *used) +{ + struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); + dma_cookie_t last_used; + dma_cookie_t last_complete; + enum dma_status ret; + + last_used = chan->cookie; + last_complete = mv_chan->completed_cookie; + mv_chan->is_complete_cookie = cookie; + if (done) + *done = last_complete; + if (used) + *used = last_used; + + ret = dma_async_is_complete(cookie, last_complete, last_used); + if (ret == DMA_SUCCESS) { + mv_xor_clean_completed_slots(mv_chan); + return ret; + } + mv_xor_slot_cleanup(mv_chan); + + last_used = chan->cookie; + last_complete = mv_chan->completed_cookie; + + if (done) + *done = last_complete; + if (used) + *used = last_used; + + return dma_async_is_complete(cookie, last_complete, last_used); +} + +static void mv_dump_xor_regs(struct mv_xor_chan *chan) +{ + u32 val; + + val = __raw_readl(XOR_CONFIG(chan)); + dev_printk(KERN_ERR, chan->device->common.dev, + "config 0x%08x.\n", val); + + val = 
__raw_readl(XOR_ACTIVATION(chan)); + dev_printk(KERN_ERR, chan->device->common.dev, + "activation 0x%08x.\n", val); + + val = __raw_readl(XOR_INTR_CAUSE(chan)); + dev_printk(KERN_ERR, chan->device->common.dev, + "intr cause 0x%08x.\n", val); + + val = __raw_readl(XOR_INTR_MASK(chan)); + dev_printk(KERN_ERR, chan->device->common.dev, + "intr mask 0x%08x.\n", val); + + val = __raw_readl(XOR_ERROR_CAUSE(chan)); + dev_printk(KERN_ERR, chan->device->common.dev, + "error cause 0x%08x.\n", val); + + val = __raw_readl(XOR_ERROR_ADDR(chan)); + dev_printk(KERN_ERR, chan->device->common.dev, + "error addr 0x%08x.\n", val); +} + +static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan, + u32 intr_cause) +{ + if (intr_cause & (1 << 4)) { + dev_dbg(chan->device->common.dev, + "ignore this error\n"); + return; + } + + dev_printk(KERN_ERR, chan->device->common.dev, + "error on chan %d. intr cause 0x%08x.\n", + chan->idx, intr_cause); + + mv_dump_xor_regs(chan); + BUG(); +} + +static irqreturn_t mv_xor_interrupt_handler(int irq, void *data) +{ + struct mv_xor_chan *chan = data; + u32 intr_cause = mv_chan_get_intr_cause(chan); + + dev_dbg(chan->device->common.dev, "intr cause %x\n", intr_cause); + + if (mv_is_err_intr(intr_cause)) + mv_xor_err_interrupt_handler(chan, intr_cause); + + tasklet_schedule(&chan->irq_tasklet); + + mv_xor_device_clear_eoc_cause(chan); + + return IRQ_HANDLED; +} + +static void mv_xor_issue_pending(struct dma_chan *chan) +{ + struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); + + if (mv_chan->pending >= MV_XOR_THRESHOLD) { + mv_chan->pending = 0; + mv_chan_activate(mv_chan); + } +} + +/* + * Perform a transaction to verify the HW works. + */ +#define MV_XOR_TEST_SIZE 2000 + +static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device) +{ + int i; + void *src, *dest; + dma_addr_t src_dma, dest_dma; + struct dma_chan *dma_chan; + dma_cookie_t cookie; + struct dma_async_tx_descriptor *tx; + int err = 0; + struct mv_xor_chan *mv_chan; + + src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL); + if (!src) + return -ENOMEM; + + dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL); + if (!dest) { + kfree(src); + return -ENOMEM; + } + + /* Fill in src buffer */ + for (i = 0; i < MV_XOR_TEST_SIZE; i++) + ((u8 *) src)[i] = (u8)i; + + /* Start copy, using first DMA channel */ + dma_chan = container_of(device->common.channels.next, + struct dma_chan, + device_node); + if (mv_xor_alloc_chan_resources(dma_chan) < 1) { + err = -ENODEV; + goto out; + } + + dest_dma = dma_map_single(dma_chan->device->dev, dest, + MV_XOR_TEST_SIZE, DMA_FROM_DEVICE); + + src_dma = dma_map_single(dma_chan->device->dev, src, + MV_XOR_TEST_SIZE, DMA_TO_DEVICE); + + tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma, + MV_XOR_TEST_SIZE, 0); + cookie = mv_xor_tx_submit(tx); + mv_xor_issue_pending(dma_chan); + async_tx_ack(tx); + msleep(1); + + if (mv_xor_is_complete(dma_chan, cookie, NULL, NULL) != + DMA_SUCCESS) { + dev_printk(KERN_ERR, dma_chan->device->dev, + "Self-test copy timed out, disabling\n"); + err = -ENODEV; + goto free_resources; + } + + mv_chan = to_mv_xor_chan(dma_chan); + dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma, + MV_XOR_TEST_SIZE, DMA_FROM_DEVICE); + if (memcmp(src, dest, MV_XOR_TEST_SIZE)) { + dev_printk(KERN_ERR, dma_chan->device->dev, + "Self-test copy failed compare, disabling\n"); + err = -ENODEV; + goto free_resources; + } + +free_resources: + mv_xor_free_chan_resources(dma_chan); +out: + kfree(src); + kfree(dest); + return err; +} + 
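[Editor's note: the self-test above is a compressed instance of the generic dmaengine client sequence this driver serves: map the buffers, prepare a descriptor, submit it, kick the engine, then poll for completion. A minimal sketch of that sequence follows, assuming the caller owns a channel and two kernel buffers, and using only the hooks this patch wires up (device_prep_dma_memcpy, tx_submit, device_issue_pending, device_is_tx_complete). Illustrative only, not part of the patch.]

static int example_one_memcpy(struct dma_chan *chan, void *dst, void *src,
			      size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dst_dma, src_dma;
	dma_cookie_t cookie;

	/* map for device access; the driver unmaps on descriptor cleanup */
	dst_dma = dma_map_single(chan->device->dev, dst, len, DMA_FROM_DEVICE);
	src_dma = dma_map_single(chan->device->dev, src, len, DMA_TO_DEVICE);

	/* prepare, submit, then tell the hardware to start */
	tx = chan->device->device_prep_dma_memcpy(chan, dst_dma, src_dma,
						  len, 0);
	if (!tx)
		return -ENOMEM;
	cookie = tx->tx_submit(tx);	/* mv_xor_tx_submit() for this driver */
	chan->device->device_issue_pending(chan);
	async_tx_ack(tx);

	/* busy-wait for completion, as the self-test does */
	while (chan->device->device_is_tx_complete(chan, cookie, NULL, NULL)
			== DMA_IN_PROGRESS)
		msleep(1);
	return 0;
}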
+#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */ +static int __devinit +mv_xor_xor_self_test(struct mv_xor_device *device) +{ + int i, src_idx; + struct page *dest; + struct page *xor_srcs[MV_XOR_NUM_SRC_TEST]; + dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST]; + dma_addr_t dest_dma; + struct dma_async_tx_descriptor *tx; + struct dma_chan *dma_chan; + dma_cookie_t cookie; + u8 cmp_byte = 0; + u32 cmp_word; + int err = 0; + struct mv_xor_chan *mv_chan; + + for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) { + xor_srcs[src_idx] = alloc_page(GFP_KERNEL); + if (!xor_srcs[src_idx]) { + while (src_idx--) + __free_page(xor_srcs[src_idx]); + return -ENOMEM; + } + } + + dest = alloc_page(GFP_KERNEL); + if (!dest) { + while (src_idx--) + __free_page(xor_srcs[src_idx]); + return -ENOMEM; + } + + /* Fill in src buffers */ + for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) { + u8 *ptr = page_address(xor_srcs[src_idx]); + for (i = 0; i < PAGE_SIZE; i++) + ptr[i] = (1 << src_idx); + } + + for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) + cmp_byte ^= (u8) (1 << src_idx); + + cmp_word = (cmp_byte << 24) | (cmp_byte << 16) | + (cmp_byte << 8) | cmp_byte; + + memset(page_address(dest), 0, PAGE_SIZE); + + dma_chan = container_of(device->common.channels.next, + struct dma_chan, + device_node); + if (mv_xor_alloc_chan_resources(dma_chan) < 1) { + err = -ENODEV; + goto out; + } + + /* test xor */ + dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE, + DMA_FROM_DEVICE); + + for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++) + dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i], + 0, PAGE_SIZE, DMA_TO_DEVICE); + + tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs, + MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0); + + cookie = mv_xor_tx_submit(tx); + mv_xor_issue_pending(dma_chan); + async_tx_ack(tx); + msleep(8); + + if (mv_xor_is_complete(dma_chan, cookie, NULL, NULL) != + DMA_SUCCESS) { + dev_printk(KERN_ERR, dma_chan->device->dev, + "Self-test xor timed out, disabling\n"); + err = -ENODEV; + goto free_resources; + } + + mv_chan = to_mv_xor_chan(dma_chan); + dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma, + PAGE_SIZE, DMA_FROM_DEVICE); + for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) { + u32 *ptr = page_address(dest); + if (ptr[i] != cmp_word) { + dev_printk(KERN_ERR, dma_chan->device->dev, + "Self-test xor failed compare, disabling." 
+ " index %d, data %x, expected %x\n", i, + ptr[i], cmp_word); + err = -ENODEV; + goto free_resources; + } + } + +free_resources: + mv_xor_free_chan_resources(dma_chan); +out: + src_idx = MV_XOR_NUM_SRC_TEST; + while (src_idx--) + __free_page(xor_srcs[src_idx]); + __free_page(dest); + return err; +} + +static int __devexit mv_xor_remove(struct platform_device *dev) +{ + struct mv_xor_device *device = platform_get_drvdata(dev); + struct dma_chan *chan, *_chan; + struct mv_xor_chan *mv_chan; + struct mv_xor_platform_data *plat_data = dev->dev.platform_data; + + dma_async_device_unregister(&device->common); + + dma_free_coherent(&dev->dev, plat_data->pool_size, + device->dma_desc_pool_virt, device->dma_desc_pool); + + list_for_each_entry_safe(chan, _chan, &device->common.channels, + device_node) { + mv_chan = to_mv_xor_chan(chan); + list_del(&chan->device_node); + } + + return 0; +} + +static int __devinit mv_xor_probe(struct platform_device *pdev) +{ + int ret = 0; + int irq; + struct mv_xor_device *adev; + struct mv_xor_chan *mv_chan; + struct dma_device *dma_dev; + struct mv_xor_platform_data *plat_data = pdev->dev.platform_data; + + + adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL); + if (!adev) + return -ENOMEM; + + dma_dev = &adev->common; + + /* allocate coherent memory for hardware descriptors + * note: writecombine gives slightly better performance, but + * requires that we explicitly flush the writes + */ + adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev, + plat_data->pool_size, + &adev->dma_desc_pool, + GFP_KERNEL); + if (!adev->dma_desc_pool_virt) + return -ENOMEM; + + adev->id = plat_data->hw_id; + + /* discover transaction capabilites from the platform data */ + dma_dev->cap_mask = plat_data->cap_mask; + adev->pdev = pdev; + platform_set_drvdata(pdev, adev); + + adev->shared = platform_get_drvdata(plat_data->shared); + + INIT_LIST_HEAD(&dma_dev->channels); + + /* set base routines */ + dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources; + dma_dev->device_free_chan_resources = mv_xor_free_chan_resources; + dma_dev->device_is_tx_complete = mv_xor_is_complete; + dma_dev->device_issue_pending = mv_xor_issue_pending; + dma_dev->dev = &pdev->dev; + + /* set prep routines based on capability */ + if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) + dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy; + if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) + dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset; + if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { + dma_dev->max_xor = 8; ; + dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor; + } + + mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL); + if (!mv_chan) { + ret = -ENOMEM; + goto err_free_dma; + } + mv_chan->device = adev; + mv_chan->idx = plat_data->hw_id; + mv_chan->mmr_base = adev->shared->xor_base; + + if (!mv_chan->mmr_base) { + ret = -ENOMEM; + goto err_free_dma; + } + tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long) + mv_chan); + + /* clear errors before enabling interrupts */ + mv_xor_device_clear_err_status(mv_chan); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + ret = irq; + goto err_free_dma; + } + ret = devm_request_irq(&pdev->dev, irq, + mv_xor_interrupt_handler, + 0, dev_name(&pdev->dev), mv_chan); + if (ret) + goto err_free_dma; + + mv_chan_unmask_interrupts(mv_chan); + + mv_set_mode(mv_chan, DMA_MEMCPY); + + spin_lock_init(&mv_chan->lock); + INIT_LIST_HEAD(&mv_chan->chain); + INIT_LIST_HEAD(&mv_chan->completed_slots); + 
INIT_LIST_HEAD(&mv_chan->all_slots); + INIT_RCU_HEAD(&mv_chan->common.rcu); + mv_chan->common.device = dma_dev; + + list_add_tail(&mv_chan->common.device_node, &dma_dev->channels); + + if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { + ret = mv_xor_memcpy_self_test(adev); + dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret); + if (ret) + goto err_free_dma; + } + + if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { + ret = mv_xor_xor_self_test(adev); + dev_dbg(&pdev->dev, "xor self test returned %d\n", ret); + if (ret) + goto err_free_dma; + } + + dev_printk(KERN_INFO, &pdev->dev, "Marvell XOR: " + "( %s%s%s%s)\n", + dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", + dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "", + dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "", + dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : ""); + + dma_async_device_register(dma_dev); + goto out; + + err_free_dma: + dma_free_coherent(&adev->pdev->dev, plat_data->pool_size, + adev->dma_desc_pool_virt, adev->dma_desc_pool); + out: + return ret; +} + +static void +mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp, + struct mbus_dram_target_info *dram) +{ + void __iomem *base = msp->xor_base; + u32 win_enable = 0; + int i; + + for (i = 0; i < 8; i++) { + writel(0, base + WINDOW_BASE(i)); + writel(0, base + WINDOW_SIZE(i)); + if (i < 4) + writel(0, base + WINDOW_REMAP_HIGH(i)); + } + + for (i = 0; i < dram->num_cs; i++) { + struct mbus_dram_window *cs = dram->cs + i; + + writel((cs->base & 0xffff0000) | + (cs->mbus_attr << 8) | + dram->mbus_dram_target_id, base + WINDOW_BASE(i)); + writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i)); + + win_enable |= (1 << i); + win_enable |= 3 << (16 + (2 * i)); + } + + writel(win_enable, base + WINDOW_BAR_ENABLE(0)); + writel(win_enable, base + WINDOW_BAR_ENABLE(1)); +} + +static struct platform_driver mv_xor_driver = { + .probe = mv_xor_probe, + .remove = mv_xor_remove, + .driver = { + .owner = THIS_MODULE, + .name = MV_XOR_NAME, + }, +}; + +static int mv_xor_shared_probe(struct platform_device *pdev) +{ + struct mv_xor_platform_shared_data *msd = pdev->dev.platform_data; + struct mv_xor_shared_private *msp; + struct resource *res; + + dev_printk(KERN_NOTICE, &pdev->dev, "Marvell shared XOR driver\n"); + + msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL); + if (!msp) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + + msp->xor_base = devm_ioremap(&pdev->dev, res->start, + res->end - res->start + 1); + if (!msp->xor_base) + return -EBUSY; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!res) + return -ENODEV; + + msp->xor_high_base = devm_ioremap(&pdev->dev, res->start, + res->end - res->start + 1); + if (!msp->xor_high_base) + return -EBUSY; + + platform_set_drvdata(pdev, msp); + + /* + * (Re-)program MBUS remapping windows if we are asked to. 
+ */ + if (msd != NULL && msd->dram != NULL) + mv_xor_conf_mbus_windows(msp, msd->dram); + + return 0; +} + +static int mv_xor_shared_remove(struct platform_device *pdev) +{ + return 0; +} + +static struct platform_driver mv_xor_shared_driver = { + .probe = mv_xor_shared_probe, + .remove = mv_xor_shared_remove, + .driver = { + .owner = THIS_MODULE, + .name = MV_XOR_SHARED_NAME, + }, +}; + + +static int __init mv_xor_init(void) +{ + int rc; + + rc = platform_driver_register(&mv_xor_shared_driver); + if (!rc) { + rc = platform_driver_register(&mv_xor_driver); + if (rc) + platform_driver_unregister(&mv_xor_shared_driver); + } + return rc; +} +module_init(mv_xor_init); + +/* it's currently unsafe to unload this module */ +#if 0 +static void __exit mv_xor_exit(void) +{ + platform_driver_unregister(&mv_xor_driver); + platform_driver_unregister(&mv_xor_shared_driver); + return; +} + +module_exit(mv_xor_exit); +#endif + +MODULE_AUTHOR("Saeed Bishara "); +MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine"); +MODULE_LICENSE("GPL"); diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h new file mode 100644 index 00000000000..06cafe1ef52 --- /dev/null +++ b/drivers/dma/mv_xor.h @@ -0,0 +1,183 @@ +/* + * Copyright (C) 2007, 2008, Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ */ + +#ifndef MV_XOR_H +#define MV_XOR_H + +#include <linux/types.h> +#include <linux/io.h> +#include <linux/dmaengine.h> +#include <linux/interrupt.h> + +#define USE_TIMER +#define MV_XOR_SLOT_SIZE 64 +#define MV_XOR_THRESHOLD 1 + +#define XOR_OPERATION_MODE_XOR 0 +#define XOR_OPERATION_MODE_MEMCPY 2 +#define XOR_OPERATION_MODE_MEMSET 4 + +#define XOR_CURR_DESC(chan) (chan->mmr_base + 0x210 + (chan->idx * 4)) +#define XOR_NEXT_DESC(chan) (chan->mmr_base + 0x200 + (chan->idx * 4)) +#define XOR_BYTE_COUNT(chan) (chan->mmr_base + 0x220 + (chan->idx * 4)) +#define XOR_DEST_POINTER(chan) (chan->mmr_base + 0x2B0 + (chan->idx * 4)) +#define XOR_BLOCK_SIZE(chan) (chan->mmr_base + 0x2C0 + (chan->idx * 4)) +#define XOR_INIT_VALUE_LOW(chan) (chan->mmr_base + 0x2E0) +#define XOR_INIT_VALUE_HIGH(chan) (chan->mmr_base + 0x2E4) + +#define XOR_CONFIG(chan) (chan->mmr_base + 0x10 + (chan->idx * 4)) +#define XOR_ACTIVATION(chan) (chan->mmr_base + 0x20 + (chan->idx * 4)) +#define XOR_INTR_CAUSE(chan) (chan->mmr_base + 0x30) +#define XOR_INTR_MASK(chan) (chan->mmr_base + 0x40) +#define XOR_ERROR_CAUSE(chan) (chan->mmr_base + 0x50) +#define XOR_ERROR_ADDR(chan) (chan->mmr_base + 0x60) +#define XOR_INTR_MASK_VALUE 0x3F5 + +#define WINDOW_BASE(w) (0x250 + ((w) << 2)) +#define WINDOW_SIZE(w) (0x270 + ((w) << 2)) +#define WINDOW_REMAP_HIGH(w) (0x290 + ((w) << 2)) +#define WINDOW_BAR_ENABLE(chan) (0x240 + ((chan) << 2)) + +struct mv_xor_shared_private { + void __iomem *xor_base; + void __iomem *xor_high_base; +}; + + +/** + * struct mv_xor_device - internal representation of a XOR device + * @pdev: Platform device + * @id: HW XOR Device selector + * @dma_desc_pool: base of DMA descriptor region (DMA address) + * @dma_desc_pool_virt: base of DMA descriptor region (CPU address) + * @common: embedded struct dma_device + */ +struct mv_xor_device { + struct platform_device *pdev; + int id; + dma_addr_t dma_desc_pool; + void *dma_desc_pool_virt; + struct dma_device common; + struct mv_xor_shared_private *shared; +}; + +/** + * struct mv_xor_chan - internal representation of a XOR channel + * @pending: allows batching of hardware operations + * @completed_cookie: identifier for the most recently completed operation + * @lock: serializes enqueue/dequeue operations to the descriptors pool + * @mmr_base: memory mapped register base + * @idx: the index of the xor channel + * @chain: device chain view of the descriptors + * @completed_slots: slots completed by HW but still need to be acked + * @device: parent device + * @common: common dmaengine channel object members + * @last_used: placeholder for allocation to continue from where it left off + * @all_slots: complete domain of slots usable by the channel + * @slots_allocated: records the actual size of the descriptor slot pool + * @irq_tasklet: bottom half where mv_xor_slot_cleanup runs + */ +struct mv_xor_chan { + int pending; + dma_cookie_t completed_cookie; + spinlock_t lock; /* protects the descriptor slot pool */ + void __iomem *mmr_base; + unsigned int idx; + enum dma_transaction_type current_type; + struct list_head chain; + struct list_head completed_slots; + struct mv_xor_device *device; + struct dma_chan common; + struct mv_xor_desc_slot *last_used; + struct list_head all_slots; + int slots_allocated; + struct tasklet_struct irq_tasklet; +#ifdef USE_TIMER + unsigned long cleanup_time; + u32 current_on_last_cleanup; + dma_cookie_t is_complete_cookie; +#endif +}; + +/** + * struct mv_xor_desc_slot - software descriptor + * @slot_node: node on the mv_xor_chan.all_slots list + * @chain_node: node on the mv_xor_chan.chain list + * 
@completed_node: node on the mv_xor_chan.completed_slots list + * @hw_desc: virtual address of the hardware descriptor chain + * @phys: hardware address of the hardware descriptor chain + * @group_head: first operation in a transaction + * @slot_cnt: total slots used in a transaction (group of operations) + * @slots_per_op: number of slots per operation + * @idx: pool index + * @unmap_src_cnt: number of xor sources + * @unmap_len: transaction bytecount + * @async_tx: support for the async_tx api + * @group_list: list of slots that make up a multi-descriptor transaction, + * for example transfer lengths larger than the supported hw max + * @xor_check_result: result of zero sum + * @crc32_result: result of CRC-32 calculation + */ +struct mv_xor_desc_slot { + struct list_head slot_node; + struct list_head chain_node; + struct list_head completed_node; + enum dma_transaction_type type; + void *hw_desc; + struct mv_xor_desc_slot *group_head; + u16 slot_cnt; + u16 slots_per_op; + u16 idx; + u16 unmap_src_cnt; + u32 value; + size_t unmap_len; + struct dma_async_tx_descriptor async_tx; + union { + u32 *xor_check_result; + u32 *crc32_result; + }; +#ifdef USE_TIMER + unsigned long arrival_time; + struct timer_list timeout; +#endif +}; + +/* This structure describes the hardware XOR descriptor (64 bytes) */ +struct mv_xor_desc { + u32 status; /* descriptor execution status */ + u32 crc32_result; /* result of CRC-32 calculation */ + u32 desc_command; /* type of operation to be carried out */ + u32 phy_next_desc; /* next descriptor address pointer */ + u32 byte_count; /* size of src/dst blocks in bytes */ + u32 phy_dest_addr; /* destination block address */ + u32 phy_src_addr[8]; /* source block addresses */ + u32 reserved0; + u32 reserved1; +}; + +#define to_mv_sw_desc(addr_hw_desc) \ + container_of(addr_hw_desc, struct mv_xor_desc_slot, hw_desc) + +#define mv_hw_desc_slot_idx(hw_desc, idx) \ + ((void *)(((unsigned long)hw_desc) + ((idx) << 5))) + +#define MV_XOR_MIN_BYTE_COUNT (128) +#define XOR_MAX_BYTE_COUNT ((16 * 1024 * 1024) - 1) +#define MV_XOR_MAX_BYTE_COUNT XOR_MAX_BYTE_COUNT + + +#endif diff --git a/include/asm-arm/plat-orion/mv_xor.h b/include/asm-arm/plat-orion/mv_xor.h new file mode 100644 index 00000000000..c349e8ff5cc --- /dev/null +++ b/include/asm-arm/plat-orion/mv_xor.h @@ -0,0 +1,28 @@ +/* + * Marvell XOR platform device data definition file. + */ + +#ifndef __ASM_PLAT_ORION_MV_XOR_H +#define __ASM_PLAT_ORION_MV_XOR_H + +#include <linux/dmaengine.h> +#include <linux/mbus.h> + +#define MV_XOR_SHARED_NAME "mv_xor_shared" +#define MV_XOR_NAME "mv_xor" + +struct mbus_dram_target_info; + +struct mv_xor_platform_shared_data { + struct mbus_dram_target_info *dram; +}; + +struct mv_xor_platform_data { + struct platform_device *shared; + int hw_id; + dma_cap_mask_t cap_mask; + size_t pool_size; +}; + + +#endif From 4a776f0aa922a552460192c07b56f4fe9cd82632 Mon Sep 17 00:00:00 2001 From: Haavard Skinnemoen Date: Tue, 8 Jul 2008 11:58:45 -0700 Subject: [PATCH 0050/1435] dmatest: Simple DMA memcpy test client This client tests DMA memcpy using various lengths and various offsets into the source and destination buffers. It will initialize both buffers with a repeatable pattern and verify that the DMA engine copies the requested region and nothing more. It will also verify that the bytes aren't swapped around, and that the source buffer isn't modified. The dmatest module can be configured to test a specific device or a specific channel. 
It can also test multiple channels at the same time, and it can start multiple threads competing for the same channel. Changes since v2: * Support testing multiple channels at the same time * Support testing with multiple threads competing for the same channel * Use counting test patterns in order to catch byte ordering issues Changes since v1: * Remove extra dashes around "help" * Remove "default n" from Kconfig * Turn TEST_BUF_SIZE into a module parameter * Return DMA_NAK instead of DMA_DUP * Print unhandled events * Support testing specific channels and devices * Move to the end of the Makefile Acked-by: Maciej Sosnowski Signed-off-by: Haavard Skinnemoen Signed-off-by: Dan Williams --- drivers/dma/Kconfig | 7 + drivers/dma/Makefile | 1 + drivers/dma/dmatest.c | 444 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 452 insertions(+) create mode 100644 drivers/dma/dmatest.c diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 5af8b1cfc1e..4b6bd3d099c 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -72,4 +72,11 @@ config NET_DMA Say Y here if you enabled INTEL_IOATDMA or FSL_DMA, otherwise say N. +config DMATEST + tristate "DMA Test client" + depends on DMA_ENGINE + help + Simple DMA test client. Say N unless you're debugging a + DMA Device driver. + endif diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index ee272fd329c..181e3646fbf 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -1,5 +1,6 @@ obj-$(CONFIG_DMA_ENGINE) += dmaengine.o obj-$(CONFIG_NET_DMA) += iovlock.o +obj-$(CONFIG_DMATEST) += dmatest.o obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o ioatdma-objs := ioat.o ioat_dma.o ioat_dca.o obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c new file mode 100644 index 00000000000..a08d1970474 --- /dev/null +++ b/drivers/dma/dmatest.c @@ -0,0 +1,444 @@ +/* + * DMA Engine test module + * + * Copyright (C) 2007 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include <linux/delay.h> +#include <linux/dmaengine.h> +#include <linux/init.h> +#include <linux/kthread.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/random.h> +#include <linux/wait.h> + +static unsigned int test_buf_size = 16384; +module_param(test_buf_size, uint, S_IRUGO); +MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer"); + +static char test_channel[BUS_ID_SIZE]; +module_param_string(channel, test_channel, sizeof(test_channel), S_IRUGO); +MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)"); + +static char test_device[BUS_ID_SIZE]; +module_param_string(device, test_device, sizeof(test_device), S_IRUGO); +MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)"); + +static unsigned int threads_per_chan = 1; +module_param(threads_per_chan, uint, S_IRUGO); +MODULE_PARM_DESC(threads_per_chan, + "Number of threads to start per channel (default: 1)"); + +static unsigned int max_channels; +module_param(max_channels, uint, S_IRUGO); +MODULE_PARM_DESC(max_channels, + "Maximum number of channels to use (default: all)"); + +/* + * Initialization patterns. All bytes in the source buffer have bit 7 + * set, all bytes in the destination buffer have bit 7 cleared. + * + * Bit 6 is set for all bytes which are to be copied by the DMA + * engine. Bit 5 is set for all bytes which are to be overwritten by + * the DMA engine. + * + * The remaining bits are the inverse of a counter which increments by + * one for each byte address. 
+ */ +#define PATTERN_SRC 0x80 +#define PATTERN_DST 0x00 +#define PATTERN_COPY 0x40 +#define PATTERN_OVERWRITE 0x20 +#define PATTERN_COUNT_MASK 0x1f + +struct dmatest_thread { + struct list_head node; + struct task_struct *task; + struct dma_chan *chan; + u8 *srcbuf; + u8 *dstbuf; +}; + +struct dmatest_chan { + struct list_head node; + struct dma_chan *chan; + struct list_head threads; +}; + +/* + * These are protected by dma_list_mutex since they're only used by + * the DMA client event callback + */ +static LIST_HEAD(dmatest_channels); +static unsigned int nr_channels; + +static bool dmatest_match_channel(struct dma_chan *chan) +{ + if (test_channel[0] == '\0') + return true; + return strcmp(chan->dev.bus_id, test_channel) == 0; +} + +static bool dmatest_match_device(struct dma_device *device) +{ + if (test_device[0] == '\0') + return true; + return strcmp(device->dev->bus_id, test_device) == 0; +} + +static unsigned long dmatest_random(void) +{ + unsigned long buf; + + get_random_bytes(&buf, sizeof(buf)); + return buf; +} + +static void dmatest_init_srcbuf(u8 *buf, unsigned int start, unsigned int len) +{ + unsigned int i; + + for (i = 0; i < start; i++) + buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK); + for ( ; i < start + len; i++) + buf[i] = PATTERN_SRC | PATTERN_COPY + | (~i & PATTERN_COUNT_MASK); + for ( ; i < test_buf_size; i++) + buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK); +} + +static void dmatest_init_dstbuf(u8 *buf, unsigned int start, unsigned int len) +{ + unsigned int i; + + for (i = 0; i < start; i++) + buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK); + for ( ; i < start + len; i++) + buf[i] = PATTERN_DST | PATTERN_OVERWRITE + | (~i & PATTERN_COUNT_MASK); + for ( ; i < test_buf_size; i++) + buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK); +} + +static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index, + unsigned int counter, bool is_srcbuf) +{ + u8 diff = actual ^ pattern; + u8 expected = pattern | (~counter & PATTERN_COUNT_MASK); + const char *thread_name = current->comm; + + if (is_srcbuf) + pr_warning("%s: srcbuf[0x%x] overwritten!" + " Expected %02x, got %02x\n", + thread_name, index, expected, actual); + else if ((pattern & PATTERN_COPY) + && (diff & (PATTERN_COPY | PATTERN_OVERWRITE))) + pr_warning("%s: dstbuf[0x%x] not copied!" + " Expected %02x, got %02x\n", + thread_name, index, expected, actual); + else if (diff & PATTERN_SRC) + pr_warning("%s: dstbuf[0x%x] was copied!" + " Expected %02x, got %02x\n", + thread_name, index, expected, actual); + else + pr_warning("%s: dstbuf[0x%x] mismatch!" + " Expected %02x, got %02x\n", + thread_name, index, expected, actual); +} + +static unsigned int dmatest_verify(u8 *buf, unsigned int start, + unsigned int end, unsigned int counter, u8 pattern, + bool is_srcbuf) +{ + unsigned int i; + unsigned int error_count = 0; + u8 actual; + + for (i = start; i < end; i++) { + actual = buf[i]; + if (actual != (pattern | (~counter & PATTERN_COUNT_MASK))) { + if (error_count < 32) + dmatest_mismatch(actual, pattern, i, counter, + is_srcbuf); + error_count++; + } + counter++; + } + + if (error_count > 32) + pr_warning("%s: %u errors suppressed\n", + current->comm, error_count - 32); + + return error_count; +} + +/* + * This function repeatedly tests DMA transfers of various lengths and + * offsets until it is told to exit by kthread_stop(). There may be + * multiple threads running this function in parallel for a single + * channel, and there may be multiple channels being tested in + * parallel. 
+ * + * Before each test, the source and destination buffers are initialized + * with a known pattern. This pattern is different depending on + * whether it's in an area which is supposed to be copied or + * overwritten, and different in the source and destination buffers. + * So if the DMA engine doesn't copy exactly what we tell it to copy, + * we'll notice. + */ +static int dmatest_func(void *data) +{ + struct dmatest_thread *thread = data; + struct dma_chan *chan; + const char *thread_name; + unsigned int src_off, dst_off, len; + unsigned int error_count; + unsigned int failed_tests = 0; + unsigned int total_tests = 0; + dma_cookie_t cookie; + enum dma_status status; + int ret; + + thread_name = current->comm; + + ret = -ENOMEM; + thread->srcbuf = kmalloc(test_buf_size, GFP_KERNEL); + if (!thread->srcbuf) + goto err_srcbuf; + thread->dstbuf = kmalloc(test_buf_size, GFP_KERNEL); + if (!thread->dstbuf) + goto err_dstbuf; + + smp_rmb(); + chan = thread->chan; + dma_chan_get(chan); + + while (!kthread_should_stop()) { + total_tests++; + + len = dmatest_random() % test_buf_size + 1; + src_off = dmatest_random() % (test_buf_size - len + 1); + dst_off = dmatest_random() % (test_buf_size - len + 1); + + dmatest_init_srcbuf(thread->srcbuf, src_off, len); + dmatest_init_dstbuf(thread->dstbuf, dst_off, len); + + cookie = dma_async_memcpy_buf_to_buf(chan, + thread->dstbuf + dst_off, + thread->srcbuf + src_off, + len); + if (dma_submit_error(cookie)) { + pr_warning("%s: #%u: submit error %d with src_off=0x%x " + "dst_off=0x%x len=0x%x\n", + thread_name, total_tests - 1, cookie, + src_off, dst_off, len); + msleep(100); + failed_tests++; + continue; + } + dma_async_memcpy_issue_pending(chan); + + do { + msleep(1); + status = dma_async_memcpy_complete( + chan, cookie, NULL, NULL); + } while (status == DMA_IN_PROGRESS); + + if (status == DMA_ERROR) { + pr_warning("%s: #%u: error during copy\n", + thread_name, total_tests - 1); + failed_tests++; + continue; + } + + error_count = 0; + + pr_debug("%s: verifying source buffer...\n", thread_name); + error_count += dmatest_verify(thread->srcbuf, 0, src_off, + 0, PATTERN_SRC, true); + error_count += dmatest_verify(thread->srcbuf, src_off, + src_off + len, src_off, + PATTERN_SRC | PATTERN_COPY, true); + error_count += dmatest_verify(thread->srcbuf, src_off + len, + test_buf_size, src_off + len, + PATTERN_SRC, true); + + pr_debug("%s: verifying dest buffer...\n", + thread_name); + error_count += dmatest_verify(thread->dstbuf, 0, dst_off, + 0, PATTERN_DST, false); + error_count += dmatest_verify(thread->dstbuf, dst_off, + dst_off + len, src_off, + PATTERN_SRC | PATTERN_COPY, false); + error_count += dmatest_verify(thread->dstbuf, dst_off + len, + test_buf_size, dst_off + len, + PATTERN_DST, false); + + if (error_count) { + pr_warning("%s: #%u: %u errors with " + "src_off=0x%x dst_off=0x%x len=0x%x\n", + thread_name, total_tests - 1, error_count, + src_off, dst_off, len); + failed_tests++; + } else { + pr_debug("%s: #%u: No errors with " + "src_off=0x%x dst_off=0x%x len=0x%x\n", + thread_name, total_tests - 1, + src_off, dst_off, len); + } + } + + ret = 0; + dma_chan_put(chan); + kfree(thread->dstbuf); +err_dstbuf: + kfree(thread->srcbuf); +err_srcbuf: + pr_notice("%s: terminating after %u tests, %u failures (status %d)\n", + thread_name, total_tests, failed_tests, ret); + return ret; +} + +static void dmatest_cleanup_channel(struct dmatest_chan *dtc) +{ + struct dmatest_thread *thread; + struct dmatest_thread *_thread; + int ret; + + 
list_for_each_entry_safe(thread, _thread, &dtc->threads, node) { + ret = kthread_stop(thread->task); + pr_debug("dmatest: thread %s exited with status %d\n", + thread->task->comm, ret); + list_del(&thread->node); + kfree(thread); + } + kfree(dtc); +} + +static enum dma_state_client dmatest_add_channel(struct dma_chan *chan) +{ + struct dmatest_chan *dtc; + struct dmatest_thread *thread; + unsigned int i; + + dtc = kmalloc(sizeof(struct dmatest_chan), GFP_ATOMIC); + if (!dtc) { + pr_warning("dmatest: No memory for %s\n", chan->dev.bus_id); + return DMA_NAK; + } + + dtc->chan = chan; + INIT_LIST_HEAD(&dtc->threads); + + for (i = 0; i < threads_per_chan; i++) { + thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL); + if (!thread) { + pr_warning("dmatest: No memory for %s-test%u\n", + chan->dev.bus_id, i); + break; + } + thread->chan = dtc->chan; + smp_wmb(); + thread->task = kthread_run(dmatest_func, thread, "%s-test%u", + chan->dev.bus_id, i); + if (IS_ERR(thread->task)) { + pr_warning("dmatest: Failed to run thread %s-test%u\n", + chan->dev.bus_id, i); + kfree(thread); + break; + } + + /* srcbuf and dstbuf are allocated by the thread itself */ + + list_add_tail(&thread->node, &dtc->threads); + } + + pr_info("dmatest: Started %u threads using %s\n", i, chan->dev.bus_id); + + list_add_tail(&dtc->node, &dmatest_channels); + nr_channels++; + + return DMA_ACK; +} + +static enum dma_state_client dmatest_remove_channel(struct dma_chan *chan) +{ + struct dmatest_chan *dtc, *_dtc; + + list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) { + if (dtc->chan == chan) { + list_del(&dtc->node); + dmatest_cleanup_channel(dtc); + pr_debug("dmatest: lost channel %s\n", + chan->dev.bus_id); + return DMA_ACK; + } + } + + return DMA_DUP; +} + +/* + * Start testing threads as new channels are assigned to us, and kill + * them when the channels go away. + * + * When we unregister the client, all channels are removed so this + * will also take care of cleaning things up when the module is + * unloaded. + */ +static enum dma_state_client +dmatest_event(struct dma_client *client, struct dma_chan *chan, + enum dma_state state) +{ + enum dma_state_client ack = DMA_NAK; + + switch (state) { + case DMA_RESOURCE_AVAILABLE: + if (!dmatest_match_channel(chan) + || !dmatest_match_device(chan->device)) + ack = DMA_DUP; + else if (max_channels && nr_channels >= max_channels) + ack = DMA_NAK; + else + ack = dmatest_add_channel(chan); + break; + + case DMA_RESOURCE_REMOVED: + ack = dmatest_remove_channel(chan); + break; + + default: + pr_info("dmatest: Unhandled event %u (%s)\n", + state, chan->dev.bus_id); + break; + } + + return ack; +} + +static struct dma_client dmatest_client = { + .event_callback = dmatest_event, +}; + +static int __init dmatest_init(void) +{ + dma_cap_set(DMA_MEMCPY, dmatest_client.cap_mask); + dma_async_client_register(&dmatest_client); + dma_async_client_chan_request(&dmatest_client); + + return 0; +} +module_init(dmatest_init); + +static void __exit dmatest_exit(void) +{ + dma_async_client_unregister(&dmatest_client); +} +module_exit(dmatest_exit); + +MODULE_AUTHOR("Haavard Skinnemoen "); +MODULE_LICENSE("GPL v2"); From 848c536a37b8db4e461f14ca15fe29850151c822 Mon Sep 17 00:00:00 2001 From: Haavard Skinnemoen Date: Tue, 8 Jul 2008 11:58:58 -0700 Subject: [PATCH 0051/1435] dmaengine: Add dma_client parameter to device_alloc_chan_resources A DMA controller capable of doing slave transfers may need to know a few things about the slave when preparing the channel. 
We don't want to add this information to struct dma_channel since the channel hasn't yet been bound to a client at this point. Instead, pass a reference to the client requesting the channel to the driver's device_alloc_chan_resources hook so that it can pick the necessary information from the dma_client struct by itself. [dan.j.williams@intel.com: fixed up fsldma and mv_xor] Acked-by: Maciej Sosnowski Signed-off-by: Haavard Skinnemoen Signed-off-by: Dan Williams --- drivers/dma/dmaengine.c | 3 ++- drivers/dma/fsldma.c | 7 ++++--- drivers/dma/ioat_dma.c | 5 +++-- drivers/dma/iop-adma.c | 7 ++++--- drivers/dma/mv_xor.c | 7 ++++--- include/linux/dmaengine.h | 3 ++- 6 files changed, 19 insertions(+), 13 deletions(-) diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 10de69eb1a3..7344f5dbd50 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -174,7 +174,8 @@ static void dma_client_chan_alloc(struct dma_client *client) if (!dma_chan_satisfies_mask(chan, client->cap_mask)) continue; - desc = chan->device->device_alloc_chan_resources(chan); + desc = chan->device->device_alloc_chan_resources( + chan, client); if (desc >= 0) { ack = client->event_callback(client, chan, diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 724f6fdd0af..c0059ca5834 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -366,7 +366,8 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor( * * Return - The number of descriptors allocated. */ -static int fsl_dma_alloc_chan_resources(struct dma_chan *chan) +static int fsl_dma_alloc_chan_resources(struct dma_chan *chan, + struct dma_client *client) { struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); LIST_HEAD(tmp_list); @@ -819,7 +820,7 @@ static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan) chan = &fsl_chan->common; - if (fsl_dma_alloc_chan_resources(chan) < 1) { + if (fsl_dma_alloc_chan_resources(chan, NULL) < 1) { dev_err(fsl_chan->dev, "selftest: Cannot alloc resources for DMA\n"); err = -ENODEV; @@ -847,7 +848,7 @@ static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan) /* Test free and re-alloc channel resources */ fsl_dma_free_chan_resources(chan); - if (fsl_dma_alloc_chan_resources(chan) < 1) { + if (fsl_dma_alloc_chan_resources(chan, NULL) < 1) { dev_err(fsl_chan->dev, "selftest: Cannot alloc resources for DMA\n"); err = -ENODEV; diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c index 318e8a22d81..90e5b0a28cb 100644 --- a/drivers/dma/ioat_dma.c +++ b/drivers/dma/ioat_dma.c @@ -452,7 +452,8 @@ static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan) * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors * @chan: the channel to be filled out */ -static int ioat_dma_alloc_chan_resources(struct dma_chan *chan) +static int ioat_dma_alloc_chan_resources(struct dma_chan *chan, + struct dma_client *client) { struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); struct ioat_desc_sw *desc; @@ -1049,7 +1050,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device) dma_chan = container_of(device->common.channels.next, struct dma_chan, device_node); - if (device->common.device_alloc_chan_resources(dma_chan) < 1) { + if (device->common.device_alloc_chan_resources(dma_chan, NULL) < 1) { dev_err(&device->pdev->dev, "selftest cannot allocate chan resource\n"); err = -ENODEV; diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index 4e6b052c065..b57564dd023 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c @@ -444,7 +444,8 @@ static 
void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan); static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan); /* returns the number of allocated descriptors */ -static int iop_adma_alloc_chan_resources(struct dma_chan *chan) +static int iop_adma_alloc_chan_resources(struct dma_chan *chan, + struct dma_client *client) { char *hw_desc; int idx; @@ -838,7 +839,7 @@ static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device) dma_chan = container_of(device->common.channels.next, struct dma_chan, device_node); - if (iop_adma_alloc_chan_resources(dma_chan) < 1) { + if (iop_adma_alloc_chan_resources(dma_chan, NULL) < 1) { err = -ENODEV; goto out; } @@ -936,7 +937,7 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device) dma_chan = container_of(device->common.channels.next, struct dma_chan, device_node); - if (iop_adma_alloc_chan_resources(dma_chan) < 1) { + if (iop_adma_alloc_chan_resources(dma_chan, NULL) < 1) { err = -ENODEV; goto out; } diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index f0c123ce8ae..8239cfdbc2e 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -588,7 +588,8 @@ submit_done: } /* returns the number of allocated descriptors */ -static int mv_xor_alloc_chan_resources(struct dma_chan *chan) +static int mv_xor_alloc_chan_resources(struct dma_chan *chan, + struct dma_client *client) { char *hw_desc; int idx; @@ -938,7 +939,7 @@ static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device) dma_chan = container_of(device->common.channels.next, struct dma_chan, device_node); - if (mv_xor_alloc_chan_resources(dma_chan) < 1) { + if (mv_xor_alloc_chan_resources(dma_chan, NULL) < 1) { err = -ENODEV; goto out; } @@ -1033,7 +1034,7 @@ mv_xor_xor_self_test(struct mv_xor_device *device) dma_chan = container_of(device->common.channels.next, struct dma_chan, device_node); - if (mv_xor_alloc_chan_resources(dma_chan) < 1) { + if (mv_xor_alloc_chan_resources(dma_chan, NULL) < 1) { err = -ENODEV; goto out; } diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 6432b834322..ba89b0f5056 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -281,7 +281,8 @@ struct dma_device { int dev_id; struct device *dev; - int (*device_alloc_chan_resources)(struct dma_chan *chan); + int (*device_alloc_chan_resources)(struct dma_chan *chan, + struct dma_client *client); void (*device_free_chan_resources)(struct dma_chan *chan); struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)( From e1d181efb14a93cf263d6c588a5395518edf3294 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Fri, 4 Jul 2008 00:13:40 -0700 Subject: [PATCH 0052/1435] dmaengine: add DMA_COMPL_SKIP_{SRC,DEST}_UNMAP flags to control dma unmap In some cases client code may need the dma-driver to skip the unmap of source and/or destination buffers. Setting these flags indicates to the driver to skip the unmap step. In this regard async_xor is currently broken in that it allows the destination buffer to be unmapped while an operation is still in progress, i.e. when the number of sources exceeds the hardware channel's maximum (fixed in a subsequent patch). 
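[Editor's note: for illustration only, hypothetical client code that is not part of this patch. A client that keeps its own mappings alive across several operations would pass the new flags at prep time and perform the unmap itself once the whole chain has completed:

	/* sketch: caller owns the src/dst mappings, so suppress
	 * the driver's unmap on completion
	 */
	enum dma_ctrl_flags flags = DMA_CTRL_ACK |
				    DMA_COMPL_SKIP_SRC_UNMAP |
				    DMA_COMPL_SKIP_DEST_UNMAP;
	tx = chan->device->device_prep_dma_memcpy(chan, dst_dma, src_dma,
						  len, flags);
]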
Acked-by: Saeed Bishara Acked-by: Maciej Sosnowski Acked-by: Haavard Skinnemoen Signed-off-by: Dan Williams --- drivers/dma/ioat_dma.c | 48 +++++++++++++++++++-------------------- drivers/dma/iop-adma.c | 27 ++++++++++++++-------- drivers/dma/mv_xor.c | 22 +++++++++++++----- include/linux/dmaengine.h | 4 ++++ 4 files changed, 60 insertions(+), 41 deletions(-) diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c index 90e5b0a28cb..171cad69f31 100644 --- a/drivers/dma/ioat_dma.c +++ b/drivers/dma/ioat_dma.c @@ -757,6 +757,27 @@ static void ioat_dma_cleanup_tasklet(unsigned long data) chan->reg_base + IOAT_CHANCTRL_OFFSET); } +static void +ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc) +{ + /* + * yes we are unmapping both _page and _single + * alloc'd regions with unmap_page. Is this + * *really* that bad? + */ + if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) + pci_unmap_page(ioat_chan->device->pdev, + pci_unmap_addr(desc, dst), + pci_unmap_len(desc, len), + PCI_DMA_FROMDEVICE); + + if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) + pci_unmap_page(ioat_chan->device->pdev, + pci_unmap_addr(desc, src), + pci_unmap_len(desc, len), + PCI_DMA_TODEVICE); +} + /** * ioat_dma_memcpy_cleanup - cleanup up finished descriptors * @chan: ioat channel to be cleaned up @@ -817,21 +838,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan) */ if (desc->async_tx.cookie) { cookie = desc->async_tx.cookie; - - /* - * yes we are unmapping both _page and _single - * alloc'd regions with unmap_page. Is this - * *really* that bad? - */ - pci_unmap_page(ioat_chan->device->pdev, - pci_unmap_addr(desc, dst), - pci_unmap_len(desc, len), - PCI_DMA_FROMDEVICE); - pci_unmap_page(ioat_chan->device->pdev, - pci_unmap_addr(desc, src), - pci_unmap_len(desc, len), - PCI_DMA_TODEVICE); - + ioat_dma_unmap(ioat_chan, desc); if (desc->async_tx.callback) { desc->async_tx.callback(desc->async_tx.callback_param); desc->async_tx.callback = NULL; @@ -890,16 +897,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan) if (desc->async_tx.cookie) { cookie = desc->async_tx.cookie; desc->async_tx.cookie = 0; - - pci_unmap_page(ioat_chan->device->pdev, - pci_unmap_addr(desc, dst), - pci_unmap_len(desc, len), - PCI_DMA_FROMDEVICE); - pci_unmap_page(ioat_chan->device->pdev, - pci_unmap_addr(desc, src), - pci_unmap_len(desc, len), - PCI_DMA_TODEVICE); - + ioat_dma_unmap(ioat_chan, desc); if (desc->async_tx.callback) { desc->async_tx.callback(desc->async_tx.callback_param); desc->async_tx.callback = NULL; diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index b57564dd023..434013d4128 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c @@ -82,17 +82,24 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc, struct device *dev = &iop_chan->device->pdev->dev; u32 len = unmap->unmap_len; - u32 src_cnt = unmap->unmap_src_cnt; - dma_addr_t addr = iop_desc_get_dest_addr(unmap, - iop_chan); + enum dma_ctrl_flags flags = desc->async_tx.flags; + u32 src_cnt; + dma_addr_t addr; - dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE); - while (src_cnt--) { - addr = iop_desc_get_src_addr(unmap, - iop_chan, - src_cnt); - dma_unmap_page(dev, addr, len, - DMA_TO_DEVICE); + if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { + addr = iop_desc_get_dest_addr(unmap, iop_chan); + dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE); + } + + if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { + src_cnt = unmap->unmap_src_cnt; + while (src_cnt--) { + addr = 
iop_desc_get_src_addr(unmap, + iop_chan, + src_cnt); + dma_unmap_page(dev, addr, len, + DMA_TO_DEVICE); + } } desc->group_head = NULL; } diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 8239cfdbc2e..a4e4494663b 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -308,13 +308,23 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc, struct device *dev = &mv_chan->device->pdev->dev; u32 len = unmap->unmap_len; - u32 src_cnt = unmap->unmap_src_cnt; - dma_addr_t addr = mv_desc_get_dest_addr(unmap); + enum dma_ctrl_flags flags = desc->async_tx.flags; + u32 src_cnt; + dma_addr_t addr; - dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE); - while (src_cnt--) { - addr = mv_desc_get_src_addr(unmap, src_cnt); - dma_unmap_page(dev, addr, len, DMA_TO_DEVICE); + if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { + addr = mv_desc_get_dest_addr(unmap); + dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE); + } + + if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { + src_cnt = unmap->unmap_src_cnt; + while (src_cnt--) { + addr = mv_desc_get_src_addr(unmap, + src_cnt); + dma_unmap_page(dev, addr, len, + DMA_TO_DEVICE); + } } desc->group_head = NULL; } diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index ba89b0f5056..b058d636038 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -102,10 +102,14 @@ enum dma_transaction_type { * @DMA_CTRL_ACK - the descriptor cannot be reused until the client * acknowledges receipt, i.e. has a chance to establish any * dependency chains + * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s) + * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s) */ enum dma_ctrl_flags { DMA_PREP_INTERRUPT = (1 << 0), DMA_CTRL_ACK = (1 << 1), + DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2), + DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3), }; /** From dc0ee6435cb92ccc81b14ff28d163fecc5a7f120 Mon Sep 17 00:00:00 2001 From: Haavard Skinnemoen Date: Tue, 8 Jul 2008 11:59:35 -0700 Subject: [PATCH 0053/1435] dmaengine: Add slave DMA interface This patch adds the necessary interfaces to the DMA Engine framework to use functionality found on most embedded DMA controllers: DMA from and to I/O registers with hardware handshaking. In this context, hardware handshaking means that the peripheral that owns the I/O registers in question is able to tell the DMA controller when more data is available for reading, or when there is room for more data to be written. This usually happens internally on the chip, but these signals may also be exported outside the chip for things like IDE DMA, etc. A new struct dma_slave is introduced. This contains information that the DMA engine driver needs to set up slave transfers to and from a slave device. Most engines supporting DMA slave transfers will want to extend this structure with controller-specific parameters. This additional information is usually passed from the platform/board code through the client driver. A "slave" pointer is added to the dma_client struct. This must point to a valid dma_slave structure iff the DMA_SLAVE capability is requested. The DMA engine driver may use this information in its device_alloc_chan_resources hook to configure the DMA controller for slave transfers from and to the given slave device. A new operation for preparing slave DMA transfers is added to struct dma_device. This takes a scatterlist and returns a single descriptor representing the whole transfer. Another new operation for terminating all pending transfers is added as well. 
The latter is needed because there may be errors outside the scope of the DMA Engine framework that may require DMA operations to be terminated prematurely. DMA Engine drivers may extend the dma_device, dma_chan and/or dma_slave_descriptor structures to allow controller-specific operations. The client driver can detect such extensions by looking at the DMA Engine's struct device, or it can request a specific DMA Engine device by setting the dma_dev field in struct dma_slave. dmaslave interface changes since v4: * Fix checkpatch errors * Fix changelog (there are no slave descriptors anymore) dmaslave interface changes since v3: * Use dma_data_direction instead of a new enum * Submit slave transfers as scatterlists * Remove the DMA slave descriptor struct dmaslave interface changes since v2: * Add a dma_dev field to struct dma_slave. If set, the client can only be bound to the DMA controller that corresponds to this device. This allows controller-specific extensions of the dma_slave structure; if the device matches, the controller may safely assume its extensions are present. * Move reg_width into struct dma_slave as there are currently no users that need to be able to set the width on a per-transfer basis. dmaslave interface changes since v1: * Drop the set_direction and set_width descriptor hooks. Pass the direction and width to the prep function instead. * Declare a dma_slave struct with fixed information about a slave, i.e. register addresses, handshake interfaces and such. * Add pointer to a dma_slave struct to dma_client. Can be NULL if the DMA_SLAVE capability isn't requested. * Drop the set_slave device hook since the alloc_chan_resources hook now has enough information to set up the channel for slave transfers. Acked-by: Maciej Sosnowski Signed-off-by: Haavard Skinnemoen Signed-off-by: Dan Williams --- drivers/dma/dmaengine.c | 16 +++++++++++- include/linux/dmaengine.h | 52 ++++++++++++++++++++++++++++++++++++++- 2 files changed, 66 insertions(+), 2 deletions(-) diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 7344f5dbd50..dc003a3a787 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -169,7 +169,12 @@ static void dma_client_chan_alloc(struct dma_client *client) enum dma_state_client ack; /* Find a channel */ - list_for_each_entry(device, &dma_device_list, global_node) + list_for_each_entry(device, &dma_device_list, global_node) { + /* Does the client require a specific DMA controller? 
*/ + if (client->slave && client->slave->dma_dev + && client->slave->dma_dev != device->dev) + continue; + list_for_each_entry(chan, &device->channels, device_node) { if (!dma_chan_satisfies_mask(chan, client->cap_mask)) continue; @@ -191,6 +196,7 @@ static void dma_client_chan_alloc(struct dma_client *client) return; } } + } } enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) @@ -289,6 +295,10 @@ static void dma_clients_notify_removed(struct dma_chan *chan) */ void dma_async_client_register(struct dma_client *client) { + /* validate client data */ + BUG_ON(dma_has_cap(DMA_SLAVE, client->cap_mask) && + !client->slave); + mutex_lock(&dma_list_mutex); list_add_tail(&client->global_node, &dma_client_list); mutex_unlock(&dma_list_mutex); @@ -365,6 +375,10 @@ int dma_async_device_register(struct dma_device *device) !device->device_prep_dma_memset); BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt); + BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) && + !device->device_prep_slave_sg); + BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) && + !device->device_terminate_all); BUG_ON(!device->device_alloc_chan_resources); BUG_ON(!device->device_free_chan_resources); diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index b058d636038..9b91d341e1f 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -89,10 +89,23 @@ enum dma_transaction_type { DMA_MEMSET, DMA_MEMCPY_CRC32C, DMA_INTERRUPT, + DMA_SLAVE, }; /* last transaction type for creation of the capabilities mask */ -#define DMA_TX_TYPE_END (DMA_INTERRUPT + 1) +#define DMA_TX_TYPE_END (DMA_SLAVE + 1) + +/** + * enum dma_slave_width - DMA slave register access width. + * @DMA_SLAVE_WIDTH_8BIT: Do 8-bit slave register accesses + * @DMA_SLAVE_WIDTH_16BIT: Do 16-bit slave register accesses + * @DMA_SLAVE_WIDTH_32BIT: Do 32-bit slave register accesses + */ +enum dma_slave_width { + DMA_SLAVE_WIDTH_8BIT, + DMA_SLAVE_WIDTH_16BIT, + DMA_SLAVE_WIDTH_32BIT, +}; /** * enum dma_ctrl_flags - DMA flags to augment operation preparation, @@ -118,6 +131,32 @@ enum dma_ctrl_flags { */ typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t; +/** + * struct dma_slave - Information about a DMA slave + * @dev: device acting as DMA slave + * @dma_dev: required DMA master device. If non-NULL, the client can not be + * bound to other masters than this. + * @tx_reg: physical address of data register used for + * memory-to-peripheral transfers + * @rx_reg: physical address of data register used for + * peripheral-to-memory transfers + * @reg_width: peripheral register width + * + * If dma_dev is non-NULL, the client can not be bound to other DMA + * masters than the one corresponding to this device. The DMA master + * driver may use this to determine if there is controller-specific + * data wrapped around this struct. Drivers of platform code that sets + * the dma_dev field must therefore make sure to use an appropriate + * controller-specific dma slave structure wrapping this struct. 
+ */ +struct dma_slave { + struct device *dev; + struct device *dma_dev; + dma_addr_t tx_reg; + dma_addr_t rx_reg; + enum dma_slave_width reg_width; +}; + /** * struct dma_chan_percpu - the per-CPU part of struct dma_chan * @refcount: local_t used for open-coded "bigref" counting @@ -208,11 +247,14 @@ typedef enum dma_state_client (*dma_event_callback) (struct dma_client *client, * @event_callback: func ptr to call when something happens * @cap_mask: only return channels that satisfy the requested capabilities * a value of zero corresponds to any capability + * @slave: data for preparing slave transfer. Must be non-NULL iff the + * DMA_SLAVE capability is requested. * @global_node: list_head for global dma_client_list */ struct dma_client { dma_event_callback event_callback; dma_cap_mask_t cap_mask; + struct dma_slave *slave; struct list_head global_node; }; @@ -269,6 +311,8 @@ struct dma_async_tx_descriptor { * @device_prep_dma_zero_sum: prepares a zero_sum operation * @device_prep_dma_memset: prepares a memset operation * @device_prep_dma_interrupt: prepares an end of chain interrupt operation + * @device_prep_slave_sg: prepares a slave dma operation + * @device_terminate_all: terminate all pending operations * @device_issue_pending: push pending transactions to hardware */ struct dma_device { @@ -304,6 +348,12 @@ struct dma_device { struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)( struct dma_chan *chan, unsigned long flags); + struct dma_async_tx_descriptor *(*device_prep_slave_sg)( + struct dma_chan *chan, struct scatterlist *sgl, + unsigned int sg_len, enum dma_data_direction direction, + unsigned long flags); + void (*device_terminate_all)(struct dma_chan *chan); + enum dma_status (*device_is_tx_complete)(struct dma_chan *chan, dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used); From 3bfb1d20b547a5071d01344581eac5846ea84491 Mon Sep 17 00:00:00 2001 From: Haavard Skinnemoen Date: Tue, 8 Jul 2008 11:59:42 -0700 Subject: [PATCH 0054/1435] dmaengine: Driver for the Synopsys DesignWare DMA controller This adds a driver for the Synopsys DesignWare DMA controller (aka DMACA on AVR32 systems.) This DMA controller can be found integrated on the AT32AP7000 chip and is primarily meant for peripheral DMA transfer, but can also be used for memory-to-memory transfers. This patch is based on a driver from David Brownell which was based on an older version of the DMA Engine framework. It also implements the proposed extensions to the DMA Engine API for slave DMA operations. The dmatest client shows no problems, but there may still be room for improvement performance-wise. DMA slave transfer performance is definitely "good enough"; reading 100 MiB from an SD card running at ~20 MHz yields ~7.2 MiB/s average transfer rate. Full documentation for this controller can be found in the Synopsys DW AHB DMAC Databook: http://www.synopsys.com/designware/docs/iip/DW_ahb_dmac/latest/doc/dw_ahb_dmac_db.pdf The controller has lots of implementation options, so it's usually a good idea to check the data sheet of the chip it's integrated on as well. The AT32AP7000 data sheet can be found here: http://www.atmel.com/dyn/products/datasheets.asp?family_id=682 Changes since v4: * Use client_count instead of dma_chan_is_in_use() * Add missing include * Unmap buffers unless client told us not to Changes since v3: * Update to latest DMA engine and DMA slave APIs * Embed the hw descriptor into the sw descriptor * Clean up and update MODULE_DESCRIPTION, copyright date, etc.
Changes since v2: * Dequeue all pending transfers in terminate_all() * Rename dw_dmac.h -> dw_dmac_regs.h * Define and use controller-specific dma_slave data * Fix up a few outdated comments * Define hardware registers as structs (doesn't generate better code, unfortunately, but it looks nicer.) * Get number of channels from platform_data instead of hardcoding it based on CONFIG_WHATEVER_CPU. * Give slave clients exclusive access to the channel Acked-by: Maciej Sosnowski , Signed-off-by: Haavard Skinnemoen Signed-off-by: Dan Williams --- arch/avr32/mach-at32ap/at32ap700x.c | 27 +- drivers/dma/Kconfig | 9 + drivers/dma/Makefile | 1 + drivers/dma/dw_dmac.c | 1122 ++++++++++++++++++++ drivers/dma/dw_dmac_regs.h | 225 ++++ include/asm-avr32/arch-at32ap/at32ap700x.h | 16 + include/linux/dw_dmac.h | 62 ++ 7 files changed, 1449 insertions(+), 13 deletions(-) create mode 100644 drivers/dma/dw_dmac.c create mode 100644 drivers/dma/dw_dmac_regs.h create mode 100644 include/linux/dw_dmac.h diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c index 0f24b4f85c1..892e27e0d58 100644 --- a/arch/avr32/mach-at32ap/at32ap700x.c +++ b/arch/avr32/mach-at32ap/at32ap700x.c @@ -7,6 +7,7 @@ */ #include #include +#include #include #include #include @@ -599,6 +600,17 @@ static void __init genclk_init_parent(struct clk *clk) clk->parent = parent; } +static struct dw_dma_platform_data dw_dmac0_data = { + .nr_channels = 3, +}; + +static struct resource dw_dmac0_resource[] = { + PBMEM(0xff200000), + IRQ(2), +}; +DEFINE_DEV_DATA(dw_dmac, 0); +DEV_CLK(hclk, dw_dmac0, hsb, 10); + /* -------------------------------------------------------------------- * System peripherals * -------------------------------------------------------------------- */ @@ -705,17 +717,6 @@ static struct clk pico_clk = { .users = 1, }; -static struct resource dmaca0_resource[] = { - { - .start = 0xff200000, - .end = 0xff20ffff, - .flags = IORESOURCE_MEM, - }, - IRQ(2), -}; -DEFINE_DEV(dmaca, 0); -DEV_CLK(hclk, dmaca0, hsb, 10); - /* -------------------------------------------------------------------- * HMATRIX * -------------------------------------------------------------------- */ @@ -828,7 +829,7 @@ void __init at32_add_system_devices(void) platform_device_register(&at32_eic0_device); platform_device_register(&smc0_device); platform_device_register(&pdc_device); - platform_device_register(&dmaca0_device); + platform_device_register(&dw_dmac0_device); platform_device_register(&at32_tcb0_device); platform_device_register(&at32_tcb1_device); @@ -1891,7 +1892,7 @@ struct clk *at32_clock_list[] = { &smc0_mck, &pdc_hclk, &pdc_pclk, - &dmaca0_hclk, + &dw_dmac0_hclk, &pico_clk, &pio0_mck, &pio1_mck, diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 4b6bd3d099c..cd303901eb5 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -38,6 +38,15 @@ config INTEL_IOP_ADMA help Enable support for the Intel(R) IOP Series RAID engines. +config DW_DMAC + tristate "Synopsys DesignWare AHB DMA support" + depends on AVR32 + select DMA_ENGINE + default y if CPU_AT32AP7000 + help + Support the Synopsys DesignWare AHB DMA controller. This + can be integrated in chips such as the Atmel AT32ap7000. 
+ config FSL_DMA bool "Freescale MPC85xx/MPC83xx DMA support" depends on PPC diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 181e3646fbf..14f59527d4f 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -6,3 +6,4 @@ ioatdma-objs := ioat.o ioat_dma.o ioat_dca.o obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o obj-$(CONFIG_FSL_DMA) += fsldma.o obj-$(CONFIG_MV_XOR) += mv_xor.o +obj-$(CONFIG_DW_DMAC) += dw_dmac.o diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c new file mode 100644 index 00000000000..94df9177124 --- /dev/null +++ b/drivers/dma/dw_dmac.c @@ -0,0 +1,1122 @@ +/* + * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on + * AVR32 systems.) + * + * Copyright (C) 2007-2008 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dw_dmac_regs.h" + +/* + * This supports the Synopsys "DesignWare AHB Central DMA Controller", + * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all + * of which use ARM any more). See the "Databook" from Synopsys for + * information beyond what licensees probably provide. + * + * The driver has currently been tested only with the Atmel AT32AP7000, + * which does not support descriptor writeback. + */ + +/* NOTE: DMS+SMS is system-specific. We should get this information + * from the platform code somehow. + */ +#define DWC_DEFAULT_CTLLO (DWC_CTLL_DST_MSIZE(0) \ + | DWC_CTLL_SRC_MSIZE(0) \ + | DWC_CTLL_DMS(0) \ + | DWC_CTLL_SMS(1) \ + | DWC_CTLL_LLP_D_EN \ + | DWC_CTLL_LLP_S_EN) + +/* + * This is configuration-dependent and usually a funny size like 4095. + * Let's round it down to the nearest power of two. + * + * Note that this is a transfer count, i.e. if we transfer 32-bit + * words, we can do 8192 bytes per descriptor. + * + * This parameter is also system-specific. + */ +#define DWC_MAX_COUNT 2048U + +/* + * Number of descriptors to allocate for each channel. This should be + * made configurable somehow; preferably, the clients (at least the + * ones using slave transfers) should be able to give us a hint. + */ +#define NR_DESCS_PER_CHANNEL 64 + +/*----------------------------------------------------------------------*/ + +/* + * Because we're not relying on writeback from the controller (it may not + * even be configured into the core!) we don't need to use dma_pool. These + * descriptors -- and associated data -- are cacheable. We do need to make + * sure their dcache entries are written back before handing them off to + * the controller, though. 
+ */ + +static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc) +{ + return list_entry(dwc->active_list.next, struct dw_desc, desc_node); +} + +static struct dw_desc *dwc_first_queued(struct dw_dma_chan *dwc) +{ + return list_entry(dwc->queue.next, struct dw_desc, desc_node); +} + +static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) +{ + struct dw_desc *desc, *_desc; + struct dw_desc *ret = NULL; + unsigned int i = 0; + + spin_lock_bh(&dwc->lock); + list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) { + if (async_tx_test_ack(&desc->txd)) { + list_del(&desc->desc_node); + ret = desc; + break; + } + dev_dbg(&dwc->chan.dev, "desc %p not ACKed\n", desc); + i++; + } + spin_unlock_bh(&dwc->lock); + + dev_vdbg(&dwc->chan.dev, "scanned %u descriptors on freelist\n", i); + + return ret; +} + +static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc) +{ + struct dw_desc *child; + + list_for_each_entry(child, &desc->txd.tx_list, desc_node) + dma_sync_single_for_cpu(dwc->chan.dev.parent, + child->txd.phys, sizeof(child->lli), + DMA_TO_DEVICE); + dma_sync_single_for_cpu(dwc->chan.dev.parent, + desc->txd.phys, sizeof(desc->lli), + DMA_TO_DEVICE); +} + +/* + * Move a descriptor, including any children, to the free list. + * `desc' must not be on any lists. + */ +static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) +{ + if (desc) { + struct dw_desc *child; + + dwc_sync_desc_for_cpu(dwc, desc); + + spin_lock_bh(&dwc->lock); + list_for_each_entry(child, &desc->txd.tx_list, desc_node) + dev_vdbg(&dwc->chan.dev, + "moving child desc %p to freelist\n", + child); + list_splice_init(&desc->txd.tx_list, &dwc->free_list); + dev_vdbg(&dwc->chan.dev, "moving desc %p to freelist\n", desc); + list_add(&desc->desc_node, &dwc->free_list); + spin_unlock_bh(&dwc->lock); + } +} + +/* Called with dwc->lock held and bh disabled */ +static dma_cookie_t +dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc) +{ + dma_cookie_t cookie = dwc->chan.cookie; + + if (++cookie < 0) + cookie = 1; + + dwc->chan.cookie = cookie; + desc->txd.cookie = cookie; + + return cookie; +} + +/*----------------------------------------------------------------------*/ + +/* Called with dwc->lock held and bh disabled */ +static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) +{ + struct dw_dma *dw = to_dw_dma(dwc->chan.device); + + /* ASSERT: channel is idle */ + if (dma_readl(dw, CH_EN) & dwc->mask) { + dev_err(&dwc->chan.dev, + "BUG: Attempted to start non-idle channel\n"); + dev_err(&dwc->chan.dev, + " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", + channel_readl(dwc, SAR), + channel_readl(dwc, DAR), + channel_readl(dwc, LLP), + channel_readl(dwc, CTL_HI), + channel_readl(dwc, CTL_LO)); + + /* The tasklet will hopefully advance the queue... 
*/ + return; + } + + channel_writel(dwc, LLP, first->txd.phys); + channel_writel(dwc, CTL_LO, + DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); + channel_writel(dwc, CTL_HI, 0); + channel_set_bit(dw, CH_EN, dwc->mask); +} + +/*----------------------------------------------------------------------*/ + +static void +dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc) +{ + dma_async_tx_callback callback; + void *param; + struct dma_async_tx_descriptor *txd = &desc->txd; + + dev_vdbg(&dwc->chan.dev, "descriptor %u complete\n", txd->cookie); + + dwc->completed = txd->cookie; + callback = txd->callback; + param = txd->callback_param; + + dwc_sync_desc_for_cpu(dwc, desc); + list_splice_init(&txd->tx_list, &dwc->free_list); + list_move(&desc->desc_node, &dwc->free_list); + + /* + * We use dma_unmap_page() regardless of how the buffers were + * mapped before they were submitted... + */ + if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) + dma_unmap_page(dwc->chan.dev.parent, desc->lli.dar, desc->len, + DMA_FROM_DEVICE); + if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) + dma_unmap_page(dwc->chan.dev.parent, desc->lli.sar, desc->len, + DMA_TO_DEVICE); + + /* + * The API requires that no submissions are done from a + * callback, so we don't need to drop the lock here + */ + if (callback) + callback(param); +} + +static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc) +{ + struct dw_desc *desc, *_desc; + LIST_HEAD(list); + + if (dma_readl(dw, CH_EN) & dwc->mask) { + dev_err(&dwc->chan.dev, + "BUG: XFER bit set, but channel not idle!\n"); + + /* Try to continue after resetting the channel... */ + channel_clear_bit(dw, CH_EN, dwc->mask); + while (dma_readl(dw, CH_EN) & dwc->mask) + cpu_relax(); + } + + /* + * Submit queued descriptors ASAP, i.e. before we go through + * the completed ones. + */ + if (!list_empty(&dwc->queue)) + dwc_dostart(dwc, dwc_first_queued(dwc)); + list_splice_init(&dwc->active_list, &list); + list_splice_init(&dwc->queue, &dwc->active_list); + + list_for_each_entry_safe(desc, _desc, &list, desc_node) + dwc_descriptor_complete(dwc, desc); +} + +static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) +{ + dma_addr_t llp; + struct dw_desc *desc, *_desc; + struct dw_desc *child; + u32 status_xfer; + + /* + * Clear block interrupt flag before scanning so that we don't + * miss any, and read LLP before RAW_XFER to ensure it is + * valid if we decide to scan the list. + */ + dma_writel(dw, CLEAR.BLOCK, dwc->mask); + llp = channel_readl(dwc, LLP); + status_xfer = dma_readl(dw, RAW.XFER); + + if (status_xfer & dwc->mask) { + /* Everything we've submitted is done */ + dma_writel(dw, CLEAR.XFER, dwc->mask); + dwc_complete_all(dw, dwc); + return; + } + + dev_vdbg(&dwc->chan.dev, "scan_descriptors: llp=0x%x\n", llp); + + list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { + if (desc->lli.llp == llp) + /* This one is currently in progress */ + return; + + list_for_each_entry(child, &desc->txd.tx_list, desc_node) + if (child->lli.llp == llp) + /* Currently in progress */ + return; + + /* + * No descriptors so far seem to be in progress, i.e. + * this one must be done. + */ + dwc_descriptor_complete(dwc, desc); + } + + dev_err(&dwc->chan.dev, + "BUG: All descriptors done, but channel not idle!\n"); + + /* Try to continue after resetting the channel... 
 */ + channel_clear_bit(dw, CH_EN, dwc->mask); + while (dma_readl(dw, CH_EN) & dwc->mask) + cpu_relax(); + + if (!list_empty(&dwc->queue)) { + dwc_dostart(dwc, dwc_first_queued(dwc)); + list_splice_init(&dwc->queue, &dwc->active_list); + } +} + +static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli) +{ + dev_printk(KERN_CRIT, &dwc->chan.dev, + " desc: s0x%x d0x%x l0x%x c0x%x:%x\n", + lli->sar, lli->dar, lli->llp, + lli->ctlhi, lli->ctllo); +} + +static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) +{ + struct dw_desc *bad_desc; + struct dw_desc *child; + + dwc_scan_descriptors(dw, dwc); + + /* + * The descriptor currently at the head of the active list is + * borked. Since we don't have any way to report errors, we'll + * just have to scream loudly and try to carry on. + */ + bad_desc = dwc_first_active(dwc); + list_del_init(&bad_desc->desc_node); + list_splice_init(&dwc->queue, dwc->active_list.prev); + + /* Clear the error flag and try to restart the controller */ + dma_writel(dw, CLEAR.ERROR, dwc->mask); + if (!list_empty(&dwc->active_list)) + dwc_dostart(dwc, dwc_first_active(dwc)); + + /* + * KERN_CRITICAL may seem harsh, but since this only happens + * when someone submits a bad physical address in a + * descriptor, we should consider ourselves lucky that the + * controller flagged an error instead of scribbling over + * random memory locations. + */ + dev_printk(KERN_CRIT, &dwc->chan.dev, + "Bad descriptor submitted for DMA!\n"); + dev_printk(KERN_CRIT, &dwc->chan.dev, + " cookie: %d\n", bad_desc->txd.cookie); + dwc_dump_lli(dwc, &bad_desc->lli); + list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node) + dwc_dump_lli(dwc, &child->lli); + + /* Pretend the descriptor completed successfully */ + dwc_descriptor_complete(dwc, bad_desc); +} + +static void dw_dma_tasklet(unsigned long data) +{ + struct dw_dma *dw = (struct dw_dma *)data; + struct dw_dma_chan *dwc; + u32 status_block; + u32 status_xfer; + u32 status_err; + int i; + + status_block = dma_readl(dw, RAW.BLOCK); + status_xfer = dma_readl(dw, RAW.XFER); + status_err = dma_readl(dw, RAW.ERROR); + + dev_vdbg(dw->dma.dev, "tasklet: status_block=%x status_err=%x\n", + status_block, status_err); + + for (i = 0; i < dw->dma.chancnt; i++) { + dwc = &dw->chan[i]; + spin_lock(&dwc->lock); + if (status_err & (1 << i)) + dwc_handle_error(dw, dwc); + else if ((status_block | status_xfer) & (1 << i)) + dwc_scan_descriptors(dw, dwc); + spin_unlock(&dwc->lock); + } + + /* + * Re-enable interrupts. Block Complete interrupts are only + * enabled if the INT_EN bit in the descriptor is set. This + * will trigger a scan before the whole list is done. + */ + channel_set_bit(dw, MASK.XFER, dw->all_chan_mask); + channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask); + channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask); +} + +static irqreturn_t dw_dma_interrupt(int irq, void *dev_id) +{ + struct dw_dma *dw = dev_id; + u32 status; + + dev_vdbg(dw->dma.dev, "interrupt: status=0x%x\n", + dma_readl(dw, STATUS_INT)); + + /* + * Just disable the interrupts. We'll turn them back on in the + * softirq handler.
+ */ + channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); + channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); + channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); + + status = dma_readl(dw, STATUS_INT); + if (status) { + dev_err(dw->dma.dev, + "BUG: Unexpected interrupts pending: 0x%x\n", + status); + + /* Try to recover */ + channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1); + channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1); + channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1); + channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1); + channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1); + } + + tasklet_schedule(&dw->tasklet); + + return IRQ_HANDLED; +} + +/*----------------------------------------------------------------------*/ + +static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct dw_desc *desc = txd_to_dw_desc(tx); + struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan); + dma_cookie_t cookie; + + spin_lock_bh(&dwc->lock); + cookie = dwc_assign_cookie(dwc, desc); + + /* + * REVISIT: We should attempt to chain as many descriptors as + * possible, perhaps even appending to those already submitted + * for DMA. But this is hard to do in a race-free manner. + */ + if (list_empty(&dwc->active_list)) { + dev_vdbg(&tx->chan->dev, "tx_submit: started %u\n", + desc->txd.cookie); + dwc_dostart(dwc, desc); + list_add_tail(&desc->desc_node, &dwc->active_list); + } else { + dev_vdbg(&tx->chan->dev, "tx_submit: queued %u\n", + desc->txd.cookie); + + list_add_tail(&desc->desc_node, &dwc->queue); + } + + spin_unlock_bh(&dwc->lock); + + return cookie; +} + +static struct dma_async_tx_descriptor * +dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, + size_t len, unsigned long flags) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + struct dw_desc *desc; + struct dw_desc *first; + struct dw_desc *prev; + size_t xfer_count; + size_t offset; + unsigned int src_width; + unsigned int dst_width; + u32 ctllo; + + dev_vdbg(&chan->dev, "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n", + dest, src, len, flags); + + if (unlikely(!len)) { + dev_dbg(&chan->dev, "prep_dma_memcpy: length is zero!\n"); + return NULL; + } + + /* + * We can be a lot more clever here, but this should take care + * of the most common optimization. 
+ */ + if (!((src | dest | len) & 3)) + src_width = dst_width = 2; + else if (!((src | dest | len) & 1)) + src_width = dst_width = 1; + else + src_width = dst_width = 0; + + ctllo = DWC_DEFAULT_CTLLO + | DWC_CTLL_DST_WIDTH(dst_width) + | DWC_CTLL_SRC_WIDTH(src_width) + | DWC_CTLL_DST_INC + | DWC_CTLL_SRC_INC + | DWC_CTLL_FC_M2M; + prev = first = NULL; + + for (offset = 0; offset < len; offset += xfer_count << src_width) { + xfer_count = min_t(size_t, (len - offset) >> src_width, + DWC_MAX_COUNT); + + desc = dwc_desc_get(dwc); + if (!desc) + goto err_desc_get; + + desc->lli.sar = src + offset; + desc->lli.dar = dest + offset; + desc->lli.ctllo = ctllo; + desc->lli.ctlhi = xfer_count; + + if (!first) { + first = desc; + } else { + prev->lli.llp = desc->txd.phys; + dma_sync_single_for_device(chan->dev.parent, + prev->txd.phys, sizeof(prev->lli), + DMA_TO_DEVICE); + list_add_tail(&desc->desc_node, + &first->txd.tx_list); + } + prev = desc; + } + + + if (flags & DMA_PREP_INTERRUPT) + /* Trigger interrupt after last block */ + prev->lli.ctllo |= DWC_CTLL_INT_EN; + + prev->lli.llp = 0; + dma_sync_single_for_device(chan->dev.parent, + prev->txd.phys, sizeof(prev->lli), + DMA_TO_DEVICE); + + first->txd.flags = flags; + first->len = len; + + return &first->txd; + +err_desc_get: + dwc_desc_put(dwc, first); + return NULL; +} + +static struct dma_async_tx_descriptor * +dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, + unsigned int sg_len, enum dma_data_direction direction, + unsigned long flags) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + struct dw_dma_slave *dws = dwc->dws; + struct dw_desc *prev; + struct dw_desc *first; + u32 ctllo; + dma_addr_t reg; + unsigned int reg_width; + unsigned int mem_width; + unsigned int i; + struct scatterlist *sg; + size_t total_len = 0; + + dev_vdbg(&chan->dev, "prep_dma_slave\n"); + + if (unlikely(!dws || !sg_len)) + return NULL; + + reg_width = dws->slave.reg_width; + prev = first = NULL; + + sg_len = dma_map_sg(chan->dev.parent, sgl, sg_len, direction); + + switch (direction) { + case DMA_TO_DEVICE: + ctllo = (DWC_DEFAULT_CTLLO + | DWC_CTLL_DST_WIDTH(reg_width) + | DWC_CTLL_DST_FIX + | DWC_CTLL_SRC_INC + | DWC_CTLL_FC_M2P); + reg = dws->slave.tx_reg; + for_each_sg(sgl, sg, sg_len, i) { + struct dw_desc *desc; + u32 len; + u32 mem; + + desc = dwc_desc_get(dwc); + if (!desc) { + dev_err(&chan->dev, + "not enough descriptors available\n"); + goto err_desc_get; + } + + mem = sg_phys(sg); + len = sg_dma_len(sg); + mem_width = 2; + if (unlikely(mem & 3 || len & 3)) + mem_width = 0; + + desc->lli.sar = mem; + desc->lli.dar = reg; + desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width); + desc->lli.ctlhi = len >> mem_width; + + if (!first) { + first = desc; + } else { + prev->lli.llp = desc->txd.phys; + dma_sync_single_for_device(chan->dev.parent, + prev->txd.phys, + sizeof(prev->lli), + DMA_TO_DEVICE); + list_add_tail(&desc->desc_node, + &first->txd.tx_list); + } + prev = desc; + total_len += len; + } + break; + case DMA_FROM_DEVICE: + ctllo = (DWC_DEFAULT_CTLLO + | DWC_CTLL_SRC_WIDTH(reg_width) + | DWC_CTLL_DST_INC + | DWC_CTLL_SRC_FIX + | DWC_CTLL_FC_P2M); + + reg = dws->slave.rx_reg; + for_each_sg(sgl, sg, sg_len, i) { + struct dw_desc *desc; + u32 len; + u32 mem; + + desc = dwc_desc_get(dwc); + if (!desc) { + dev_err(&chan->dev, + "not enough descriptors available\n"); + goto err_desc_get; + } + + mem = sg_phys(sg); + len = sg_dma_len(sg); + mem_width = 2; + if (unlikely(mem & 3 || len & 3)) + mem_width = 0; + + desc->lli.sar = reg; + 
desc->lli.dar = mem; + desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width); + desc->lli.ctlhi = len >> reg_width; + + if (!first) { + first = desc; + } else { + prev->lli.llp = desc->txd.phys; + dma_sync_single_for_device(chan->dev.parent, + prev->txd.phys, + sizeof(prev->lli), + DMA_TO_DEVICE); + list_add_tail(&desc->desc_node, + &first->txd.tx_list); + } + prev = desc; + total_len += len; + } + break; + default: + return NULL; + } + + if (flags & DMA_PREP_INTERRUPT) + /* Trigger interrupt after last block */ + prev->lli.ctllo |= DWC_CTLL_INT_EN; + + prev->lli.llp = 0; + dma_sync_single_for_device(chan->dev.parent, + prev->txd.phys, sizeof(prev->lli), + DMA_TO_DEVICE); + + first->len = total_len; + + return &first->txd; + +err_desc_get: + dwc_desc_put(dwc, first); + return NULL; +} + +static void dwc_terminate_all(struct dma_chan *chan) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + struct dw_dma *dw = to_dw_dma(chan->device); + struct dw_desc *desc, *_desc; + LIST_HEAD(list); + + /* + * This is only called when something went wrong elsewhere, so + * we don't really care about the data. Just disable the + * channel. We still have to poll the channel enable bit due + * to AHB/HSB limitations. + */ + spin_lock_bh(&dwc->lock); + + channel_clear_bit(dw, CH_EN, dwc->mask); + + while (dma_readl(dw, CH_EN) & dwc->mask) + cpu_relax(); + + /* active_list entries will end up before queued entries */ + list_splice_init(&dwc->queue, &list); + list_splice_init(&dwc->active_list, &list); + + spin_unlock_bh(&dwc->lock); + + /* Flush all pending and queued descriptors */ + list_for_each_entry_safe(desc, _desc, &list, desc_node) + dwc_descriptor_complete(dwc, desc); +} + +static enum dma_status +dwc_is_tx_complete(struct dma_chan *chan, + dma_cookie_t cookie, + dma_cookie_t *done, dma_cookie_t *used) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + dma_cookie_t last_used; + dma_cookie_t last_complete; + int ret; + + last_complete = dwc->completed; + last_used = chan->cookie; + + ret = dma_async_is_complete(cookie, last_complete, last_used); + if (ret != DMA_SUCCESS) { + dwc_scan_descriptors(to_dw_dma(chan->device), dwc); + + last_complete = dwc->completed; + last_used = chan->cookie; + + ret = dma_async_is_complete(cookie, last_complete, last_used); + } + + if (done) + *done = last_complete; + if (used) + *used = last_used; + + return ret; +} + +static void dwc_issue_pending(struct dma_chan *chan) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + + spin_lock_bh(&dwc->lock); + if (!list_empty(&dwc->queue)) + dwc_scan_descriptors(to_dw_dma(chan->device), dwc); + spin_unlock_bh(&dwc->lock); +} + +static int dwc_alloc_chan_resources(struct dma_chan *chan, + struct dma_client *client) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + struct dw_dma *dw = to_dw_dma(chan->device); + struct dw_desc *desc; + struct dma_slave *slave; + struct dw_dma_slave *dws; + int i; + u32 cfghi; + u32 cfglo; + + dev_vdbg(&chan->dev, "alloc_chan_resources\n"); + + /* Channels doing slave DMA can only handle one client. */ + if (dwc->dws || client->slave) { + if (chan->client_count) + return -EBUSY; + } + + /* ASSERT: channel is idle */ + if (dma_readl(dw, CH_EN) & dwc->mask) { + dev_dbg(&chan->dev, "DMA channel not idle?\n"); + return -EIO; + } + + dwc->completed = chan->cookie = 1; + + cfghi = DWC_CFGH_FIFO_MODE; + cfglo = 0; + + slave = client->slave; + if (slave) { + /* + * We need controller-specific data to set up slave + * transfers. 
+ */ + BUG_ON(!slave->dma_dev || slave->dma_dev != dw->dma.dev); + + dws = container_of(slave, struct dw_dma_slave, slave); + + dwc->dws = dws; + cfghi = dws->cfg_hi; + cfglo = dws->cfg_lo; + } else { + dwc->dws = NULL; + } + + channel_writel(dwc, CFG_LO, cfglo); + channel_writel(dwc, CFG_HI, cfghi); + + /* + * NOTE: some controllers may have additional features that we + * need to initialize here, like "scatter-gather" (which + * doesn't mean what you think it means), and status writeback. + */ + + spin_lock_bh(&dwc->lock); + i = dwc->descs_allocated; + while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) { + spin_unlock_bh(&dwc->lock); + + desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL); + if (!desc) { + dev_info(&chan->dev, + "only allocated %d descriptors\n", i); + spin_lock_bh(&dwc->lock); + break; + } + + dma_async_tx_descriptor_init(&desc->txd, chan); + desc->txd.tx_submit = dwc_tx_submit; + desc->txd.flags = DMA_CTRL_ACK; + INIT_LIST_HEAD(&desc->txd.tx_list); + desc->txd.phys = dma_map_single(chan->dev.parent, &desc->lli, + sizeof(desc->lli), DMA_TO_DEVICE); + dwc_desc_put(dwc, desc); + + spin_lock_bh(&dwc->lock); + i = ++dwc->descs_allocated; + } + + /* Enable interrupts */ + channel_set_bit(dw, MASK.XFER, dwc->mask); + channel_set_bit(dw, MASK.BLOCK, dwc->mask); + channel_set_bit(dw, MASK.ERROR, dwc->mask); + + spin_unlock_bh(&dwc->lock); + + dev_dbg(&chan->dev, + "alloc_chan_resources allocated %d descriptors\n", i); + + return i; +} + +static void dwc_free_chan_resources(struct dma_chan *chan) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + struct dw_dma *dw = to_dw_dma(chan->device); + struct dw_desc *desc, *_desc; + LIST_HEAD(list); + + dev_dbg(&chan->dev, "free_chan_resources (descs allocated=%u)\n", + dwc->descs_allocated); + + /* ASSERT: channel is idle */ + BUG_ON(!list_empty(&dwc->active_list)); + BUG_ON(!list_empty(&dwc->queue)); + BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask); + + spin_lock_bh(&dwc->lock); + list_splice_init(&dwc->free_list, &list); + dwc->descs_allocated = 0; + dwc->dws = NULL; + + /* Disable interrupts */ + channel_clear_bit(dw, MASK.XFER, dwc->mask); + channel_clear_bit(dw, MASK.BLOCK, dwc->mask); + channel_clear_bit(dw, MASK.ERROR, dwc->mask); + + spin_unlock_bh(&dwc->lock); + + list_for_each_entry_safe(desc, _desc, &list, desc_node) { + dev_vdbg(&chan->dev, " freeing descriptor %p\n", desc); + dma_unmap_single(chan->dev.parent, desc->txd.phys, + sizeof(desc->lli), DMA_TO_DEVICE); + kfree(desc); + } + + dev_vdbg(&chan->dev, "free_chan_resources done\n"); +} + +/*----------------------------------------------------------------------*/ + +static void dw_dma_off(struct dw_dma *dw) +{ + dma_writel(dw, CFG, 0); + + channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); + channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); + channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask); + channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask); + channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); + + while (dma_readl(dw, CFG) & DW_CFG_DMA_EN) + cpu_relax(); +} + +static int __init dw_probe(struct platform_device *pdev) +{ + struct dw_dma_platform_data *pdata; + struct resource *io; + struct dw_dma *dw; + size_t size; + int irq; + int err; + int i; + + pdata = pdev->dev.platform_data; + if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) + return -EINVAL; + + io = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!io) + return -EINVAL; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + size = 
sizeof(struct dw_dma); + size += pdata->nr_channels * sizeof(struct dw_dma_chan); + dw = kzalloc(size, GFP_KERNEL); + if (!dw) + return -ENOMEM; + + if (!request_mem_region(io->start, DW_REGLEN, pdev->dev.driver->name)) { + err = -EBUSY; + goto err_kfree; + } + + memset(dw, 0, sizeof *dw); + + dw->regs = ioremap(io->start, DW_REGLEN); + if (!dw->regs) { + err = -ENOMEM; + goto err_release_r; + } + + dw->clk = clk_get(&pdev->dev, "hclk"); + if (IS_ERR(dw->clk)) { + err = PTR_ERR(dw->clk); + goto err_clk; + } + clk_enable(dw->clk); + + /* force dma off, just in case */ + dw_dma_off(dw); + + err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw); + if (err) + goto err_irq; + + platform_set_drvdata(pdev, dw); + + tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); + + dw->all_chan_mask = (1 << pdata->nr_channels) - 1; + + INIT_LIST_HEAD(&dw->dma.channels); + for (i = 0; i < pdata->nr_channels; i++, dw->dma.chancnt++) { + struct dw_dma_chan *dwc = &dw->chan[i]; + + dwc->chan.device = &dw->dma; + dwc->chan.cookie = dwc->completed = 1; + dwc->chan.chan_id = i; + list_add_tail(&dwc->chan.device_node, &dw->dma.channels); + + dwc->ch_regs = &__dw_regs(dw)->CHAN[i]; + spin_lock_init(&dwc->lock); + dwc->mask = 1 << i; + + INIT_LIST_HEAD(&dwc->active_list); + INIT_LIST_HEAD(&dwc->queue); + INIT_LIST_HEAD(&dwc->free_list); + + channel_clear_bit(dw, CH_EN, dwc->mask); + } + + /* Clear/disable all interrupts on all channels. */ + dma_writel(dw, CLEAR.XFER, dw->all_chan_mask); + dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask); + dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask); + dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask); + dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask); + + channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); + channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); + channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask); + channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask); + channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); + + dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); + dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); + dw->dma.dev = &pdev->dev; + dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources; + dw->dma.device_free_chan_resources = dwc_free_chan_resources; + + dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy; + + dw->dma.device_prep_slave_sg = dwc_prep_slave_sg; + dw->dma.device_terminate_all = dwc_terminate_all; + + dw->dma.device_is_tx_complete = dwc_is_tx_complete; + dw->dma.device_issue_pending = dwc_issue_pending; + + dma_writel(dw, CFG, DW_CFG_DMA_EN); + + printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n", + pdev->dev.bus_id, dw->dma.chancnt); + + dma_async_device_register(&dw->dma); + + return 0; + +err_irq: + clk_disable(dw->clk); + clk_put(dw->clk); +err_clk: + iounmap(dw->regs); + dw->regs = NULL; +err_release_r: + release_resource(io); +err_kfree: + kfree(dw); + return err; +} + +static int __exit dw_remove(struct platform_device *pdev) +{ + struct dw_dma *dw = platform_get_drvdata(pdev); + struct dw_dma_chan *dwc, *_dwc; + struct resource *io; + + dw_dma_off(dw); + dma_async_device_unregister(&dw->dma); + + free_irq(platform_get_irq(pdev, 0), dw); + tasklet_kill(&dw->tasklet); + + list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels, + chan.device_node) { + list_del(&dwc->chan.device_node); + channel_clear_bit(dw, CH_EN, dwc->mask); + } + + clk_disable(dw->clk); + clk_put(dw->clk); + + iounmap(dw->regs); + dw->regs = NULL; + + io = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(io->start, 
DW_REGLEN); + + kfree(dw); + + return 0; +} + +static void dw_shutdown(struct platform_device *pdev) +{ + struct dw_dma *dw = platform_get_drvdata(pdev); + + dw_dma_off(platform_get_drvdata(pdev)); + clk_disable(dw->clk); +} + +static int dw_suspend_late(struct platform_device *pdev, pm_message_t mesg) +{ + struct dw_dma *dw = platform_get_drvdata(pdev); + + dw_dma_off(platform_get_drvdata(pdev)); + clk_disable(dw->clk); + return 0; +} + +static int dw_resume_early(struct platform_device *pdev) +{ + struct dw_dma *dw = platform_get_drvdata(pdev); + + clk_enable(dw->clk); + dma_writel(dw, CFG, DW_CFG_DMA_EN); + return 0; + +} + +static struct platform_driver dw_driver = { + .remove = __exit_p(dw_remove), + .shutdown = dw_shutdown, + .suspend_late = dw_suspend_late, + .resume_early = dw_resume_early, + .driver = { + .name = "dw_dmac", + }, +}; + +static int __init dw_init(void) +{ + return platform_driver_probe(&dw_driver, dw_probe); +} +module_init(dw_init); + +static void __exit dw_exit(void) +{ + platform_driver_unregister(&dw_driver); +} +module_exit(dw_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver"); +MODULE_AUTHOR("Haavard Skinnemoen "); diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h new file mode 100644 index 00000000000..00fdd187bb0 --- /dev/null +++ b/drivers/dma/dw_dmac_regs.h @@ -0,0 +1,225 @@ +/* + * Driver for the Synopsys DesignWare AHB DMA Controller + * + * Copyright (C) 2005-2007 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include + +#define DW_DMA_MAX_NR_CHANNELS 8 + +/* + * Redefine this macro to handle differences between 32- and 64-bit + * addressing, big vs. little endian, etc. + */ +#define DW_REG(name) u32 name; u32 __pad_##name + +/* Hardware register definitions. */ +struct dw_dma_chan_regs { + DW_REG(SAR); /* Source Address Register */ + DW_REG(DAR); /* Destination Address Register */ + DW_REG(LLP); /* Linked List Pointer */ + u32 CTL_LO; /* Control Register Low */ + u32 CTL_HI; /* Control Register High */ + DW_REG(SSTAT); + DW_REG(DSTAT); + DW_REG(SSTATAR); + DW_REG(DSTATAR); + u32 CFG_LO; /* Configuration Register Low */ + u32 CFG_HI; /* Configuration Register High */ + DW_REG(SGR); + DW_REG(DSR); +}; + +struct dw_dma_irq_regs { + DW_REG(XFER); + DW_REG(BLOCK); + DW_REG(SRC_TRAN); + DW_REG(DST_TRAN); + DW_REG(ERROR); +}; + +struct dw_dma_regs { + /* per-channel registers */ + struct dw_dma_chan_regs CHAN[DW_DMA_MAX_NR_CHANNELS]; + + /* irq handling */ + struct dw_dma_irq_regs RAW; /* r */ + struct dw_dma_irq_regs STATUS; /* r (raw & mask) */ + struct dw_dma_irq_regs MASK; /* rw (set = irq enabled) */ + struct dw_dma_irq_regs CLEAR; /* w (ack, affects "raw") */ + + DW_REG(STATUS_INT); /* r */ + + /* software handshaking */ + DW_REG(REQ_SRC); + DW_REG(REQ_DST); + DW_REG(SGL_REQ_SRC); + DW_REG(SGL_REQ_DST); + DW_REG(LAST_SRC); + DW_REG(LAST_DST); + + /* miscellaneous */ + DW_REG(CFG); + DW_REG(CH_EN); + DW_REG(ID); + DW_REG(TEST); + + /* optional encoded params, 0x3c8..0x3 */ +}; + +/* Bitfields in CTL_LO */ +#define DWC_CTLL_INT_EN (1 << 0) /* irqs enabled? 
 */ +#define DWC_CTLL_DST_WIDTH(n) ((n)<<1) /* bytes per element */ +#define DWC_CTLL_SRC_WIDTH(n) ((n)<<4) +#define DWC_CTLL_DST_INC (0<<7) /* DAR update/not */ +#define DWC_CTLL_DST_DEC (1<<7) +#define DWC_CTLL_DST_FIX (2<<7) +#define DWC_CTLL_SRC_INC (0<<9) /* SAR update/not */ +#define DWC_CTLL_SRC_DEC (1<<9) +#define DWC_CTLL_SRC_FIX (2<<9) +#define DWC_CTLL_DST_MSIZE(n) ((n)<<11) /* burst, #elements */ +#define DWC_CTLL_SRC_MSIZE(n) ((n)<<14) +#define DWC_CTLL_S_GATH_EN (1 << 17) /* src gather, !FIX */ +#define DWC_CTLL_D_SCAT_EN (1 << 18) /* dst scatter, !FIX */ +#define DWC_CTLL_FC_M2M (0 << 20) /* mem-to-mem */ +#define DWC_CTLL_FC_M2P (1 << 20) /* mem-to-periph */ +#define DWC_CTLL_FC_P2M (2 << 20) /* periph-to-mem */ +#define DWC_CTLL_FC_P2P (3 << 20) /* periph-to-periph */ +/* plus 4 transfer types for peripheral-as-flow-controller */ +#define DWC_CTLL_DMS(n) ((n)<<23) /* dst master select */ +#define DWC_CTLL_SMS(n) ((n)<<25) /* src master select */ +#define DWC_CTLL_LLP_D_EN (1 << 27) /* dest block chain */ +#define DWC_CTLL_LLP_S_EN (1 << 28) /* src block chain */ + +/* Bitfields in CTL_HI */ +#define DWC_CTLH_DONE 0x00001000 +#define DWC_CTLH_BLOCK_TS_MASK 0x00000fff + +/* Bitfields in CFG_LO. Platform-configurable bits are in <linux/dw_dmac.h> */ +#define DWC_CFGL_CH_SUSP (1 << 8) /* pause xfer */ +#define DWC_CFGL_FIFO_EMPTY (1 << 9) /* pause xfer */ +#define DWC_CFGL_HS_DST (1 << 10) /* handshake w/dst */ +#define DWC_CFGL_HS_SRC (1 << 11) /* handshake w/src */ +#define DWC_CFGL_MAX_BURST(x) ((x) << 20) +#define DWC_CFGL_RELOAD_SAR (1 << 30) +#define DWC_CFGL_RELOAD_DAR (1 << 31) + +/* Bitfields in CFG_HI. Platform-configurable bits are in <linux/dw_dmac.h> */ +#define DWC_CFGH_DS_UPD_EN (1 << 5) +#define DWC_CFGH_SS_UPD_EN (1 << 6) + +/* Bitfields in SGR */ +#define DWC_SGR_SGI(x) ((x) << 0) +#define DWC_SGR_SGC(x) ((x) << 20) + +/* Bitfields in DSR */ +#define DWC_DSR_DSI(x) ((x) << 0) +#define DWC_DSR_DSC(x) ((x) << 20) + +/* Bitfields in CFG */ +#define DW_CFG_DMA_EN (1 << 0) + +#define DW_REGLEN 0x400 + +struct dw_dma_chan { + struct dma_chan chan; + void __iomem *ch_regs; + u8 mask; + + spinlock_t lock; + + /* these other elements are all protected by lock */ + dma_cookie_t completed; + struct list_head active_list; + struct list_head queue; + struct list_head free_list; + + struct dw_dma_slave *dws; + + unsigned int descs_allocated; +}; + +static inline struct dw_dma_chan_regs __iomem * +__dwc_regs(struct dw_dma_chan *dwc) +{ + return dwc->ch_regs; +} + +#define channel_readl(dwc, name) \ + __raw_readl(&(__dwc_regs(dwc)->name)) +#define channel_writel(dwc, name, val) \ + __raw_writel((val), &(__dwc_regs(dwc)->name)) + +static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan) +{ + return container_of(chan, struct dw_dma_chan, chan); +} + + +struct dw_dma { + struct dma_device dma; + void __iomem *regs; + struct tasklet_struct tasklet; + struct clk *clk; + + u8 all_chan_mask; + + struct dw_dma_chan chan[0]; +}; + +static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw) +{ + return dw->regs; +} + +#define dma_readl(dw, name) \ + __raw_readl(&(__dw_regs(dw)->name)) +#define dma_writel(dw, name, val) \ + __raw_writel((val), &(__dw_regs(dw)->name)) + +#define channel_set_bit(dw, reg, mask) \ + dma_writel(dw, reg, ((mask) << 8) | (mask)) +#define channel_clear_bit(dw, reg, mask) \ + dma_writel(dw, reg, ((mask) << 8) | 0) + +static inline struct dw_dma *to_dw_dma(struct dma_device *ddev) +{ + return container_of(ddev, struct dw_dma, dma); +} + +/* LLI == Linked List Item; a.k.a.
DMA block descriptor */ +struct dw_lli { + /* values that are not changed by hardware */ + dma_addr_t sar; + dma_addr_t dar; + dma_addr_t llp; /* chain to next lli */ + u32 ctllo; + /* values that may get written back: */ + u32 ctlhi; + /* sstat and dstat can snapshot peripheral register state. + * silicon config may discard either or both... + */ + u32 sstat; + u32 dstat; +}; + +struct dw_desc { + /* FIRST values the hardware uses */ + struct dw_lli lli; + + /* THEN values for driver housekeeping */ + struct list_head desc_node; + struct dma_async_tx_descriptor txd; + size_t len; +}; + +static inline struct dw_desc * +txd_to_dw_desc(struct dma_async_tx_descriptor *txd) +{ + return container_of(txd, struct dw_desc, txd); +} diff --git a/include/asm-avr32/arch-at32ap/at32ap700x.h b/include/asm-avr32/arch-at32ap/at32ap700x.h index 31e48b0e732..d18a3053be0 100644 --- a/include/asm-avr32/arch-at32ap/at32ap700x.h +++ b/include/asm-avr32/arch-at32ap/at32ap700x.h @@ -30,4 +30,20 @@ #define GPIO_PIN_PD(N) (GPIO_PIOD_BASE + (N)) #define GPIO_PIN_PE(N) (GPIO_PIOE_BASE + (N)) + +/* + * DMAC peripheral hardware handshaking interfaces, used with dw_dmac + */ +#define DMAC_MCI_RX 0 +#define DMAC_MCI_TX 1 +#define DMAC_DAC_TX 2 +#define DMAC_AC97_A_RX 3 +#define DMAC_AC97_A_TX 4 +#define DMAC_AC97_B_RX 5 +#define DMAC_AC97_B_TX 6 +#define DMAC_DMAREQ_0 7 +#define DMAC_DMAREQ_1 8 +#define DMAC_DMAREQ_2 9 +#define DMAC_DMAREQ_3 10 + #endif /* __ASM_ARCH_AT32AP700X_H__ */ diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h new file mode 100644 index 00000000000..04d217b442b --- /dev/null +++ b/include/linux/dw_dmac.h @@ -0,0 +1,62 @@ +/* + * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on + * AVR32 systems.) + * + * Copyright (C) 2007 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef DW_DMAC_H +#define DW_DMAC_H + +#include + +/** + * struct dw_dma_platform_data - Controller configuration parameters + * @nr_channels: Number of channels supported by hardware (max 8) + */ +struct dw_dma_platform_data { + unsigned int nr_channels; +}; + +/** + * struct dw_dma_slave - Controller-specific information about a slave + * @slave: Generic information about the slave + * @cfg_hi: Platform-specific initializer for the CFG_HI register + * @cfg_lo: Platform-specific initializer for the CFG_LO register + */ +struct dw_dma_slave { + struct dma_slave slave; + u32 cfg_hi; + u32 cfg_lo; +}; + +/* Platform-configurable bits in CFG_HI */ +#define DWC_CFGH_FCMODE (1 << 0) +#define DWC_CFGH_FIFO_MODE (1 << 1) +#define DWC_CFGH_PROTCTL(x) ((x) << 2) +#define DWC_CFGH_SRC_PER(x) ((x) << 7) +#define DWC_CFGH_DST_PER(x) ((x) << 11) + +/* Platform-configurable bits in CFG_LO */ +#define DWC_CFGL_PRIO(x) ((x) << 5) /* priority */ +#define DWC_CFGL_LOCK_CH_XFER (0 << 12) /* scope of LOCK_CH */ +#define DWC_CFGL_LOCK_CH_BLOCK (1 << 12) +#define DWC_CFGL_LOCK_CH_XACT (2 << 12) +#define DWC_CFGL_LOCK_BUS_XFER (0 << 14) /* scope of LOCK_BUS */ +#define DWC_CFGL_LOCK_BUS_BLOCK (1 << 14) +#define DWC_CFGL_LOCK_BUS_XACT (2 << 14) +#define DWC_CFGL_LOCK_CH (1 << 15) /* channel lockout */ +#define DWC_CFGL_LOCK_BUS (1 << 16) /* busmaster lockout */ +#define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */ +#define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */ + +static inline struct dw_dma_slave *to_dw_dma_slave(struct dma_slave *slave) +{ + return container_of(slave, struct dw_dma_slave, slave); +} + +#endif /* DW_DMAC_H */ From e172274ccc55d20536fbdceb6131f38e288541e0 Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Fri, 13 Jun 2008 09:17:31 +0100 Subject: [PATCH 0055/1435] [ARM] 5088/3: pxa2xx: add pxa2xx_set_spi_info to register pxa2xx-spi platform devices Add a function to dynamically allocate and register pxa2xx-spi platform devices, to be used by PXA2xx and PXA3xx based systems. Switch pcm027 and lubbock to use it. Signed-off-by: Guennadi Liakhovetski Acked-by: Eric Miao Signed-off-by: Russell King --- arch/arm/mach-pxa/Kconfig | 1 + arch/arm/mach-pxa/devices.c | 18 ++++++++++++++++ arch/arm/mach-pxa/lubbock.c | 10 +-------- arch/arm/mach-pxa/pcm027.c | 31 +++++++++++++++++++++++++++ include/asm-arm/arch-pxa/pxa2xx_spi.h | 2 ++ 5 files changed, 53 insertions(+), 9 deletions(-) diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig index 914bb33dab9..ef7271e3781 100644 --- a/arch/arm/mach-pxa/Kconfig +++ b/arch/arm/mach-pxa/Kconfig @@ -139,6 +139,7 @@ config MACH_PCM027 bool "Phytec phyCORE-PXA270 CPU module (PCM-027)" select PXA27x select IWMMXT + select PXA_SSP endmenu diff --git a/arch/arm/mach-pxa/devices.c b/arch/arm/mach-pxa/devices.c index a6f2390ce66..abc161dd083 100644 --- a/arch/arm/mach-pxa/devices.c +++ b/arch/arm/mach-pxa/devices.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -831,3 +832,20 @@ void __init pxa3xx_set_mci3_info(struct pxamci_platform_data *info) } #endif /* CONFIG_PXA3xx */ + +/* pxa2xx-spi platform-device ID equals respective SSP platform-device ID + 1.
 * See comment in arch/arm/mach-pxa/ssp.c::ssp_probe() */ void __init pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info) { struct platform_device *pd; pd = platform_device_alloc("pxa2xx-spi", id); if (pd == NULL) { printk(KERN_ERR "pxa2xx-spi: failed to allocate device id %d\n", id); return; } pd->dev.platform_data = info; platform_device_add(pd); } diff --git a/arch/arm/mach-pxa/lubbock.c b/arch/arm/mach-pxa/lubbock.c index a3fae413920..e041cceab16 100644 --- a/arch/arm/mach-pxa/lubbock.c +++ b/arch/arm/mach-pxa/lubbock.c @@ -226,14 +226,6 @@ static struct pxa2xx_spi_master pxa_ssp_master_info = { .num_chipselect = 0, }; -static struct platform_device pxa_ssp = { - .name = "pxa2xx-spi", - .id = 1, - .dev = { - .platform_data = &pxa_ssp_master_info, - }, -}; - static int lubbock_ads7846_pendown_state(void) { /* TS_BUSY is bit 8 in LUB_MISC_RD, but pendown is irq-only */ @@ -367,7 +359,6 @@ static struct platform_device *devices[] __initdata = { &smc91x_device, &lubbock_flash_device[0], &lubbock_flash_device[1], - &pxa_ssp, }; static struct pxafb_mode_info sharp_lm8v31_mode = { @@ -501,6 +492,7 @@ static void __init lubbock_init(void) lubbock_flash_data[flashboot].name = "boot-rom"; (void) platform_add_devices(devices, ARRAY_SIZE(devices)); + pxa2xx_set_spi_info(1, &pxa_ssp_master_info); spi_register_board_info(spi_board_info, ARRAY_SIZE(spi_board_info)); } diff --git a/arch/arm/mach-pxa/pcm027.c b/arch/arm/mach-pxa/pcm027.c index 3b945eb0aee..377f3be8ce5 100644 --- a/arch/arm/mach-pxa/pcm027.c +++ b/arch/arm/mach-pxa/pcm027.c @@ -24,7 +24,9 @@ #include #include #include +#include #include + #include #include #include @@ -108,6 +110,32 @@ static struct platform_device smc91x_device = { .resource = smc91x_resources, }; +/* + * SPI host and devices + */ +static struct pxa2xx_spi_master pxa_ssp_master_info = { + .num_chipselect = 1, +}; + +static struct max7301_platform_data max7301_info = { + .base = -1, +}; + +/* bus_num must match id in pxa2xx_set_spi_info() call */ +static struct spi_board_info spi_board_info[] __initdata = { + { + .modalias = "max7301", + .platform_data = &max7301_info, + .max_speed_hz = 13000000, + .bus_num = 1, + .chip_select = 0, + .mode = SPI_MODE_0, + }, +}; + +/* + * NOR flash + */ static struct physmap_flash_data pcm027_flash_data = { .width = 4, }; @@ -190,6 +218,9 @@ static void __init pcm027_init(void) #ifdef CONFIG_MACH_PCM990_BASEBOARD pcm990_baseboard_init(); #endif + + pxa2xx_set_spi_info(1, &pxa_ssp_master_info); + spi_register_board_info(spi_board_info, ARRAY_SIZE(spi_board_info)); } static void __init pcm027_map_io(void) diff --git a/include/asm-arm/arch-pxa/pxa2xx_spi.h b/include/asm-arm/arch-pxa/pxa2xx_spi.h index 3459fb26ce9..2206cb61a9f 100644 --- a/include/asm-arm/arch-pxa/pxa2xx_spi.h +++ b/include/asm-arm/arch-pxa/pxa2xx_spi.h @@ -41,4 +41,6 @@ struct pxa2xx_spi_chip { void (*cs_control)(u32 command); }; +extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info); + #endif /*PXA2XX_SPI_H_*/ From c7b4f5ba07a16d2c84ac6cc312488cc6cc0db518 Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Tue, 1 Jul 2008 15:45:10 +0100 Subject: [PATCH 0056/1435] [ARM] 5142/1: pxa: move zaurus declarations to proper place Signed-off-by: Dmitry Baryshkov Acked-by: Eric Miao Signed-off-by: Russell King --- arch/arm/mach-pxa/Kconfig | 102 +++++++++++++++++++------------------- 1 file changed, 51 insertions(+), 51 deletions(-) diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig index
ef7271e3781..01d23e5717a 100644 --- a/arch/arm/mach-pxa/Kconfig +++ b/arch/arm/mach-pxa/Kconfig @@ -58,6 +58,57 @@ config PXA_SHARPSL SL-C3000 (Spitz), SL-C3100 (Borzoi) or SL-C6000x (Tosa) handheld computer. +config MACH_POODLE + bool "Enable Sharp SL-5600 (Poodle) Support" + depends on PXA_SHARPSL + select PXA25x + select SHARP_LOCOMO + select PXA_SSP + +config MACH_CORGI + bool "Enable Sharp SL-C700 (Corgi) Support" + depends on PXA_SHARPSL + select PXA25x + select PXA_SHARP_C7xx + +config MACH_SHEPHERD + bool "Enable Sharp SL-C750 (Shepherd) Support" + depends on PXA_SHARPSL + select PXA25x + select PXA_SHARP_C7xx + +config MACH_HUSKY + bool "Enable Sharp SL-C760 (Husky) Support" + depends on PXA_SHARPSL + select PXA25x + select PXA_SHARP_C7xx + +config MACH_AKITA + bool "Enable Sharp SL-1000 (Akita) Support" + depends on PXA_SHARPSL + select PXA27x + select PXA_SHARP_Cxx00 + select MACH_SPITZ + select I2C + select I2C_PXA + +config MACH_SPITZ + bool "Enable Sharp Zaurus SL-3000 (Spitz) Support" + depends on PXA_SHARPSL + select PXA27x + select PXA_SHARP_Cxx00 + +config MACH_BORZOI + bool "Enable Sharp Zaurus SL-3100 (Borzoi) Support" + depends on PXA_SHARPSL + select PXA27x + select PXA_SHARP_Cxx00 + +config MACH_TOSA + bool "Enable Sharp SL-6000x (Tosa) Support" + depends on PXA_SHARPSL + select PXA25x + config ARCH_PXA_ESERIES bool "PXA based Toshiba e-series PDAs" select PXA25x @@ -199,57 +250,6 @@ endif endmenu -config MACH_POODLE - bool "Enable Sharp SL-5600 (Poodle) Support" - depends on PXA_SHARPSL - select PXA25x - select SHARP_LOCOMO - select PXA_SSP - -config MACH_CORGI - bool "Enable Sharp SL-C700 (Corgi) Support" - depends on PXA_SHARPSL - select PXA25x - select PXA_SHARP_C7xx - -config MACH_SHEPHERD - bool "Enable Sharp SL-C750 (Shepherd) Support" - depends on PXA_SHARPSL - select PXA25x - select PXA_SHARP_C7xx - -config MACH_HUSKY - bool "Enable Sharp SL-C760 (Husky) Support" - depends on PXA_SHARPSL - select PXA25x - select PXA_SHARP_C7xx - -config MACH_AKITA - bool "Enable Sharp SL-1000 (Akita) Support" - depends on PXA_SHARPSL - select PXA27x - select PXA_SHARP_Cxx00 - select MACH_SPITZ - select I2C - select I2C_PXA - -config MACH_SPITZ - bool "Enable Sharp Zaurus SL-3000 (Spitz) Support" - depends on PXA_SHARPSL - select PXA27x - select PXA_SHARP_Cxx00 - -config MACH_BORZOI - bool "Enable Sharp Zaurus SL-3100 (Borzoi) Support" - depends on PXA_SHARPSL - select PXA27x - select PXA_SHARP_Cxx00 - -config MACH_TOSA - bool "Enable Sharp SL-6000x (Tosa) Support" - depends on PXA_SHARPSL - select PXA25x - config PXA25x bool help From c0b8556f2f8146bd38324b14b1ce00f249ba8ed9 Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Wed, 2 Jul 2008 13:50:09 +0100 Subject: [PATCH 0057/1435] [ARM] 5143/1: pxa: further cleanup PXA Kconfig by removing one unnecessary menu level Signed-off-by: Dmitry Baryshkov Acked-by: Eric Miao Signed-off-by: Russell King --- arch/arm/mach-pxa/Kconfig | 48 ++++++++------------------------------- 1 file changed, 10 insertions(+), 38 deletions(-) diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig index 01d23e5717a..faa2c3f6c1a 100644 --- a/arch/arm/mach-pxa/Kconfig +++ b/arch/arm/mach-pxa/Kconfig @@ -20,14 +20,17 @@ endmenu endif -menu "Select target boards" - config ARCH_GUMSTIX bool "Gumstix XScale boards" help Say Y here if you intend to run this kernel on a Gumstix Full Function Miniature Computer.
+config MACH_GUMSTIX_F + bool "Basix, Connex, ws-200ax, ws-400ax systems" + depends on ARCH_GUMSTIX + select PXA25x + config ARCH_LUBBOCK bool "Intel DBPXA250 Development Platform" select PXA25x @@ -157,6 +160,10 @@ config MACH_TRIZEPS4 bool "Keith und Koep Trizeps4 DIMM-Module" select PXA27x +config MACH_TRIZEPS4_CONXS + bool "ConXS Eval Board" + depends on MACH_TRIZEPS4 + config MACH_EM_X270 bool "CompuLab EM-x270 platform" select PXA27x @@ -192,17 +199,10 @@ config MACH_PCM027 select IWMMXT select PXA_SSP -endmenu - -choice - prompt "Used baseboard" - depends on MACH_PCM027 - config MACH_PCM990_BASEBOARD bool "PHYTEC PCM-990 development board" select HAVE_PWM - -endchoice + depends on MACH_PCM027 choice prompt "display on pcm990" @@ -219,34 +219,6 @@ config PCM990_DISPLAY_NONE endchoice -if ARCH_GUMSTIX - -choice - prompt "Select target Gumstix board" - -config MACH_GUMSTIX_F - bool "Basix, Connex, ws-200ax, ws-400ax systems" - select PXA25x - -endchoice - -endif - - -if MACH_TRIZEPS4 - -choice - prompt "Select base board for Trizeps 4 module" - -config MACH_TRIZEPS4_CONXS - bool "ConXS Eval Board" - -config MACH_TRIZEPS4_ANY - bool "another Board" - -endchoice - -endif endmenu From 6a566fbbcc902df6c9c4dc151bf399018083fb10 Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Mon, 16 Jun 2008 08:16:46 +0100 Subject: [PATCH 0058/1435] [ARM] 5095/2: pcm990: switch from pxa_gpio_mode to pxa2xx_mfp_config pxa_gpio_mode() is deprecated; use the new pxa2xx_mfp_config() function to configure GPIOs in pcm990 platform code. Convert "array, ARRAY_SIZE(array)" to "ARRAY_AND_SIZE(array)" while at it. Signed-off-by: Guennadi Liakhovetski Signed-off-by: Russell King --- arch/arm/mach-pxa/pcm990-baseboard.c | 74 +++++++++++++++------------- 1 file changed, 41 insertions(+), 33 deletions(-) diff --git a/arch/arm/mach-pxa/pcm990-baseboard.c b/arch/arm/mach-pxa/pcm990-baseboard.c index 5d87c7c866e..30023b00e47 100644 --- a/arch/arm/mach-pxa/pcm990-baseboard.c +++ b/arch/arm/mach-pxa/pcm990-baseboard.c @@ -33,14 +33,30 @@ #include #include #include -#include #include #include #include #include #include +#include #include "devices.h" +#include "generic.h" + +static unsigned long pcm990_pin_config[] __initdata = { + /* MMC */ + GPIO32_MMC_CLK, + GPIO112_MMC_CMD, + GPIO92_MMC_DAT_0, + GPIO109_MMC_DAT_1, + GPIO110_MMC_DAT_2, + GPIO111_MMC_DAT_3, + /* USB */ + GPIO88_USBH1_PWR, + GPIO89_USBH1_PEN, + /* PWM0 */ + GPIO16_PWM0_OUT, +}; /* * pcm990_lcd_power - control power supply to the LCD @@ -277,16 +293,6 @@ static int pcm990_mci_init(struct device *dev, irq_handler_t mci_detect_int, { int err; - /* - * enable GPIO for PXA27x MMC controller - */ - pxa_gpio_mode(GPIO32_MMCCLK_MD); - pxa_gpio_mode(GPIO112_MMCCMD_MD); - pxa_gpio_mode(GPIO92_MMCDAT0_MD); - pxa_gpio_mode(GPIO109_MMCDAT1_MD); - pxa_gpio_mode(GPIO110_MMCDAT2_MD); - pxa_gpio_mode(GPIO111_MMCDAT3_MD); - err = request_irq(PCM027_MMCDET_IRQ, mci_detect_int, IRQF_DISABLED, "MMC card detect", data); if (err) @@ -333,8 +339,6 @@ static struct pxamci_platform_data pcm990_mci_platform_data = { */ static int pcm990_ohci_init(struct device *dev) { - pxa_gpio_mode(PCM990_USB_OVERCURRENT); - pxa_gpio_mode(PCM990_USB_PWR_EN); /* * disable USB port 2 and 3 * power sense is active low @@ -361,23 +365,27 @@ static struct pxaohci_platform_data pcm990_ohci_platform_data = { * PXA27x Camera specific stuff */ #if defined(CONFIG_VIDEO_PXA27x) || defined(CONFIG_VIDEO_PXA27x_MODULE) +static unsigned long pcm990_camera_pin_config[] = { + /* CIF */ + GPIO98_CIF_DD_0, +
GPIO105_CIF_DD_1, + GPIO104_CIF_DD_2, + GPIO103_CIF_DD_3, + GPIO95_CIF_DD_4, + GPIO94_CIF_DD_5, + GPIO93_CIF_DD_6, + GPIO108_CIF_DD_7, + GPIO107_CIF_DD_8, + GPIO106_CIF_DD_9, + GPIO42_CIF_MCLK, + GPIO45_CIF_PCLK, + GPIO43_CIF_FV, + GPIO44_CIF_LV, +}; + static int pcm990_pxacamera_init(struct device *dev) { - pxa_gpio_mode(GPIO98_CIF_DD_0_MD); - pxa_gpio_mode(GPIO105_CIF_DD_1_MD); - pxa_gpio_mode(GPIO104_CIF_DD_2_MD); - pxa_gpio_mode(GPIO103_CIF_DD_3_MD); - pxa_gpio_mode(GPIO95_CIF_DD_4_MD); - pxa_gpio_mode(GPIO94_CIF_DD_5_MD); - pxa_gpio_mode(GPIO93_CIF_DD_6_MD); - pxa_gpio_mode(GPIO108_CIF_DD_7_MD); - pxa_gpio_mode(GPIO107_CIF_DD_8_MD); - pxa_gpio_mode(GPIO106_CIF_DD_9_MD); - pxa_gpio_mode(GPIO42_CIF_MCLK_MD); - pxa_gpio_mode(GPIO45_CIF_PCLK_MD); - pxa_gpio_mode(GPIO43_CIF_FV_MD); - pxa_gpio_mode(GPIO44_CIF_LV_MD); - + pxa2xx_mfp_config(ARRAY_AND_SIZE(pcm990_camera_pin_config)); return 0; } @@ -449,8 +457,10 @@ static struct map_desc pcm990_io_desc[] __initdata = { */ void __init pcm990_baseboard_init(void) { + pxa2xx_mfp_config(ARRAY_AND_SIZE(pcm990_pin_config)); + /* register CPLD access */ - iotable_init(pcm990_io_desc, ARRAY_SIZE(pcm990_io_desc)); + iotable_init(ARRAY_AND_SIZE(pcm990_io_desc)); /* register CPLD's IRQ controller */ pcm990_init_irq(); @@ -458,7 +468,6 @@ void __init pcm990_baseboard_init(void) #ifndef CONFIG_PCM990_DISPLAY_NONE set_pxa_fb_info(&pcm990_fbinfo); #endif - pxa_gpio_mode(GPIO16_PWM0_MD); platform_device_register(&pcm990_backlight_device); /* MMC */ @@ -473,9 +482,8 @@ void __init pcm990_baseboard_init(void) #if defined(CONFIG_VIDEO_PXA27x) || defined(CONFIG_VIDEO_PXA27x_MODULE) pxa_set_camera_info(&pcm990_pxacamera_platform_data); - i2c_register_board_info(0, pcm990_i2c_devices, - ARRAY_SIZE(pcm990_i2c_devices)); + i2c_register_board_info(0, ARRAY_AND_SIZE(pcm990_i2c_devices)); #endif - printk(KERN_INFO"PCM-990 Evaluation baseboard initialized\n"); + printk(KERN_INFO "PCM-990 Evaluation baseboard initialized\n"); } From ff279202781c473545862918a8ef1f4ef7712e47 Mon Sep 17 00:00:00 2001 From: Philipp Zabel Date: Thu, 22 May 2008 10:14:21 +0100 Subject: [PATCH 0059/1435] [ARM] 5042/1: magician: request GPIOs for pda_power Signed-off-by: Philipp Zabel Signed-off-by: Russell King --- arch/arm/mach-pxa/magician.c | 41 ++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/arch/arm/mach-pxa/magician.c b/arch/arm/mach-pxa/magician.c index 01b2fa79021..595e8c4902f 100644 --- a/arch/arm/mach-pxa/magician.c +++ b/arch/arm/mach-pxa/magician.c @@ -511,6 +511,37 @@ static struct platform_device pasic3 = { * External power */ +static int power_supply_init(struct device *dev) +{ + int ret; + + ret = gpio_request(EGPIO_MAGICIAN_CABLE_STATE_AC, "CABLE_STATE_AC"); + if (ret) + goto err_cs_ac; + ret = gpio_request(EGPIO_MAGICIAN_CABLE_STATE_USB, "CABLE_STATE_USB"); + if (ret) + goto err_cs_usb; + ret = gpio_request(EGPIO_MAGICIAN_CHARGE_EN, "CHARGE_EN"); + if (ret) + goto err_chg_en; + ret = gpio_request(GPIO30_MAGICIAN_nCHARGE_EN, "nCHARGE_EN"); + if (!ret) + ret = gpio_direction_output(GPIO30_MAGICIAN_nCHARGE_EN, 0); + if (ret) + goto err_nchg_en; + + return 0; + +err_nchg_en: + gpio_free(EGPIO_MAGICIAN_CHARGE_EN); +err_chg_en: + gpio_free(EGPIO_MAGICIAN_CABLE_STATE_USB); +err_cs_usb: + gpio_free(EGPIO_MAGICIAN_CABLE_STATE_AC); +err_cs_ac: + return ret; +} + static int magician_is_ac_online(void) { return gpio_get_value(EGPIO_MAGICIAN_CABLE_STATE_AC); @@ -527,14 +558,24 @@ static void magician_set_charge(int flags) 
gpio_set_value(EGPIO_MAGICIAN_CHARGE_EN, flags); } +static void power_supply_exit(struct device *dev) +{ + gpio_free(GPIO30_MAGICIAN_nCHARGE_EN); + gpio_free(EGPIO_MAGICIAN_CHARGE_EN); + gpio_free(EGPIO_MAGICIAN_CABLE_STATE_USB); + gpio_free(EGPIO_MAGICIAN_CABLE_STATE_AC); +} + static char *magician_supplicants[] = { "ds2760-battery.0", "backup-battery" }; static struct pda_power_pdata power_supply_info = { + .init = power_supply_init, .is_ac_online = magician_is_ac_online, .is_usb_online = magician_is_usb_online, .set_charge = magician_set_charge, + .exit = power_supply_exit, .supplied_to = magician_supplicants, .num_supplicants = ARRAY_SIZE(magician_supplicants), }; From e181191acdfde5b1f9690c74a2459dd0a3331c62 Mon Sep 17 00:00:00 2001 From: Philipp Zabel Date: Sun, 22 Jun 2008 13:00:55 +0100 Subject: [PATCH 0060/1435] [ARM] 5119/1: magician: include linux/gpio.h instead of asm/gpio.h Signed-off-by: Philipp Zabel Signed-off-by: Russell King --- arch/arm/mach-pxa/magician.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/mach-pxa/magician.c b/arch/arm/mach-pxa/magician.c index 595e8c4902f..6de2c13bd65 100644 --- a/arch/arm/mach-pxa/magician.c +++ b/arch/arm/mach-pxa/magician.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -27,7 +28,6 @@ #include #include -#include #include #include #include From 7a97010eb4f47c9d2d66bfb838a8638a8d02d9ef Mon Sep 17 00:00:00 2001 From: Philipp Zabel Date: Thu, 26 Jun 2008 21:03:54 +0100 Subject: [PATCH 0061/1435] [ARM] 5125/1: magician: move gpio pin configuration into __initdata section The pin configuration array is only used during board init. Signed-off-by: Philipp Zabel Acked-by: Eric Miao Signed-off-by: Russell King --- arch/arm/mach-pxa/magician.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/mach-pxa/magician.c b/arch/arm/mach-pxa/magician.c index 6de2c13bd65..2dc14c361c1 100644 --- a/arch/arm/mach-pxa/magician.c +++ b/arch/arm/mach-pxa/magician.c @@ -44,7 +44,7 @@ #include "devices.h" #include "generic.h" -static unsigned long magician_pin_config[] = { +static unsigned long magician_pin_config[] __initdata = { /* SDRAM and Static Memory I/O Signals */ GPIO20_nSDCS_2, From e277479aba3d35745c822540e9c3d941aabb4e98 Mon Sep 17 00:00:00 2001 From: Philipp Zabel Date: Thu, 26 Jun 2008 21:04:31 +0100 Subject: [PATCH 0062/1435] [ARM] 5126/1: magician: remove superfluous mtd includes These were only needed for hardcoded flash partition tables, which were never submitted. It is better to have the bootloader pass the partition table to the kernel instead. Signed-off-by: Philipp Zabel Signed-off-by: Russell King --- arch/arm/mach-pxa/magician.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/arm/mach-pxa/magician.c b/arch/arm/mach-pxa/magician.c index 2dc14c361c1..9f249dc726e 100644 --- a/arch/arm/mach-pxa/magician.c +++ b/arch/arm/mach-pxa/magician.c @@ -22,8 +22,6 @@ #include #include #include -#include -#include #include #include #include From d0f7f1dff01698d228dc108895fbb9b13af37dab Mon Sep 17 00:00:00 2001 From: Philipp Zabel Date: Mon, 30 Jun 2008 18:11:35 +0100 Subject: [PATCH 0063/1435] [ARM] 5137/1: magician: MACH_MAGICIAN doesn't need to depend on ARCH_PXA It is only defined inside an "if ARCH_PXA ... endif" block, so the depends on is not needed. Signed-off-by: Philipp Zabel
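The pcm990 and magician MFP conversions above all reduce to the same two steps: declare a pin table tagged __initdata, then hand it to pxa2xx_mfp_config() once at board init. A minimal sketch, with board_* names purely illustrative and pin constants as defined in the mfp-pxa27x header:

static unsigned long board_pin_config[] __initdata = {
	/* MMC */
	GPIO32_MMC_CLK,
	GPIO112_MMC_CMD,
	/* backlight PWM */
	GPIO16_PWM0_OUT,
};

static void __init board_init(void)
{
	/* one table-driven call replaces a run of pxa_gpio_mode() calls */
	pxa2xx_mfp_config(ARRAY_AND_SIZE(board_pin_config));
}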
Acked-by: Eric Miao Signed-off-by: Russell King --- arch/arm/mach-pxa/Kconfig | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig index faa2c3f6c1a..61a131fd44a 100644 --- a/arch/arm/mach-pxa/Kconfig +++ b/arch/arm/mach-pxa/Kconfig @@ -189,7 +189,6 @@ config MACH_ARMCORE config MACH_MAGICIAN bool "Enable HTC Magician Support" - depends on ARCH_PXA select PXA27x select IWMMXT From 14d1012db8462bbb7139ac4622f92e541d3019d1 Mon Sep 17 00:00:00 2001 From: Philipp Zabel Date: Mon, 30 Jun 2008 18:11:55 +0100 Subject: [PATCH 0064/1435] [ARM] 5138/1: magician: set pwm-backlight .id = -1 There will always be only one pwm-backlight on this device. Signed-off-by: Philipp Zabel Acked-by: Eric Miao Signed-off-by: Russell King --- arch/arm/mach-pxa/magician.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm/mach-pxa/magician.c b/arch/arm/mach-pxa/magician.c index 9f249dc726e..bad5821be41 100644 --- a/arch/arm/mach-pxa/magician.c +++ b/arch/arm/mach-pxa/magician.c @@ -397,6 +397,7 @@ static struct platform_pwm_backlight_data backlight_data = { static struct platform_device backlight = { .name = "pwm-backlight", + .id = -1, .dev = { .parent = &pxa27x_device_pwm0.dev, .platform_data = &backlight_data, From 3c85bce6f820258b49bb74c53d2f47c058199f1b Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Mon, 16 Jun 2008 08:16:46 +0100 Subject: [PATCH 0065/1435] [ARM] 5101/2: EM-X270 updates Convert EM-X270 pin configuration to use MFP tables and gpio library. Make device initialization dependent on respective driver CONFIG_ value (like in zylonite). Add keypad and gpio_key devices. Signed-off-by: Mike Rapoport Signed-off-by: Russell King --- arch/arm/mach-pxa/em-x270.c | 381 ++++++++++++++++++++++++++++-------- 1 file changed, 297 insertions(+), 84 deletions(-) diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c index e23865affc0..e5cc6ca63c7 100644 --- a/arch/arm/mach-pxa/em-x270.c +++ b/arch/arm/mach-pxa/em-x270.c @@ -1,7 +1,7 @@ /* - * Support for CompuLab EM-x270 platform + * Support for CompuLab EM-X270 platform * - * Copyright (C) 2007 CompuLab, Ltd. + * Copyright (C) 2007, 2008 CompuLab, Ltd.
* Author: Mike Rapoport * * This program is free software; you can redistribute it and/or modify @@ -14,31 +14,159 @@ #include #include - #include #include +#include +#include +#include #include - #include +#include #include -#include #include #include #include #include #include -#include +#include #include "generic.h" /* GPIO IRQ usage */ -#define EM_X270_MMC_PD (105) -#define EM_X270_ETHIRQ IRQ_GPIO(41) -#define EM_X270_MMC_IRQ IRQ_GPIO(13) +#define GPIO41_ETHIRQ (41) +#define GPIO13_MMC_CD (13) +#define EM_X270_ETHIRQ IRQ_GPIO(GPIO41_ETHIRQ) +#define EM_X270_MMC_CD IRQ_GPIO(GPIO13_MMC_CD) -static struct resource em_x270_dm9k_resource[] = { +/* NAND control GPIOs */ +#define GPIO11_NAND_CS (11) +#define GPIO56_NAND_RB (56) + +static unsigned long em_x270_pin_config[] = { + /* AC'97 */ + GPIO28_AC97_BITCLK, + GPIO29_AC97_SDATA_IN_0, + GPIO30_AC97_SDATA_OUT, + GPIO31_AC97_SYNC, + GPIO98_AC97_SYSCLK, + GPIO113_AC97_nRESET, + + /* BTUART */ + GPIO42_BTUART_RXD, + GPIO43_BTUART_TXD, + GPIO44_BTUART_CTS, + GPIO45_BTUART_RTS, + + /* STUART */ + GPIO46_STUART_RXD, + GPIO47_STUART_TXD, + + /* MCI controller */ + GPIO32_MMC_CLK, + GPIO112_MMC_CMD, + GPIO92_MMC_DAT_0, + GPIO109_MMC_DAT_1, + GPIO110_MMC_DAT_2, + GPIO111_MMC_DAT_3, + + /* LCD */ + GPIO58_LCD_LDD_0, + GPIO59_LCD_LDD_1, + GPIO60_LCD_LDD_2, + GPIO61_LCD_LDD_3, + GPIO62_LCD_LDD_4, + GPIO63_LCD_LDD_5, + GPIO64_LCD_LDD_6, + GPIO65_LCD_LDD_7, + GPIO66_LCD_LDD_8, + GPIO67_LCD_LDD_9, + GPIO68_LCD_LDD_10, + GPIO69_LCD_LDD_11, + GPIO70_LCD_LDD_12, + GPIO71_LCD_LDD_13, + GPIO72_LCD_LDD_14, + GPIO73_LCD_LDD_15, + GPIO74_LCD_FCLK, + GPIO75_LCD_LCLK, + GPIO76_LCD_PCLK, + GPIO77_LCD_BIAS, + + /* QCI */ + GPIO84_CIF_FV, + GPIO25_CIF_LV, + GPIO53_CIF_MCLK, + GPIO54_CIF_PCLK, + GPIO81_CIF_DD_0, + GPIO55_CIF_DD_1, + GPIO51_CIF_DD_2, + GPIO50_CIF_DD_3, + GPIO52_CIF_DD_4, + GPIO48_CIF_DD_5, + GPIO17_CIF_DD_6, + GPIO12_CIF_DD_7, + + /* I2C */ + GPIO117_I2C_SCL, + GPIO118_I2C_SDA, + + /* Keypad */ + GPIO100_KP_MKIN_0 | WAKEUP_ON_LEVEL_HIGH, + GPIO101_KP_MKIN_1 | WAKEUP_ON_LEVEL_HIGH, + GPIO102_KP_MKIN_2 | WAKEUP_ON_LEVEL_HIGH, + GPIO34_KP_MKIN_3 | WAKEUP_ON_LEVEL_HIGH, + GPIO39_KP_MKIN_4 | WAKEUP_ON_LEVEL_HIGH, + GPIO99_KP_MKIN_5 | WAKEUP_ON_LEVEL_HIGH, + GPIO91_KP_MKIN_6 | WAKEUP_ON_LEVEL_HIGH, + GPIO36_KP_MKIN_7 | WAKEUP_ON_LEVEL_HIGH, + GPIO103_KP_MKOUT_0, + GPIO104_KP_MKOUT_1, + GPIO105_KP_MKOUT_2, + GPIO106_KP_MKOUT_3, + GPIO107_KP_MKOUT_4, + GPIO108_KP_MKOUT_5, + GPIO96_KP_MKOUT_6, + GPIO22_KP_MKOUT_7, + + /* SSP1 */ + GPIO26_SSP1_RXD, + GPIO23_SSP1_SCLK, + GPIO24_SSP1_SFRM, + GPIO57_SSP1_TXD, + + /* SSP2 */ + GPIO19_SSP2_SCLK, + GPIO14_SSP2_SFRM, + GPIO89_SSP2_TXD, + GPIO88_SSP2_RXD, + + /* SDRAM and local bus */ + GPIO15_nCS_1, + GPIO78_nCS_2, + GPIO79_nCS_3, + GPIO80_nCS_4, + GPIO49_nPWE, + GPIO18_RDY, + + /* GPIO */ + GPIO1_GPIO | WAKEUP_ON_EDGE_BOTH, + + /* power controls */ + GPIO20_GPIO | MFP_LPM_DRIVE_LOW, /* GPRS_PWEN */ + GPIO115_GPIO | MFP_LPM_DRIVE_LOW, /* WLAN_PWEN */ + + /* NAND controls */ + GPIO11_GPIO | MFP_LPM_DRIVE_HIGH, /* NAND CE# */ + GPIO56_GPIO, /* NAND Ready/Busy */ + + /* interrupts */ + GPIO13_GPIO, /* MMC card detect */ + GPIO41_GPIO, /* DM9000 interrupt */ +}; + +#if defined(CONFIG_DM9000) || defined(CONFIG_DM9000_MODULE) +static struct resource em_x270_dm9000_resource[] = { [0] = { .start = PXA_CS2_PHYS, .end = PXA_CS2_PHYS + 3, @@ -52,36 +180,34 @@ static struct resource em_x270_dm9k_resource[] = { [2] = { .start = EM_X270_ETHIRQ, .end = EM_X270_ETHIRQ, - .flags = IORESOURCE_IRQ, + .flags = IORESOURCE_IRQ | 
IORESOURCE_IRQ_HIGHEDGE, } }; -/* for the moment we limit ourselves to 32bit IO until some - * better IO routines can be written and tested - */ -static struct dm9000_plat_data em_x270_dm9k_platdata = { +static struct dm9000_plat_data em_x270_dm9000_platdata = { .flags = DM9000_PLATF_32BITONLY, }; -/* Ethernet device */ -static struct platform_device em_x270_dm9k = { +static struct platform_device em_x270_dm9000 = { .name = "dm9000", .id = 0, - .num_resources = ARRAY_SIZE(em_x270_dm9k_resource), - .resource = em_x270_dm9k_resource, + .num_resources = ARRAY_SIZE(em_x270_dm9000_resource), + .resource = em_x270_dm9000_resource, .dev = { - .platform_data = &em_x270_dm9k_platdata, + .platform_data = &em_x270_dm9000_platdata, } }; -/* WM9712 touchscreen controller. Hopefully the driver will make it to - * the mainstream sometime */ -static struct platform_device em_x270_ts = { - .name = "wm97xx-ts", - .id = -1, -}; +static void __init em_x270_init_dm9000(void) +{ + platform_device_register(&em_x270_dm9000); +} +#else +static inline void em_x270_init_dm9000(void) {} +#endif -/* RTC */ +/* V3020 RTC */ +#if defined(CONFIG_RTC_DRV_V3020) || defined(CONFIG_RTC_DRV_V3020_MODULE) static struct resource em_x270_v3020_resource[] = { [0] = { .start = PXA_CS4_PHYS, @@ -104,20 +230,26 @@ static struct platform_device em_x270_rtc = { } }; -/* NAND flash */ -#define GPIO_NAND_CS (11) -#define GPIO_NAND_RB (56) +static void __init em_x270_init_rtc(void) +{ + platform_device_register(&em_x270_rtc); +} +#else +static inline void em_x270_init_rtc(void) {} +#endif +/* NAND flash */ +#if defined(CONFIG_MTD_NAND_PLATFORM) || defined(CONFIG_MTD_NAND_PLATFORM_MODULE) static inline void nand_cs_on(void) { - GPCR(GPIO_NAND_CS) = GPIO_bit(GPIO_NAND_CS); + gpio_set_value(GPIO11_NAND_CS, 0); } static void nand_cs_off(void) { dsb(); - GPSR(GPIO_NAND_CS) = GPIO_bit(GPIO_NAND_CS); + gpio_set_value(GPIO11_NAND_CS, 1); } /* hardware specific access to control-lines */ @@ -157,7 +289,7 @@ static int em_x270_nand_device_ready(struct mtd_info *mtd) { dsb(); - return GPLR(GPIO_NAND_RB) & GPIO_bit(GPIO_NAND_RB); + return gpio_get_value(GPIO56_NAND_RB); } static struct mtd_partition em_x270_partition_info[] = { @@ -210,16 +342,35 @@ static struct platform_device em_x270_nand = { } }; -/* platform devices */ -static struct platform_device *platform_devices[] __initdata = { - &em_x270_dm9k, - &em_x270_ts, - &em_x270_rtc, - &em_x270_nand, -}; +static void __init em_x270_init_nand(void) +{ + int err; + err = gpio_request(GPIO11_NAND_CS, "NAND CS"); + if (err) { + pr_warning("EM-X270: failed to request NAND CS gpio\n"); + return; + } + + gpio_direction_output(GPIO11_NAND_CS, 1); + + err = gpio_request(GPIO56_NAND_RB, "NAND R/B"); + if (err) { + pr_warning("EM-X270: failed to request NAND R/B gpio\n"); + gpio_free(GPIO11_NAND_CS); + return; + } + + gpio_direction_input(GPIO56_NAND_RB); + + platform_device_register(&em_x270_nand); +} +#else +static inline void em_x270_init_nand(void) {} +#endif /* PXA27x OHCI controller setup */ +#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) static int em_x270_ohci_init(struct device *dev) { /* Set the Power Control Polarity Low */ @@ -237,27 +388,23 @@ static struct pxaohci_platform_data em_x270_ohci_platform_data = { .init = em_x270_ohci_init, }; +static void __init em_x270_init_ohci(void) +{ + pxa_set_ohci_info(&em_x270_ohci_platform_data); +} +#else +static inline void em_x270_init_ohci(void) {} +#endif +/* MCI controller setup */ +#if defined(CONFIG_MMC) || 
defined(CONFIG_MMC_MODULE) static int em_x270_mci_init(struct device *dev, irq_handler_t em_x270_detect_int, void *data) { - int err; - - /* setup GPIO for PXA27x MMC controller */ - pxa_gpio_mode(GPIO32_MMCCLK_MD); - pxa_gpio_mode(GPIO112_MMCCMD_MD); - pxa_gpio_mode(GPIO92_MMCDAT0_MD); - pxa_gpio_mode(GPIO109_MMCDAT1_MD); - pxa_gpio_mode(GPIO110_MMCDAT2_MD); - pxa_gpio_mode(GPIO111_MMCDAT3_MD); - - /* EM-X270 uses GPIO13 as SD power enable */ - pxa_gpio_mode(EM_X270_MMC_PD | GPIO_OUT); - - err = request_irq(EM_X270_MMC_IRQ, em_x270_detect_int, - IRQF_DISABLED | IRQF_TRIGGER_FALLING, - "MMC card detect", data); + int err = request_irq(EM_X270_MMC_CD, em_x270_detect_int, + IRQF_DISABLED | IRQF_TRIGGER_FALLING, + "MMC card detect", data); if (err) { printk(KERN_ERR "%s: can't request MMC card detect IRQ: %d\n", __func__, err); @@ -279,7 +426,8 @@ static void em_x270_mci_setpower(struct device *dev, unsigned int vdd) static void em_x270_mci_exit(struct device *dev, void *data) { - free_irq(EM_X270_MMC_IRQ, data); + int irq = gpio_to_irq(GPIO13_MMC_CD); + free_irq(irq, data); } static struct pxamci_platform_data em_x270_mci_platform_data = { @@ -289,7 +437,16 @@ static struct pxamci_platform_data em_x270_mci_platform_data = { .exit = em_x270_mci_exit, }; +static void __init em_x270_init_mmc(void) +{ + pxa_set_mci_info(&em_x270_mci_platform_data); +} +#else +static inline void em_x270_init_mmc(void) {} +#endif + /* LCD 480x640 */ +#if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE) static struct pxafb_mode_info em_x270_lcd_mode = { .pixclock = 50000, .bpp = 16, @@ -307,40 +464,96 @@ static struct pxafb_mode_info em_x270_lcd_mode = { static struct pxafb_mach_info em_x270_lcd = { .modes = &em_x270_lcd_mode, .num_modes = 1, - .cmap_inverse = 0, - .cmap_static = 0, - .lccr0 = LCCR0_PAS, - .lccr3 = LCCR3_PixClkDiv(0x01) | LCCR3_Acb(0xff), + .lcd_conn = LCD_COLOR_TFT_16BPP, }; +static void __init em_x270_init_lcd(void) +{ + set_pxa_fb_info(&em_x270_lcd); +} +#else +static inline void em_x270_init_lcd(void) {} +#endif + +#if defined(CONFIG_SND_PXA2XX_AC97) || defined(CONFIG_SND_PXA2XX_AC97_MODULE) +static void __init em_x270_init_ac97(void) +{ + pxa_set_ac97_info(NULL); +} +#else +static inline void em_x270_init_ac97(void) {} +#endif + +#if defined(CONFIG_KEYBOARD_PXA27x) || defined(CONFIG_KEYBOARD_PXA27x_MODULE) +static unsigned int em_x270_matrix_keys[] = { + KEY(0, 0, KEY_A), KEY(1, 0, KEY_UP), KEY(2, 1, KEY_B), + KEY(0, 2, KEY_LEFT), KEY(1, 1, KEY_ENTER), KEY(2, 0, KEY_RIGHT), + KEY(0, 1, KEY_C), KEY(1, 2, KEY_DOWN), KEY(2, 2, KEY_D), +}; + +struct pxa27x_keypad_platform_data em_x270_keypad_info = { + /* code map for the matrix keys */ + .matrix_key_rows = 3, + .matrix_key_cols = 3, + .matrix_key_map = em_x270_matrix_keys, + .matrix_key_map_size = ARRAY_SIZE(em_x270_matrix_keys), +}; + +static void __init em_x270_init_keypad(void) +{ + pxa_set_keypad_info(&em_x270_keypad_info); +} +#else +static inline void em_x270_init_keypad(void) {} +#endif + +#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) +static struct gpio_keys_button gpio_keys_button[] = { + [0] = { + .desc = "sleep/wakeup", + .code = KEY_SUSPEND, + .type = EV_PWR, + .gpio = 1, + .wakeup = 1, + }, +}; + +static struct gpio_keys_platform_data em_x270_gpio_keys_data = { + .buttons = gpio_keys_button, + .nbuttons = 1, +}; + +static struct platform_device em_x270_gpio_keys = { + .name = "gpio-keys", + .id = -1, + .dev = { + .platform_data = &em_x270_gpio_keys_data, + }, +}; + +static void __init 
em_x270_init_gpio_keys(void) +{ + platform_device_register(&em_x270_gpio_keys); +} +#else +static inline void em_x270_init_gpio_keys(void) {} +#endif static void __init em_x270_init(void) { - /* setup LCD */ - set_pxa_fb_info(&em_x270_lcd); + pxa2xx_mfp_config(ARRAY_AND_SIZE(em_x270_pin_config)); - /* register EM-X270 platform devices */ - platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices)); - pxa_set_ac97_info(NULL); - - /* set MCI and OHCI platform parameters */ - pxa_set_mci_info(&em_x270_mci_platform_data); - pxa_set_ohci_info(&em_x270_ohci_platform_data); - - /* setup STUART GPIOs */ - pxa_gpio_mode(GPIO46_STRXD_MD); - pxa_gpio_mode(GPIO47_STTXD_MD); - - /* setup BTUART GPIOs */ - pxa_gpio_mode(GPIO42_BTRXD_MD); - pxa_gpio_mode(GPIO43_BTTXD_MD); - pxa_gpio_mode(GPIO44_BTCTS_MD); - pxa_gpio_mode(GPIO45_BTRTS_MD); - - /* Setup interrupt for dm9000 */ - set_irq_type(EM_X270_ETHIRQ, IRQT_RISING); + em_x270_init_dm9000(); + em_x270_init_rtc(); + em_x270_init_nand(); + em_x270_init_lcd(); + em_x270_init_mmc(); + em_x270_init_ohci(); + em_x270_init_keypad(); + em_x270_init_gpio_keys(); + em_x270_init_ac97(); } -MACHINE_START(EM_X270, "Compulab EM-x270") +MACHINE_START(EM_X270, "Compulab EM-X270") .boot_params = 0xa0000100, .phys_io = 0x40000000, .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc, From 2f01a973729712c373ccc8acde4d218c38f3029a Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Tue, 17 Jun 2008 12:29:58 +0100 Subject: [PATCH 0066/1435] [ARM] 5103/2: CM-X270: update core platform support Convert CM-X270 pin configuration to use MFP tables. Make device initialization dependent on respective driver CONFIG_ value (like in zylonite). Clean up includes. Signed-off-by: Mike Rapoport Acked-by: Eric Miao Signed-off-by: Russell King --- arch/arm/mach-pxa/cm-x270-pci.c | 27 ++- arch/arm/mach-pxa/cm-x270-pci.h | 14 +- arch/arm/mach-pxa/cm-x270.c | 409 ++++++++++++++++++++++---------- 3 files changed, 309 insertions(+), 141 deletions(-) diff --git a/arch/arm/mach-pxa/cm-x270-pci.c b/arch/arm/mach-pxa/cm-x270-pci.c index 319c9ff3ab9..bcf0cde6ccc 100644 --- a/arch/arm/mach-pxa/cm-x270-pci.c +++ b/arch/arm/mach-pxa/cm-x270-pci.c @@ -5,7 +5,7 @@ * * Bits taken from various places. * - * Copyright (C) 2007 Compulab, Ltd. + * Copyright (C) 2007, 2008 Compulab, Ltd. * Mike Rapoport * * This program is free software; you can redistribute it and/or modify @@ -19,16 +19,16 @@ #include #include #include +#include #include -#include #include -#include #include #include -unsigned long it8152_base_address = CMX270_IT8152_VIRT; +unsigned long it8152_base_address; +static int cmx270_it8152_irq_gpio; /* * Only first 64MB of memory can be accessed via PCI.
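The "initialization dependent on respective driver CONFIG_ value" wording in this log and the previous one refers to the stub idiom visible throughout em-x270.c above; a minimal sketch, with the dm9000 chosen only as an example and board_* names purely illustrative:

#include <linux/init.h>
#include <linux/platform_device.h>

#if defined(CONFIG_DM9000) || defined(CONFIG_DM9000_MODULE)
static struct platform_device board_dm9000_device = {
	.name	= "dm9000",
	.id	= 0,
};

static void __init board_init_dm9000(void)
{
	platform_device_register(&board_dm9000_device);
}
#else
/* the empty stub keeps the call site below free of #ifdefs */
static inline void board_init_dm9000(void) {}
#endif

static void __init board_init(void)
{
	board_init_dm9000();
}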
@@ -42,7 +42,7 @@ void __init cmx270_pci_adjust_zones(int node, unsigned long *zone_size, unsigned int sz = SZ_64M >> PAGE_SHIFT; if (machine_is_armcore()) { - pr_info("Adjusting zones for CM-x270\n"); + pr_info("Adjusting zones for CM-X270\n"); /* * Only adjust if > 64M on current system @@ -60,19 +60,20 @@ void __init cmx270_pci_adjust_zones(int node, unsigned long *zone_size, static void cmx270_it8152_irq_demux(unsigned int irq, struct irq_desc *desc) { /* clear our parent irq */ - GEDR(GPIO_IT8152_IRQ) = GPIO_bit(GPIO_IT8152_IRQ); + GEDR(cmx270_it8152_irq_gpio) = GPIO_bit(cmx270_it8152_irq_gpio); it8152_irq_demux(irq, desc); } -void __cmx270_pci_init_irq(void) +void __cmx270_pci_init_irq(int irq_gpio) { it8152_init_irq(); - pxa_gpio_mode(IRQ_TO_GPIO(GPIO_IT8152_IRQ)); - set_irq_type(IRQ_GPIO(GPIO_IT8152_IRQ), IRQT_RISING); - set_irq_chained_handler(IRQ_GPIO(GPIO_IT8152_IRQ), - cmx270_it8152_irq_demux); + cmx270_it8152_irq_gpio = irq_gpio; + + set_irq_type(gpio_to_irq(irq_gpio), IRQT_RISING); + + set_irq_chained_handler(gpio_to_irq(irq_gpio), cmx270_it8152_irq_demux); } #ifdef CONFIG_PM @@ -115,8 +116,8 @@ static int __init cmx270_pci_map_irq(struct pci_dev *dev, u8 slot, u8 pin) /* Here comes the ugly part. The routing is baseboard specific, - but defining a platform for each possible base of CM-x270 is - unrealistic. Here we keep mapping for ATXBase and SB-x270. + but defining a platform for each possible base of CM-X270 is + unrealistic. Here we keep mapping for ATXBase and SB-X270. */ /* ATXBASE PCI slot */ if (slot == 7) diff --git a/arch/arm/mach-pxa/cm-x270-pci.h b/arch/arm/mach-pxa/cm-x270-pci.h index ffe37b66f9a..48f532f4cb5 100644 --- a/arch/arm/mach-pxa/cm-x270-pci.h +++ b/arch/arm/mach-pxa/cm-x270-pci.h @@ -1,13 +1,13 @@ -extern void __cmx270_pci_init_irq(void); +extern void __cmx270_pci_init_irq(int irq_gpio); extern void __cmx270_pci_suspend(void); extern void __cmx270_pci_resume(void); #ifdef CONFIG_PCI -#define cmx270_pci_init_irq __cmx270_pci_init_irq -#define cmx270_pci_suspend __cmx270_pci_suspend -#define cmx270_pci_resume __cmx270_pci_resume +#define cmx270_pci_init_irq(x) __cmx270_pci_init_irq(x) +#define cmx270_pci_suspend(x) __cmx270_pci_suspend(x) +#define cmx270_pci_resume(x) __cmx270_pci_resume(x) #else -#define cmx270_pci_init_irq() do {} while (0) -#define cmx270_pci_suspend() do {} while (0) -#define cmx270_pci_resume() do {} while (0) +#define cmx270_pci_init_irq(x) do {} while (0) +#define cmx270_pci_suspend(x) do {} while (0) +#define cmx270_pci_resume(x) do {} while (0) #endif diff --git a/arch/arm/mach-pxa/cm-x270.c b/arch/arm/mach-pxa/cm-x270.c index b3c3fd72d04..402e807eae5 100644 --- a/arch/arm/mach-pxa/cm-x270.c +++ b/arch/arm/mach-pxa/cm-x270.c @@ -1,7 +1,7 @@ /* * linux/arch/arm/mach-pxa/cm-x270.c * - * Copyright (C) 2007 CompuLab, Ltd. + * Copyright (C) 2007, 2008 CompuLab, Ltd. * Mike Rapoport * * This program is free software; you can redistribute it and/or modify @@ -9,44 +9,156 @@ * published by the Free Software Foundation. */ -#include -#include -#include #include -#include #include -#include -#include +#include +#include #include #include -#include - #include