Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next

Pull sparc updates from David Miller:

1) Kill off support for sun4c and Cypress sun4m chips.

   And as a result we were able to also kill off that ugly btfixup thing
   that required multi-stage links of the final vmlinux image in the
   Kbuild system.  This should make the kbuild maintainers really happy.

   Thanks a lot to Sam Ravnborg for his tireless efforts to get this
   going.

2) Convert sparc64 to nobootmem.  I suspect now with sparc32 being a lot
   cleaner, it should be able to fall in line and modernize in this area
   too.

3) Make sparc32 use generic clockevents, from Tkhai Kirill.

[ I fixed up the BPF rules, and tried to clean up the build rules too.
  But I don't have - or want - a sparc cross-build environment, so the
  BPF rule bug and the related build cleanup was all done with just a
  bare "make -n" pseudo-test.      - Linus ]

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next: (110 commits)
  sparc32: use flushi when run-time patching in per_cpu_patch
  sparc32: fix cpuid_patch run-time patching
  sparc32: drop unused inline functions in srmmu.c
  sparc32: drop unused functions in pgtsrmmu.h
  sparc32,leon: move leon mmu functions to leon_mm.c
  sparc32,leon: remove duplicate definitions in leon.h
  sparc32,leon: remove duplicate UART register definitions
  sparc32,leon: move leon ASI definitions to asi.h
  sparc32: move trap table to a separate file
  sparc64: renamed ttable.S to ttable_64.S
  sparc32: Remove asm/sysen.h header.
  sparc32: Delete asm/smpprim.h
  sparc32: Remove unused empty_bad_page{,_table} declarations.
  sparc32: Kill boot_cpu_id4
  sparc32: Move GET_PROCESSOR*_ID() out of asm/asmmacro.h
  sparc32: Remove completely unused code from asm/cache.h
  sparc32: Add ucmpdi2.o to obj-y instead of lib-y.
  sparc32: add ucmpdi2
  sparc: introduce arch/sparc/Kbuild
  sparc: remove obsolete documentation
  ...
Linus Torvalds 2012-05-21 10:32:01 -07:00
commit 9daeaa3705
137 changed files with 2257 additions and 10787 deletions


@ -1,46 +0,0 @@
BTFIXUP
-------
To build new kernels you have to issue "make image". The finished kernel,
in ELF format, is placed in arch/sparc/boot/image. An explanation follows below.
BTFIXUP is a feature unique to Linux/sparc among the architectures,
developed by Jakub Jelinek (I think... Obviously David S. Miller took
part, too). It allows the same kernel to boot on different
sub-architectures, such as sun4c, sun4m, and sun4d, where SunOS uses
different kernels. This feature is convenient for people who move
disks between boxes and for distribution builders.
To function, BTFIXUP must first produce a draft link of the kernel,
analyze the result, generate special stub code based on it, and
build the final kernel with that stub (btfix.o).
Kai Germaschewski improved the build system of the kernel significantly
in the 2.5 series. Unfortunately, the traditional way of running the draft
link from the architecture-specific Makefile before the actual link done
by the generic Makefile is nearly impossible to support properly in the
new build system. Therefore, the way BTFIXUP is integrated with the
build system was changed in 2.5.40. Now the generic Makefile performs
the draft link and stores the result in the file vmlinux.
Architecture-specific post-processing then invokes the BTFIXUP machinery
and the final link, in the same way that other architectures build bootstraps.
Implications of that change are as follows.
1. Hackers must now type "make image" instead of just "make", in the same
way s390 people do. It is analogous to "make bzImage" on i386.
This does NOT affect sparc64; you continue to use "make" to build sparc64
kernels.
2. vmlinux is not the final kernel, so RPM builders have to adjust
their spec files (if they delivered vmlinux for debugging).
System.map generated for vmlinux is still valid.
3. Scripts that produce a.out images have to be changed. First, if they
invoke make, they have to use "make image". Second, they have to pick up
the new kernel in arch/sparc/boot/image instead of vmlinux.
4. Since we are compliant with Kai's build system now, make -j is permitted.
-- Pete Zaitcev
zaitcev@yahoo.com
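
In use, the mechanism described above looked roughly like the sketch below. It is only an illustration pieced together from the asm/btfixup.h and cacheflush_32.h hunks removed later in this commit; the wrapper function and backend names are hypothetical.

/* Header side: declare a boot-time patchable call slot plus its wrapper. */
BTFIXUPDEF_CALL(void, flush_cache_all, void)
#define flush_cache_all() BTFIXUP_CALL(flush_cache_all)()

/* Boot side: after probing the MMU type, point the slot at the chosen
 * implementation and let btfixup() rewrite the call sites in place.
 * BTFIXUPSET_CALL() may only be used from __init code.
 */
static void __init example_load_mmu(void)		/* hypothetical */
{
	BTFIXUPSET_CALL(flush_cache_all, hypersparc_flush_cache_all,
			BTFIXUPCALL_NORM);		/* illustrative target */
	btfixup();
}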

arch/sparc/Kbuild Normal file

@ -0,0 +1,8 @@
#
# core part of the sparc kernel
#
obj-y += kernel/
obj-y += mm/
obj-y += math-emu/
obj-y += net/


@ -30,7 +30,7 @@ config SPARC
select USE_GENERIC_SMP_HELPERS if SMP
select GENERIC_PCI_IOMAP
select HAVE_NMI_WATCHDOG if SPARC64
select HAVE_BPF_JIT
select HAVE_BPF_JIT if NET
config SPARC32
def_bool !64BIT
@ -62,6 +62,7 @@ config SPARC64
select IRQ_PREFLOW_FASTEOI
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select HAVE_C_RECORDMCOUNT
select NO_BOOTMEM
config ARCH_DEFCONFIG
string
@ -74,17 +75,12 @@ config BITS
default 32 if SPARC32
default 64 if SPARC64
config ARCH_USES_GETTIMEOFFSET
bool
default y if SPARC32
config GENERIC_CMOS_UPDATE
bool
default y
config GENERIC_CLOCKEVENTS
bool
default y if SPARC64
def_bool y
config IOMMU_HELPER
bool
@ -155,7 +151,7 @@ source "kernel/Kconfig.freezer"
menu "Processor type and features"
config SMP
bool "Symmetric multi-processing support (does not work on sun4/sun4c)"
bool "Symmetric multi-processing support"
---help---
This enables support for systems with more than one CPU. If you have
a system with only one CPU, say N. If you have a system with more


@ -19,39 +19,27 @@ ifeq ($(CONFIG_SPARC32),y)
# sparc32
#
#
# Uncomment the first KBUILD_CFLAGS if you are doing kgdb source level
# debugging of the kernel to get the proper debugging information.
AS := $(AS) -32
LDFLAGS := -m elf32_sparc
CHECKFLAGS += -D__sparc__
LDFLAGS := -m elf32_sparc
export BITS := 32
UTS_MACHINE := sparc
#KBUILD_CFLAGS += -g -pipe -fcall-used-g5 -fcall-used-g7
KBUILD_CFLAGS += -m32 -pipe -mno-fpu -fcall-used-g5 -fcall-used-g7
KBUILD_AFLAGS += -m32 -Wa,-Av8
#LDFLAGS_vmlinux = -N -Ttext 0xf0004000
# Since 2.5.40, the first stage is left not btfix-ed.
# Actual linking is done with "make image".
LDFLAGS_vmlinux = -r
KBUILD_CFLAGS += -m32 -mcpu=v8 -pipe -mno-fpu -fcall-used-g5 -fcall-used-g7
KBUILD_AFLAGS += -m32 -Wa,-Av8
else
#####
# sparc64
#
CHECKFLAGS += -D__sparc__ -D__sparc_v9__ -D__arch64__ -m64
CHECKFLAGS += -D__sparc__ -D__sparc_v9__ -D__arch64__ -m64
LDFLAGS := -m elf64_sparc
export BITS := 64
UTS_MACHINE := sparc64
LDFLAGS := -m elf64_sparc
export BITS := 64
UTS_MACHINE := sparc64
KBUILD_CFLAGS += -m64 -pipe -mno-fpu -mcpu=ultrasparc -mcmodel=medlow \
-ffixed-g4 -ffixed-g5 -fcall-used-g7 -Wno-sign-compare \
-Wa,--undeclared-regs
KBUILD_CFLAGS += -m64 -pipe -mno-fpu -mcpu=ultrasparc -mcmodel=medlow
KBUILD_CFLAGS += -ffixed-g4 -ffixed-g5 -fcall-used-g7 -Wno-sign-compare
KBUILD_CFLAGS += -Wa,--undeclared-regs
KBUILD_CFLAGS += $(call cc-option,-mtune=ultrasparc3)
KBUILD_AFLAGS += -m64 -mcpu=ultrasparc -Wa,--undeclared-regs
@ -64,26 +52,14 @@ endif
head-y := arch/sparc/kernel/head_$(BITS).o
head-y += arch/sparc/kernel/init_task.o
core-y += arch/sparc/kernel/
core-y += arch/sparc/mm/ arch/sparc/math-emu/
core-y += arch/sparc/net/
# See arch/sparc/Kbuild for the core part of the kernel
core-y += arch/sparc/
libs-y += arch/sparc/prom/
libs-y += arch/sparc/lib/
drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
# Export what is needed by arch/sparc/boot/Makefile
export VMLINUX_INIT VMLINUX_MAIN
VMLINUX_INIT := $(head-y) $(init-y)
VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
VMLINUX_MAIN += $(drivers-y) $(net-y)
ifdef CONFIG_KALLSYMS
export kallsyms.o := .tmp_kallsyms2.o
endif
boot := arch/sparc/boot
# Default target


@ -6,8 +6,8 @@
ROOT_IMG := /usr/src/root.img
ELFTOAOUT := elftoaout
hostprogs-y := piggyback btfixupprep
targets := tftpboot.img btfix.o btfix.S image zImage vmlinux.aout
hostprogs-y := piggyback
targets := tftpboot.img image zImage vmlinux.aout
clean-files := System.map
quiet_cmd_elftoaout = ELFTOAOUT $@
@ -17,58 +17,9 @@ quiet_cmd_piggy = PIGGY $@
quiet_cmd_strip = STRIP $@
cmd_strip = $(STRIP) -R .comment -R .note -K sun4u_init -K _end -K _start $< -o $@
ifeq ($(CONFIG_SPARC32),y)
quiet_cmd_btfix = BTFIX $@
cmd_btfix = $(OBJDUMP) -x vmlinux | $(obj)/btfixupprep > $@
quiet_cmd_sysmap = SYSMAP $(obj)/System.map
cmd_sysmap = $(CONFIG_SHELL) $(srctree)/scripts/mksysmap
quiet_cmd_image = LD $@
cmd_image = $(LD) $(LDFLAGS) $(EXTRA_LDFLAGS) $(LDFLAGS_$(@F)) -o $@
define rule_image
$(if $($(quiet)cmd_image), \
echo ' $($(quiet)cmd_image)' &&) \
$(cmd_image); \
$(if $($(quiet)cmd_sysmap), \
echo ' $($(quiet)cmd_sysmap)' &&) \
$(cmd_sysmap) $@ $(obj)/System.map; \
if [ $$? -ne 0 ]; then \
rm -f $@; \
/bin/false; \
fi; \
echo 'cmd_$@ := $(cmd_image)' > $(@D)/.$(@F).cmd
endef
BTOBJS := $(patsubst %/, %/built-in.o, $(VMLINUX_INIT))
BTLIBS := $(patsubst %/, %/built-in.o, $(VMLINUX_MAIN))
LDFLAGS_image := -T arch/sparc/kernel/vmlinux.lds $(BTOBJS) \
--start-group $(BTLIBS) --end-group \
$(kallsyms.o) $(obj)/btfix.o
# Link the final image including btfixup'ed symbols.
# This is a replacement for the link done in the top-level Makefile.
# Note: No dependency on the prerequisite files since that would require
# make to try check if they are updated - and due to changes
# in gcc options (path for example) this would result in
# these files being recompiled for each build.
$(obj)/image: $(obj)/btfix.o FORCE
$(call if_changed_rule,image)
$(obj)/zImage: $(obj)/image
$(call if_changed,strip)
@echo ' kernel: $@ is ready'
$(obj)/btfix.S: $(obj)/btfixupprep vmlinux FORCE
$(call if_changed,btfix)
endif
ifeq ($(CONFIG_SPARC64),y)
# Actual linking
$(obj)/image: vmlinux FORCE
$(call if_changed,strip)
@echo ' kernel: $@ is ready'
$(obj)/zImage: $(obj)/image
$(call if_changed,gzip)
@ -79,6 +30,10 @@ $(obj)/vmlinux.aout: vmlinux FORCE
@echo ' kernel: $@ is ready'
else
$(obj)/zImage: $(obj)/image
$(call if_changed,strip)
@echo ' kernel: $@ is ready'
# The following lines make a readable image for U-Boot.
# uImage - Binary file read by U-boot
# uImage.o - object file of uImage for loading with a
@ -107,6 +62,10 @@ $(obj)/uImage: $(obj)/image.gz
endif
$(obj)/image: vmlinux FORCE
$(call if_changed,strip)
@echo ' kernel: $@ is ready'
$(obj)/tftpboot.img: $(obj)/image $(obj)/piggyback System.map $(ROOT_IMG) FORCE
$(call if_changed,elftoaout)
$(call if_changed,piggy)


@ -1,386 +0,0 @@
/*
Simple utility to prepare vmlinux image for sparc.
Resolves all BTFIXUP uses and settings and creates
a special .s object to link to the image.
Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
#include <stdio.h>
#include <string.h>
#include <ctype.h>
#include <errno.h>
#include <unistd.h>
#include <stdlib.h>
#include <malloc.h>
#define MAXSYMS 1024
static char *symtab = "SYMBOL TABLE:";
static char *relrec = "RELOCATION RECORDS FOR [";
static int rellen;
static int symlen;
int mode;
struct _btfixup;
typedef struct _btfixuprel {
char *sect;
unsigned long offset;
struct _btfixup *f;
int frel;
struct _btfixuprel *next;
} btfixuprel;
typedef struct _btfixup {
int type;
int setinitval;
unsigned int initval;
char *initvalstr;
char *name;
btfixuprel *rel;
} btfixup;
btfixup array[MAXSYMS];
int last = 0;
char buffer[1024];
unsigned long lastfoffset = -1;
unsigned long lastfrelno;
btfixup *lastf;
static void fatal(void) __attribute__((noreturn));
static void fatal(void)
{
fprintf(stderr, "Malformed output from objdump\n%s\n", buffer);
exit(1);
}
static btfixup *find(int type, char *name)
{
int i;
for (i = 0; i < last; i++) {
if (array[i].type == type && !strcmp(array[i].name, name))
return array + i;
}
array[last].type = type;
array[last].name = strdup(name);
array[last].setinitval = 0;
if (!array[last].name) fatal();
array[last].rel = NULL;
last++;
if (last >= MAXSYMS) {
fprintf(stderr, "Ugh. Something strange. More than %d different BTFIXUP symbols\n", MAXSYMS);
exit(1);
}
return array + last - 1;
}
static void set_mode (char *buffer)
{
for (mode = 0;; mode++)
if (buffer[mode] < '0' || buffer[mode] > '9')
break;
if (mode != 8 && mode != 16)
fatal();
}
int main(int argc,char **argv)
{
char *p, *q;
char *sect;
int i, j, k;
unsigned int initval;
int shift;
btfixup *f;
btfixuprel *r, **rr;
unsigned long offset;
char *initvalstr;
symlen = strlen(symtab);
while (fgets (buffer, 1024, stdin) != NULL)
if (!strncmp (buffer, symtab, symlen))
goto main0;
fatal();
main0:
rellen = strlen(relrec);
while (fgets (buffer, 1024, stdin) != NULL)
if (!strncmp (buffer, relrec, rellen))
goto main1;
fatal();
main1:
sect = malloc(strlen (buffer + rellen) + 1);
if (!sect) fatal();
strcpy (sect, buffer + rellen);
p = strchr (sect, ']');
if (!p) fatal();
*p = 0;
if (fgets (buffer, 1024, stdin) == NULL)
fatal();
while (fgets (buffer, 1024, stdin) != NULL) {
int nbase;
if (!strncmp (buffer, relrec, rellen))
goto main1;
if (mode == 0)
set_mode (buffer);
p = strchr (buffer, '\n');
if (p) *p = 0;
if (strlen (buffer) < 22+mode)
continue;
if (strncmp (buffer + mode, " R_SPARC_", 9))
continue;
nbase = 27 - 8 + mode;
if (buffer[nbase] != '_' || buffer[nbase+1] != '_' || buffer[nbase+2] != '_')
continue;
switch (buffer[nbase+3]) {
case 'f': /* CALL */
case 'b': /* BLACKBOX */
case 's': /* SIMM13 */
case 'a': /* HALF */
case 'h': /* SETHI */
case 'i': /* INT */
break;
default:
continue;
}
p = strchr (buffer + nbase+5, '+');
if (p) *p = 0;
shift = nbase + 5;
if (buffer[nbase+4] == 's' && buffer[nbase+5] == '_') {
shift = nbase + 6;
if (strcmp (sect, ".init.text")) {
fprintf(stderr,
"Wrong use of '%s' BTFIXUPSET in '%s' section.\n"
"BTFIXUPSET_CALL can be used only in"
" __init sections\n",
buffer + shift, sect);
exit(1);
}
} else if (buffer[nbase+4] != '_')
continue;
if (!strcmp (sect, ".text.exit"))
continue;
if (strcmp (sect, ".text") &&
strcmp (sect, ".init.text") &&
strcmp (sect, ".fixup") &&
(strcmp (sect, "__ksymtab") || buffer[nbase+3] != 'f')) {
if (buffer[nbase+3] == 'f')
fprintf(stderr,
"Wrong use of '%s' in '%s' section.\n"
" It can be used only in .text, .init.text,"
" .fixup and __ksymtab\n",
buffer + shift, sect);
else
fprintf(stderr,
"Wrong use of '%s' in '%s' section.\n"
" It can be only used in .text, .init.text,"
" and .fixup\n", buffer + shift, sect);
exit(1);
}
p = strstr (buffer + shift, "__btset_");
if (p && buffer[nbase+4] == 's') {
fprintf(stderr, "__btset_ in BTFIXUP name can only be used when defining the variable, not for setting\n%s\n", buffer);
exit(1);
}
initval = 0;
initvalstr = NULL;
if (p) {
if (p[8] != '0' || p[9] != 'x') {
fprintf(stderr, "Pre-initialized values can be only initialized with hexadecimal constants starting 0x\n%s\n", buffer);
exit(1);
}
initval = strtoul(p + 10, &q, 16);
if (*q || !initval) {
fprintf(stderr, "Pre-initialized values can be only in the form name__btset_0xXXXXXXXX where X are hex digits.\nThey cannot be name__btset_0x00000000 though. Use BTFIXUPDEF_XX instead of BTFIXUPDEF_XX_INIT then.\n%s\n", buffer);
exit(1);
}
initvalstr = p + 10;
*p = 0;
}
f = find(buffer[nbase+3], buffer + shift);
if (buffer[nbase+4] == 's')
continue;
switch (buffer[nbase+3]) {
case 'f':
if (initval) {
fprintf(stderr, "Cannot use pre-initialized fixups for calls\n%s\n", buffer);
exit(1);
}
if (!strcmp (sect, "__ksymtab")) {
if (strncmp (buffer + mode+9, "32 ", 10)) {
fprintf(stderr, "BTFIXUP_CALL in EXPORT_SYMBOL results in relocation other than R_SPARC_32\n\%s\n", buffer);
exit(1);
}
} else if (strncmp (buffer + mode+9, "WDISP30 ", 10) &&
strncmp (buffer + mode+9, "HI22 ", 10) &&
strncmp (buffer + mode+9, "LO10 ", 10)) {
fprintf(stderr, "BTFIXUP_CALL results in relocation other than R_SPARC_WDISP30, R_SPARC_HI22 or R_SPARC_LO10\n%s\n", buffer);
exit(1);
}
break;
case 'b':
if (initval) {
fprintf(stderr, "Cannot use pre-initialized fixups for blackboxes\n%s\n", buffer);
exit(1);
}
if (strncmp (buffer + mode+9, "HI22 ", 10)) {
fprintf(stderr, "BTFIXUP_BLACKBOX results in relocation other than R_SPARC_HI22\n%s\n", buffer);
exit(1);
}
break;
case 's':
if (initval + 0x1000 >= 0x2000) {
fprintf(stderr, "Wrong initializer for SIMM13. Has to be from $fffff000 to $00000fff\n%s\n", buffer);
exit(1);
}
if (strncmp (buffer + mode+9, "13 ", 10)) {
fprintf(stderr, "BTFIXUP_SIMM13 results in relocation other than R_SPARC_13\n%s\n", buffer);
exit(1);
}
break;
case 'a':
if (initval + 0x1000 >= 0x2000 && (initval & 0x3ff)) {
fprintf(stderr, "Wrong initializer for HALF.\n%s\n", buffer);
exit(1);
}
if (strncmp (buffer + mode+9, "13 ", 10)) {
fprintf(stderr, "BTFIXUP_HALF results in relocation other than R_SPARC_13\n%s\n", buffer);
exit(1);
}
break;
case 'h':
if (initval & 0x3ff) {
fprintf(stderr, "Wrong initializer for SETHI. Cannot have set low 10 bits\n%s\n", buffer);
exit(1);
}
if (strncmp (buffer + mode+9, "HI22 ", 10)) {
fprintf(stderr, "BTFIXUP_SETHI results in relocation other than R_SPARC_HI22\n%s\n", buffer);
exit(1);
}
break;
case 'i':
if (initval) {
fprintf(stderr, "Cannot use pre-initialized fixups for INT\n%s\n", buffer);
exit(1);
}
if (strncmp (buffer + mode+9, "HI22 ", 10) && strncmp (buffer + mode+9, "LO10 ", 10)) {
fprintf(stderr, "BTFIXUP_INT results in relocation other than R_SPARC_HI22 and R_SPARC_LO10\n%s\n", buffer);
exit(1);
}
break;
}
if (!f->setinitval) {
f->initval = initval;
if (initvalstr) {
f->initvalstr = strdup(initvalstr);
if (!f->initvalstr) fatal();
}
f->setinitval = 1;
} else if (f->initval != initval) {
fprintf(stderr, "Btfixup %s previously used with initializer %s which doesn't match with current initializer\n%s\n",
f->name, f->initvalstr ? : "0x00000000", buffer);
exit(1);
} else if (initval && strcmp(f->initvalstr, initvalstr)) {
fprintf(stderr, "Btfixup %s previously used with initializer %s which doesn't match with current initializer.\n"
"Initializers have to match literally as well.\n%s\n",
f->name, f->initvalstr, buffer);
exit(1);
}
offset = strtoul(buffer, &q, 16);
if (q != buffer + mode || (!offset && (mode == 8 ? strncmp (buffer, "00000000 ", 9) : strncmp (buffer, "0000000000000000 ", 17)))) {
fprintf(stderr, "Malformed relocation address in\n%s\n", buffer);
exit(1);
}
for (k = 0, r = f->rel, rr = &f->rel; r; rr = &r->next, r = r->next, k++)
if (r->offset == offset && !strcmp(r->sect, sect)) {
fprintf(stderr, "Ugh. One address has two relocation records\n");
exit(1);
}
*rr = malloc(sizeof(btfixuprel));
if (!*rr) fatal();
(*rr)->offset = offset;
(*rr)->f = NULL;
if (buffer[nbase+3] == 'f') {
lastf = f;
lastfoffset = offset;
lastfrelno = k;
} else if (lastfoffset + 4 == offset) {
(*rr)->f = lastf;
(*rr)->frel = lastfrelno;
}
(*rr)->sect = sect;
(*rr)->next = NULL;
}
printf("! Generated by btfixupprep. Do not edit.\n\n");
printf("\t.section\t\".data..init\",#alloc,#write\n\t.align\t4\n\n");
printf("\t.global\t___btfixup_start\n___btfixup_start:\n\n");
for (i = 0; i < last; i++) {
f = array + i;
printf("\t.global\t___%cs_%s\n", f->type, f->name);
if (f->type == 'f')
printf("___%cs_%s:\n\t.word 0x%08x,0,0,", f->type, f->name, f->type << 24);
else
printf("___%cs_%s:\n\t.word 0x%08x,0,", f->type, f->name, f->type << 24);
for (j = 0, r = f->rel; r != NULL; j++, r = r->next);
if (j)
printf("%d\n\t.word\t", j * 2);
else
printf("0\n");
for (r = f->rel, j--; r != NULL; j--, r = r->next) {
if (!strcmp (r->sect, ".text"))
printf ("_stext+0x%08lx", r->offset);
else if (!strcmp (r->sect, ".init.text"))
printf ("__init_begin+0x%08lx", r->offset);
else if (!strcmp (r->sect, "__ksymtab"))
printf ("__start___ksymtab+0x%08lx", r->offset);
else if (!strcmp (r->sect, ".fixup"))
printf ("__start___fixup+0x%08lx", r->offset);
else
fatal();
if (f->type == 'f' || !r->f)
printf (",0");
else
printf (",___fs_%s+0x%08x", r->f->name, (4 + r->frel*2)*4 + 4);
if (j) printf (",");
else printf ("\n");
}
printf("\n");
}
printf("\n\t.global\t___btfixup_end\n___btfixup_end:\n");
printf("\n\n! Define undefined references\n\n");
for (i = 0; i < last; i++) {
f = array + i;
if (f->type == 'f') {
printf("\t.global\t___f_%s\n", f->name);
printf("___f_%s:\n", f->name);
}
}
printf("\tretl\n\t nop\n\n");
for (i = 0; i < last; i++) {
f = array + i;
if (f->type != 'f') {
if (!f->initval) {
printf("\t.global\t___%c_%s\n", f->type, f->name);
printf("___%c_%s = 0\n", f->type, f->name);
} else {
printf("\t.global\t___%c_%s__btset_0x%s\n", f->type, f->name, f->initvalstr);
printf("___%c_%s__btset_0x%s = 0x%08x\n", f->type, f->name, f->initvalstr, f->initval);
}
}
}
printf("\n\n");
exit(0);
}


@ -112,6 +112,20 @@
#define ASI_M_ACTION 0x4c /* Breakpoint Action Register (GNU/Viking) */
/* LEON ASI */
#define ASI_LEON_NOCACHE 0x01
#define ASI_LEON_DCACHE_MISS 0x01
#define ASI_LEON_CACHEREGS 0x02
#define ASI_LEON_IFLUSH 0x10
#define ASI_LEON_DFLUSH 0x11
#define ASI_LEON_MMUFLUSH 0x18
#define ASI_LEON_MMUREGS 0x19
#define ASI_LEON_BYPASS 0x1c
#define ASI_LEON_FLUSH_PAGE 0x10
/* V9 Architecture mandatory ASIs. */
#define ASI_N 0x04 /* Nucleus */
#define ASI_NL 0x0c /* Nucleus, little endian */


@ -6,17 +6,6 @@
#ifndef _SPARC_ASMMACRO_H
#define _SPARC_ASMMACRO_H
#include <asm/btfixup.h>
#include <asm/asi.h>
#define GET_PROCESSOR4M_ID(reg) \
rd %tbr, %reg; \
srl %reg, 12, %reg; \
and %reg, 3, %reg;
#define GET_PROCESSOR4D_ID(reg) \
lda [%g0] ASI_M_VIKING_TMP1, %reg;
/* All trap entry points _must_ begin with this macro or else you
* lose. It makes sure the kernel has a proper window so that
* c-code can be called.
@ -31,10 +20,4 @@
/* All traps low-level code here must end with this macro. */
#define RESTORE_ALL b ret_trap_entry; clr %l6;
/* sun4 probably wants half word accesses to ASI_SEGMAP, while sun4c+
likes byte accesses. These are to avoid ifdef mania. */
#define lduXa lduba
#define stXa stba
#endif /* !(_SPARC_ASMMACRO_H) */


@ -1,208 +0,0 @@
/*
* asm/btfixup.h: Macros for boot time linking.
*
* Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#ifndef _SPARC_BTFIXUP_H
#define _SPARC_BTFIXUP_H
#include <linux/init.h>
#ifndef __ASSEMBLY__
#ifdef MODULE
extern unsigned int ___illegal_use_of_BTFIXUP_SIMM13_in_module(void);
extern unsigned int ___illegal_use_of_BTFIXUP_SETHI_in_module(void);
extern unsigned int ___illegal_use_of_BTFIXUP_HALF_in_module(void);
extern unsigned int ___illegal_use_of_BTFIXUP_INT_in_module(void);
#define BTFIXUP_SIMM13(__name) ___illegal_use_of_BTFIXUP_SIMM13_in_module()
#define BTFIXUP_HALF(__name) ___illegal_use_of_BTFIXUP_HALF_in_module()
#define BTFIXUP_SETHI(__name) ___illegal_use_of_BTFIXUP_SETHI_in_module()
#define BTFIXUP_INT(__name) ___illegal_use_of_BTFIXUP_INT_in_module()
#define BTFIXUP_BLACKBOX(__name) ___illegal_use_of_BTFIXUP_BLACKBOX_in_module
#else
#define BTFIXUP_SIMM13(__name) ___sf_##__name()
#define BTFIXUP_HALF(__name) ___af_##__name()
#define BTFIXUP_SETHI(__name) ___hf_##__name()
#define BTFIXUP_INT(__name) ((unsigned int)&___i_##__name)
/* This must be written in assembly and present in a sethi */
#define BTFIXUP_BLACKBOX(__name) ___b_##__name
#endif /* MODULE */
/* Fixup call xx */
#define BTFIXUPDEF_CALL(__type, __name, __args...) \
extern __type ___f_##__name(__args); \
extern unsigned ___fs_##__name[3];
#define BTFIXUPDEF_CALL_CONST(__type, __name, __args...) \
extern __type ___f_##__name(__args) __attribute_const__; \
extern unsigned ___fs_##__name[3];
#define BTFIXUP_CALL(__name) ___f_##__name
#define BTFIXUPDEF_BLACKBOX(__name) \
extern unsigned ___bs_##__name[2];
/* Put bottom 13bits into some register variable */
#define BTFIXUPDEF_SIMM13(__name) \
static inline unsigned int ___sf_##__name(void) __attribute_const__; \
extern unsigned ___ss_##__name[2]; \
static inline unsigned int ___sf_##__name(void) { \
unsigned int ret; \
__asm__ ("or %%g0, ___s_" #__name ", %0" : "=r"(ret)); \
return ret; \
}
#define BTFIXUPDEF_SIMM13_INIT(__name,__val) \
static inline unsigned int ___sf_##__name(void) __attribute_const__; \
extern unsigned ___ss_##__name[2]; \
static inline unsigned int ___sf_##__name(void) { \
unsigned int ret; \
__asm__ ("or %%g0, ___s_" #__name "__btset_" #__val ", %0" : "=r"(ret));\
return ret; \
}
/* Put either bottom 13 bits, or upper 22 bits into some register variable
* (depending on the value, this will lead into sethi FIX, reg; or
* mov FIX, reg; )
*/
#define BTFIXUPDEF_HALF(__name) \
static inline unsigned int ___af_##__name(void) __attribute_const__; \
extern unsigned ___as_##__name[2]; \
static inline unsigned int ___af_##__name(void) { \
unsigned int ret; \
__asm__ ("or %%g0, ___a_" #__name ", %0" : "=r"(ret)); \
return ret; \
}
#define BTFIXUPDEF_HALF_INIT(__name,__val) \
static inline unsigned int ___af_##__name(void) __attribute_const__; \
extern unsigned ___as_##__name[2]; \
static inline unsigned int ___af_##__name(void) { \
unsigned int ret; \
__asm__ ("or %%g0, ___a_" #__name "__btset_" #__val ", %0" : "=r"(ret));\
return ret; \
}
/* Put upper 22 bits into some register variable */
#define BTFIXUPDEF_SETHI(__name) \
static inline unsigned int ___hf_##__name(void) __attribute_const__; \
extern unsigned ___hs_##__name[2]; \
static inline unsigned int ___hf_##__name(void) { \
unsigned int ret; \
__asm__ ("sethi %%hi(___h_" #__name "), %0" : "=r"(ret)); \
return ret; \
}
#define BTFIXUPDEF_SETHI_INIT(__name,__val) \
static inline unsigned int ___hf_##__name(void) __attribute_const__; \
extern unsigned ___hs_##__name[2]; \
static inline unsigned int ___hf_##__name(void) { \
unsigned int ret; \
__asm__ ("sethi %%hi(___h_" #__name "__btset_" #__val "), %0" : \
"=r"(ret)); \
return ret; \
}
/* Put a full 32bit integer into some register variable */
#define BTFIXUPDEF_INT(__name) \
extern unsigned char ___i_##__name; \
extern unsigned ___is_##__name[2];
#define BTFIXUPCALL_NORM 0x00000000 /* Always call */
#define BTFIXUPCALL_NOP 0x01000000 /* Possibly optimize to nop */
#define BTFIXUPCALL_RETINT(i) (0x90102000|((i) & 0x1fff)) /* Possibly optimize to mov i, %o0 */
#define BTFIXUPCALL_ORINT(i) (0x90122000|((i) & 0x1fff)) /* Possibly optimize to or %o0, i, %o0 */
#define BTFIXUPCALL_RETO0 0x01000000 /* Return first parameter, actually a nop */
#define BTFIXUPCALL_ANDNINT(i) (0x902a2000|((i) & 0x1fff)) /* Possibly optimize to andn %o0, i, %o0 */
#define BTFIXUPCALL_SWAPO0O1 0xd27a0000 /* Possibly optimize to swap [%o0],%o1 */
#define BTFIXUPCALL_SWAPO0G0 0xc07a0000 /* Possibly optimize to swap [%o0],%g0 */
#define BTFIXUPCALL_SWAPG1G2 0xc4784000 /* Possibly optimize to swap [%g1],%g2 */
#define BTFIXUPCALL_STG0O0 0xc0220000 /* Possibly optimize to st %g0,[%o0] */
#define BTFIXUPCALL_STO1O0 0xd2220000 /* Possibly optimize to st %o1,[%o0] */
#define BTFIXUPSET_CALL(__name, __addr, __insn) \
do { \
___fs_##__name[0] |= 1; \
___fs_##__name[1] = (unsigned long)__addr; \
___fs_##__name[2] = __insn; \
} while (0)
#define BTFIXUPSET_BLACKBOX(__name, __func) \
do { \
___bs_##__name[0] |= 1; \
___bs_##__name[1] = (unsigned long)__func; \
} while (0)
#define BTFIXUPCOPY_CALL(__name, __from) \
do { \
___fs_##__name[0] |= 1; \
___fs_##__name[1] = ___fs_##__from[1]; \
___fs_##__name[2] = ___fs_##__from[2]; \
} while (0)
#define BTFIXUPSET_SIMM13(__name, __val) \
do { \
___ss_##__name[0] |= 1; \
___ss_##__name[1] = (unsigned)__val; \
} while (0)
#define BTFIXUPCOPY_SIMM13(__name, __from) \
do { \
___ss_##__name[0] |= 1; \
___ss_##__name[1] = ___ss_##__from[1]; \
} while (0)
#define BTFIXUPSET_HALF(__name, __val) \
do { \
___as_##__name[0] |= 1; \
___as_##__name[1] = (unsigned)__val; \
} while (0)
#define BTFIXUPCOPY_HALF(__name, __from) \
do { \
___as_##__name[0] |= 1; \
___as_##__name[1] = ___as_##__from[1]; \
} while (0)
#define BTFIXUPSET_SETHI(__name, __val) \
do { \
___hs_##__name[0] |= 1; \
___hs_##__name[1] = (unsigned)__val; \
} while (0)
#define BTFIXUPCOPY_SETHI(__name, __from) \
do { \
___hs_##__name[0] |= 1; \
___hs_##__name[1] = ___hs_##__from[1]; \
} while (0)
#define BTFIXUPSET_INT(__name, __val) \
do { \
___is_##__name[0] |= 1; \
___is_##__name[1] = (unsigned)__val; \
} while (0)
#define BTFIXUPCOPY_INT(__name, __from) \
do { \
___is_##__name[0] |= 1; \
___is_##__name[1] = ___is_##__from[1]; \
} while (0)
#define BTFIXUPVAL_CALL(__name) \
((unsigned long)___fs_##__name[1])
extern void btfixup(void);
#else /* __ASSEMBLY__ */
#define BTFIXUP_SETHI(__name) %hi(___h_ ## __name)
#define BTFIXUP_SETHI_INIT(__name,__val) %hi(___h_ ## __name ## __btset_ ## __val)
#endif /* __ASSEMBLY__ */
#endif /* !(_SPARC_BTFIXUP_H) */


@ -22,118 +22,4 @@
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
#ifdef CONFIG_SPARC32
#include <asm/asi.h>
/* Direct access to the instruction cache is provided through an
* alternate address space. The IDC bit must be off in the ICCR on
* HyperSparcs for these accesses to work. The code below does not do
* any checking, the caller must do so. These routines are for
* diagnostics only, but could end up being useful. Use with care.
* Also, you are asking for trouble if you execute these in one of the
* three instructions following a %asr/%psr access or modification.
*/
/* First, cache-tag access. */
static inline unsigned int get_icache_tag(int setnum, int tagnum)
{
unsigned int vaddr, retval;
vaddr = ((setnum&1) << 12) | ((tagnum&0x7f) << 5);
__asm__ __volatile__("lda [%1] %2, %0\n\t" :
"=r" (retval) :
"r" (vaddr), "i" (ASI_M_TXTC_TAG));
return retval;
}
static inline void put_icache_tag(int setnum, int tagnum, unsigned int entry)
{
unsigned int vaddr;
vaddr = ((setnum&1) << 12) | ((tagnum&0x7f) << 5);
__asm__ __volatile__("sta %0, [%1] %2\n\t" : :
"r" (entry), "r" (vaddr), "i" (ASI_M_TXTC_TAG) :
"memory");
}
/* Second cache-data access. The data is returned two-32bit quantities
* at a time.
*/
static inline void get_icache_data(int setnum, int tagnum, int subblock,
unsigned int *data)
{
unsigned int value1, value2, vaddr;
vaddr = ((setnum&0x1) << 12) | ((tagnum&0x7f) << 5) |
((subblock&0x3) << 3);
__asm__ __volatile__("ldda [%2] %3, %%g2\n\t"
"or %%g0, %%g2, %0\n\t"
"or %%g0, %%g3, %1\n\t" :
"=r" (value1), "=r" (value2) :
"r" (vaddr), "i" (ASI_M_TXTC_DATA) :
"g2", "g3");
data[0] = value1; data[1] = value2;
}
static inline void put_icache_data(int setnum, int tagnum, int subblock,
unsigned int *data)
{
unsigned int value1, value2, vaddr;
vaddr = ((setnum&0x1) << 12) | ((tagnum&0x7f) << 5) |
((subblock&0x3) << 3);
value1 = data[0]; value2 = data[1];
__asm__ __volatile__("or %%g0, %0, %%g2\n\t"
"or %%g0, %1, %%g3\n\t"
"stda %%g2, [%2] %3\n\t" : :
"r" (value1), "r" (value2),
"r" (vaddr), "i" (ASI_M_TXTC_DATA) :
"g2", "g3", "memory" /* no joke */);
}
/* Different types of flushes with the ICACHE. Some of the flushes
* affect both the ICACHE and the external cache. Others only clear
* the ICACHE entries on the cpu itself. V8's (most) allow
* granularity of flushes on the packet (element in line), whole line,
* and entire cache (ie. all lines) level. The ICACHE only flushes are
* ROSS HyperSparc specific and are in ross.h
*/
/* Flushes which clear out both the on-chip and external caches */
static inline void flush_ei_page(unsigned int addr)
{
__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
"r" (addr), "i" (ASI_M_FLUSH_PAGE) :
"memory");
}
static inline void flush_ei_seg(unsigned int addr)
{
__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
"r" (addr), "i" (ASI_M_FLUSH_SEG) :
"memory");
}
static inline void flush_ei_region(unsigned int addr)
{
__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
"r" (addr), "i" (ASI_M_FLUSH_REGION) :
"memory");
}
static inline void flush_ei_ctx(unsigned int addr)
{
__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
"r" (addr), "i" (ASI_M_FLUSH_CTX) :
"memory");
}
static inline void flush_ei_user(unsigned int addr)
{
__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
"r" (addr), "i" (ASI_M_FLUSH_USER) :
"memory");
}
#endif /* CONFIG_SPARC32 */
#endif /* !(_SPARC_CACHE_H) */


@ -1,5 +1,9 @@
#ifndef ___ASM_SPARC_CACHEFLUSH_H
#define ___ASM_SPARC_CACHEFLUSH_H
/* flush addr - to allow use of self-modifying code */
#define flushi(addr) __asm__ __volatile__ ("flush %0" : : "r" (addr) : "memory")
#if defined(__sparc__) && defined(__arch64__)
#include <asm/cacheflush_64.h>
#else


@ -1,56 +1,18 @@
#ifndef _SPARC_CACHEFLUSH_H
#define _SPARC_CACHEFLUSH_H
#include <linux/mm.h> /* Common for other includes */
// #include <linux/kernel.h> from pgalloc.h
// #include <linux/sched.h> from pgalloc.h
#include <asm/cachetlb_32.h>
// #include <asm/page.h>
#include <asm/btfixup.h>
/*
* Fine grained cache flushing.
*/
#ifdef CONFIG_SMP
BTFIXUPDEF_CALL(void, local_flush_cache_all, void)
BTFIXUPDEF_CALL(void, local_flush_cache_mm, struct mm_struct *)
BTFIXUPDEF_CALL(void, local_flush_cache_range, struct vm_area_struct *, unsigned long, unsigned long)
BTFIXUPDEF_CALL(void, local_flush_cache_page, struct vm_area_struct *, unsigned long)
#define local_flush_cache_all() BTFIXUP_CALL(local_flush_cache_all)()
#define local_flush_cache_mm(mm) BTFIXUP_CALL(local_flush_cache_mm)(mm)
#define local_flush_cache_range(vma,start,end) BTFIXUP_CALL(local_flush_cache_range)(vma,start,end)
#define local_flush_cache_page(vma,addr) BTFIXUP_CALL(local_flush_cache_page)(vma,addr)
BTFIXUPDEF_CALL(void, local_flush_page_to_ram, unsigned long)
BTFIXUPDEF_CALL(void, local_flush_sig_insns, struct mm_struct *, unsigned long)
#define local_flush_page_to_ram(addr) BTFIXUP_CALL(local_flush_page_to_ram)(addr)
#define local_flush_sig_insns(mm,insn_addr) BTFIXUP_CALL(local_flush_sig_insns)(mm,insn_addr)
extern void smp_flush_cache_all(void);
extern void smp_flush_cache_mm(struct mm_struct *mm);
extern void smp_flush_cache_range(struct vm_area_struct *vma,
unsigned long start,
unsigned long end);
extern void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void smp_flush_page_to_ram(unsigned long page);
extern void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
#endif /* CONFIG_SMP */
BTFIXUPDEF_CALL(void, flush_cache_all, void)
BTFIXUPDEF_CALL(void, flush_cache_mm, struct mm_struct *)
BTFIXUPDEF_CALL(void, flush_cache_range, struct vm_area_struct *, unsigned long, unsigned long)
BTFIXUPDEF_CALL(void, flush_cache_page, struct vm_area_struct *, unsigned long)
#define flush_cache_all() BTFIXUP_CALL(flush_cache_all)()
#define flush_cache_mm(mm) BTFIXUP_CALL(flush_cache_mm)(mm)
#define flush_cache_dup_mm(mm) BTFIXUP_CALL(flush_cache_mm)(mm)
#define flush_cache_range(vma,start,end) BTFIXUP_CALL(flush_cache_range)(vma,start,end)
#define flush_cache_page(vma,addr,pfn) BTFIXUP_CALL(flush_cache_page)(vma,addr)
#define flush_cache_all() \
sparc32_cachetlb_ops->cache_all()
#define flush_cache_mm(mm) \
sparc32_cachetlb_ops->cache_mm(mm)
#define flush_cache_dup_mm(mm) \
sparc32_cachetlb_ops->cache_mm(mm)
#define flush_cache_range(vma,start,end) \
sparc32_cachetlb_ops->cache_range(vma, start, end)
#define flush_cache_page(vma,addr,pfn) \
sparc32_cachetlb_ops->cache_page(vma, addr)
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma, pg) do { } while (0)
@ -67,11 +29,12 @@ BTFIXUPDEF_CALL(void, flush_cache_page, struct vm_area_struct *, unsigned long)
memcpy(dst, src, len); \
} while (0)
BTFIXUPDEF_CALL(void, __flush_page_to_ram, unsigned long)
BTFIXUPDEF_CALL(void, flush_sig_insns, struct mm_struct *, unsigned long)
#define __flush_page_to_ram(addr) BTFIXUP_CALL(__flush_page_to_ram)(addr)
#define flush_sig_insns(mm,insn_addr) BTFIXUP_CALL(flush_sig_insns)(mm,insn_addr)
#define __flush_page_to_ram(addr) \
sparc32_cachetlb_ops->page_to_ram(addr)
#define flush_sig_insns(mm,insn_addr) \
sparc32_cachetlb_ops->sig_insns(mm, insn_addr)
#define flush_page_for_dma(addr) \
sparc32_cachetlb_ops->page_for_dma(addr)
extern void sparc_flush_page_to_ram(struct page *page);


@ -8,9 +8,6 @@
#include <linux/mm.h>
/* Cache flush operations. */
#define flushi(addr) __asm__ __volatile__ ("flush %0" : : "r" (addr) : "memory")
#define flushw_all() __asm__ __volatile__("flushw")
extern void __flushw_user(void);


@ -0,0 +1,29 @@
#ifndef _SPARC_CACHETLB_H
#define _SPARC_CACHETLB_H
struct mm_struct;
struct vm_area_struct;
struct sparc32_cachetlb_ops {
void (*cache_all)(void);
void (*cache_mm)(struct mm_struct *);
void (*cache_range)(struct vm_area_struct *, unsigned long,
unsigned long);
void (*cache_page)(struct vm_area_struct *, unsigned long);
void (*tlb_all)(void);
void (*tlb_mm)(struct mm_struct *);
void (*tlb_range)(struct vm_area_struct *, unsigned long,
unsigned long);
void (*tlb_page)(struct vm_area_struct *, unsigned long);
void (*page_to_ram)(unsigned long);
void (*sig_insns)(struct mm_struct *, unsigned long);
void (*page_for_dma)(unsigned long);
};
extern const struct sparc32_cachetlb_ops *sparc32_cachetlb_ops;
#ifdef CONFIG_SMP
extern const struct sparc32_cachetlb_ops *local_ops;
#endif
#endif /* SPARC_CACHETLB_H */
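
This ops table is what replaces the per-function BTFIXUP slots: a backend fills in one static table, installs it with a single pointer assignment at boot, and the old macro call sites simply dereference it. A minimal sketch with hypothetical names (the real tables live under arch/sparc/mm/, e.g. in srmmu.c):

#include <linux/init.h>
#include <asm/cachetlb_32.h>

static void example_cache_all(void) { /* flush the whole cache */ }
static void example_tlb_all(void)   { /* flush the whole TLB */ }
/* ... the remaining handlers are filled in the same way ... */

static const struct sparc32_cachetlb_ops example_ops = {
	.cache_all = example_cache_all,
	.tlb_all   = example_tlb_all,
	/* .cache_mm, .cache_range, .page_to_ram, ... likewise */
};

static void __init example_load_mmu_ops(void)	/* hypothetical */
{
	/* One assignment replaces all the BTFIXUPSET_CALL()s; from here on
	 * flush_cache_all() expands to sparc32_cachetlb_ops->cache_all(). */
	sparc32_cachetlb_ops = &example_ops;
}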


@ -11,40 +11,13 @@
#ifndef __ARCH_SPARC_CMPXCHG__
#define __ARCH_SPARC_CMPXCHG__
#include <asm/btfixup.h>
/* This has special calling conventions */
#ifndef CONFIG_SMP
BTFIXUPDEF_CALL(void, ___xchg32, void)
#endif
static inline unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val)
{
#ifdef CONFIG_SMP
__asm__ __volatile__("swap [%2], %0"
: "=&r" (val)
: "0" (val), "r" (m)
: "memory");
return val;
#else
register unsigned long *ptr asm("g1");
register unsigned long ret asm("g2");
ptr = (unsigned long *) m;
ret = val;
/* Note: this is magic and the nop there is
really needed. */
__asm__ __volatile__(
"mov %%o7, %%g4\n\t"
"call ___f____xchg32\n\t"
" nop\n\t"
: "=&r" (ret)
: "0" (ret), "r" (ptr)
: "g3", "g4", "g7", "memory", "cc");
return ret;
#endif
}
extern void __xchg_called_with_bad_pointer(void);


@ -7,28 +7,6 @@
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
/* 3=sun3
4=sun4 (as in sun4 sysmaint student book)
c=sun4c (according to davem) */
#define AC_IDPROM 0x00000000 /* 34 ID PROM, R/O, byte, 32 bytes */
#define AC_PAGEMAP 0x10000000 /* 3 Pagemap R/W, long */
#define AC_SEGMAP 0x20000000 /* 3 Segment map, byte */
#define AC_CONTEXT 0x30000000 /* 34c current mmu-context */
#define AC_SENABLE 0x40000000 /* 34c system dvma/cache/reset enable reg*/
#define AC_UDVMA_ENB 0x50000000 /* 34 Not used on Sun boards, byte */
#define AC_BUS_ERROR 0x60000000 /* 34 Not cleared on read, byte. */
#define AC_SYNC_ERR 0x60000000 /* c fault type */
#define AC_SYNC_VA 0x60000004 /* c fault virtual address */
#define AC_ASYNC_ERR 0x60000008 /* c asynchronous fault type */
#define AC_ASYNC_VA 0x6000000c /* c async fault virtual address */
#define AC_LEDS 0x70000000 /* 34 Zero turns on LEDs, byte */
#define AC_CACHETAGS 0x80000000 /* 34c direct access to the VAC tags */
#define AC_CACHEDDATA 0x90000000 /* 3 c direct access to the VAC data */
#define AC_UDVMA_MAP 0xD0000000 /* 4 Not used on Sun boards, byte */
#define AC_VME_VECTOR 0xE0000000 /* 4 For non-Autovector VME, byte */
#define AC_BOOT_SCC 0xF0000000 /* 34 bypass to access Zilog 8530. byte.*/
/* s=Swift, h=Ross_HyperSPARC, v=TI_Viking, t=Tsunami, r=Ross_Cypress */
#define AC_M_PCR 0x0000 /* shv Processor Control Reg */
#define AC_M_CTPR 0x0100 /* shv Context Table Pointer Reg */


@ -5,30 +5,24 @@
* Sparc (general) CPU types
*/
enum sparc_cpu {
sun4 = 0x00,
sun4c = 0x01,
sun4m = 0x02,
sun4d = 0x03,
sun4e = 0x04,
sun4u = 0x05, /* V8 ploos ploos */
sun_unknown = 0x06,
ap1000 = 0x07, /* almost a sun4m */
sparc_leon = 0x08, /* Leon SoC */
sun4m = 0x00,
sun4d = 0x01,
sun4e = 0x02,
sun4u = 0x03, /* V8 ploos ploos */
sun_unknown = 0x04,
ap1000 = 0x05, /* almost a sun4m */
sparc_leon = 0x06, /* Leon SoC */
};
#ifdef CONFIG_SPARC32
extern enum sparc_cpu sparc_cpu_model;
#define ARCH_SUN4C (sparc_cpu_model==sun4c)
#define SUN4M_NCPUS 4 /* Architectural limit of sun4m. */
#else
#define sparc_cpu_model sun4u
/* This cannot ever be a sun4c :) That's just history. */
#define ARCH_SUN4C 0
#endif
#endif /* __ASM_CPU_TYPE_H */


@ -14,7 +14,6 @@
typedef struct {
unsigned long udelay_val;
unsigned long clock_tick;
unsigned int multiplier;
unsigned int counter;
#ifdef CONFIG_SMP
unsigned int irq_resched_count;


@ -1,79 +0,0 @@
/*
* cypress.h: Cypress module specific definitions and defines.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
#ifndef _SPARC_CYPRESS_H
#define _SPARC_CYPRESS_H
/* Cypress chips have %psr 'impl' of '0001' and 'vers' of '0001'. */
/* The MMU control register fields on the Sparc Cypress 604/605 MMU's.
*
* ---------------------------------------------------------------
* |implvers| MCA | MCM |MV| MID |BM| C|RSV|MR|CM|CL|CE|RSV|NF|ME|
* ---------------------------------------------------------------
* 31 24 23-22 21-20 19 18-15 14 13 12 11 10 9 8 7-2 1 0
*
* MCA: MultiChip Access -- Used for configuration of multiple
* CY7C604/605 cache units.
* MCM: MultiChip Mask -- Again, for multiple cache unit config.
* MV: MultiChip Valid -- Indicates MCM and MCA have valid settings.
* MID: ModuleID -- Unique processor ID for MBus transactions. (605 only)
* BM: Boot Mode -- 0 = not in boot mode, 1 = in boot mode
* C: Cacheable -- Indicates whether accesses are cacheable while
* the MMU is off. 0=no 1=yes
* MR: MemoryReflection -- Indicates whether the bus attached to the
* MBus supports memory reflection. 0=no 1=yes (605 only)
* CM: CacheMode -- Indicates whether the cache is operating in write
* through or copy-back mode. 0=write-through 1=copy-back
* CL: CacheLock -- Indicates if the entire cache is locked or not.
* 0=not-locked 1=locked (604 only)
* CE: CacheEnable -- Is the virtual cache on? 0=no 1=yes
* NF: NoFault -- Do faults generate traps? 0=yes 1=no
* ME: MmuEnable -- Is the MMU doing translations? 0=no 1=yes
*/
#define CYPRESS_MCA 0x00c00000
#define CYPRESS_MCM 0x00300000
#define CYPRESS_MVALID 0x00080000
#define CYPRESS_MIDMASK 0x00078000 /* Only on 605 */
#define CYPRESS_BMODE 0x00004000
#define CYPRESS_ACENABLE 0x00002000
#define CYPRESS_MRFLCT 0x00000800 /* Only on 605 */
#define CYPRESS_CMODE 0x00000400
#define CYPRESS_CLOCK 0x00000200 /* Only on 604 */
#define CYPRESS_CENABLE 0x00000100
#define CYPRESS_NFAULT 0x00000002
#define CYPRESS_MENABLE 0x00000001
static inline void cypress_flush_page(unsigned long page)
{
__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
"r" (page), "i" (ASI_M_FLUSH_PAGE));
}
static inline void cypress_flush_segment(unsigned long addr)
{
__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
"r" (addr), "i" (ASI_M_FLUSH_SEG));
}
static inline void cypress_flush_region(unsigned long addr)
{
__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
"r" (addr), "i" (ASI_M_FLUSH_REGION));
}
static inline void cypress_flush_context(void)
{
__asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : :
"i" (ASI_M_FLUSH_CTX));
}
/* XXX Displacement flushes for buggy chips and initial testing
* XXX go here.
*/
#endif /* !(_SPARC_CYPRESS_H) */


@ -92,27 +92,31 @@ extern int isa_dma_bridge_buggy;
#ifdef CONFIG_SPARC32
/* Routines for data transfer buffers. */
BTFIXUPDEF_CALL(char *, mmu_lockarea, char *, unsigned long)
BTFIXUPDEF_CALL(void, mmu_unlockarea, char *, unsigned long)
#define mmu_lockarea(vaddr,len) BTFIXUP_CALL(mmu_lockarea)(vaddr,len)
#define mmu_unlockarea(vaddr,len) BTFIXUP_CALL(mmu_unlockarea)(vaddr,len)
struct page;
struct device;
struct scatterlist;
/* These are implementations for sbus_map_sg/sbus_unmap_sg... collapse later */
BTFIXUPDEF_CALL(__u32, mmu_get_scsi_one, struct device *, char *, unsigned long)
BTFIXUPDEF_CALL(void, mmu_get_scsi_sgl, struct device *, struct scatterlist *, int)
BTFIXUPDEF_CALL(void, mmu_release_scsi_one, struct device *, __u32, unsigned long)
BTFIXUPDEF_CALL(void, mmu_release_scsi_sgl, struct device *, struct scatterlist *, int)
struct sparc32_dma_ops {
__u32 (*get_scsi_one)(struct device *, char *, unsigned long);
void (*get_scsi_sgl)(struct device *, struct scatterlist *, int);
void (*release_scsi_one)(struct device *, __u32, unsigned long);
void (*release_scsi_sgl)(struct device *, struct scatterlist *,int);
#ifdef CONFIG_SBUS
int (*map_dma_area)(struct device *, dma_addr_t *, unsigned long, unsigned long, int);
void (*unmap_dma_area)(struct device *, unsigned long, int);
#endif
};
extern const struct sparc32_dma_ops *sparc32_dma_ops;
#define mmu_get_scsi_one(dev,vaddr,len) BTFIXUP_CALL(mmu_get_scsi_one)(dev,vaddr,len)
#define mmu_get_scsi_sgl(dev,sg,sz) BTFIXUP_CALL(mmu_get_scsi_sgl)(dev,sg,sz)
#define mmu_release_scsi_one(dev,vaddr,len) BTFIXUP_CALL(mmu_release_scsi_one)(dev,vaddr,len)
#define mmu_release_scsi_sgl(dev,sg,sz) BTFIXUP_CALL(mmu_release_scsi_sgl)(dev,sg,sz)
#define mmu_get_scsi_one(dev,vaddr,len) \
sparc32_dma_ops->get_scsi_one(dev, vaddr, len)
#define mmu_get_scsi_sgl(dev,sg,sz) \
sparc32_dma_ops->get_scsi_sgl(dev, sg, sz)
#define mmu_release_scsi_one(dev,vaddr,len) \
sparc32_dma_ops->release_scsi_one(dev, vaddr,len)
#define mmu_release_scsi_sgl(dev,sg,sz) \
sparc32_dma_ops->release_scsi_sgl(dev, sg, sz)
#ifdef CONFIG_SBUS
/*
* mmu_map/unmap are provided by iommu/iounit; Invalid to call on IIep.
*
@ -123,17 +127,17 @@ BTFIXUPDEF_CALL(void, mmu_release_scsi_sgl, struct device *, struct scatterlist
* Second mapping is for device visible address, or "bus" address.
* The bus address is returned at '*pba'.
*
* These functions seem distinct, but are hard to split. On sun4c,
at least for now, 'a' is equal to bus address, and returned in *pba.
* These functions seem distinct, but are hard to split.
* On sun4m, page attributes depend on the CPU type, so we have to
* know if we are mapping RAM or I/O, so it has to be an additional argument
* to a separate mapping function for CPU visible mappings.
*/
BTFIXUPDEF_CALL(int, mmu_map_dma_area, struct device *, dma_addr_t *, unsigned long, unsigned long, int len)
BTFIXUPDEF_CALL(void, mmu_unmap_dma_area, struct device *, unsigned long busa, int len)
#define sbus_map_dma_area(dev,pba,va,a,len) \
sparc32_dma_ops->map_dma_area(dev, pba, va, a, len)
#define sbus_unmap_dma_area(dev,ba,len) \
sparc32_dma_ops->unmap_dma_area(dev, ba, len)
#endif /* CONFIG_SBUS */
#define mmu_map_dma_area(dev,pba,va,a,len) BTFIXUP_CALL(mmu_map_dma_area)(dev,pba,va,a,len)
#define mmu_unmap_dma_area(dev,ba,len) BTFIXUP_CALL(mmu_unmap_dma_area)(dev,ba,len)
#endif
#endif /* !(_ASM_SPARC_DMA_H) */
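
The DMA helpers get the same treatment as the cache/TLB operations: the BTFIXUP slots become members of sparc32_dma_ops, while driver-side callers keep using the existing macros unchanged. A rough caller-side sketch, with hypothetical names:

#include <linux/types.h>
#include <asm/dma.h>

static void example_dma_one(struct device *dev, char *buf, unsigned long len)
{
	/* expands to sparc32_dma_ops->get_scsi_one(dev, buf, len) */
	__u32 dvma = mmu_get_scsi_one(dev, buf, len);

	/* ... point the SBus DMA engine at 'dvma' and run the transfer ... */

	/* expands to sparc32_dma_ops->release_scsi_one(dev, dvma, len) */
	mmu_release_scsi_one(dev, dvma, len);
}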


@ -118,16 +118,9 @@ typedef struct {
instruction set this cpu supports. This can NOT be done in userspace
on Sparc. */
/* Sun4c has none of the capabilities, most sun4m's have them all.
* XXX This is gross, set some global variable at boot time. -DaveM
*/
#define ELF_HWCAP ((ARCH_SUN4C) ? 0 : \
(HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | \
HWCAP_SPARC_SWAP | \
((srmmu_modtype != Cypress && \
srmmu_modtype != Cypress_vE && \
srmmu_modtype != Cypress_vD) ? \
HWCAP_SPARC_MULDIV : 0)))
/* Most sun4m's have them all. */
#define ELF_HWCAP (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | \
HWCAP_SPARC_SWAP | HWCAP_SPARC_MULDIV)
/* This yields a string that ld.so will use to load implementation
specific libraries for optimization. This is more specific in


@ -12,7 +12,6 @@
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/idprom.h>
#include <asm/machines.h>
#include <asm/oplib.h>
#include <asm/auxio.h>
#include <asm/irq.h>
@ -103,25 +102,13 @@ static struct sun_floppy_ops sun_fdops;
/* Routines unique to each controller type on a Sun. */
static void sun_set_dor(unsigned char value, int fdc_82077)
{
if (sparc_cpu_model == sun4c) {
unsigned int bits = 0;
if (value & 0x10)
bits |= AUXIO_FLPY_DSEL;
if ((value & 0x80) == 0)
bits |= AUXIO_FLPY_EJCT;
set_auxio(bits, (~bits) & (AUXIO_FLPY_DSEL|AUXIO_FLPY_EJCT));
}
if (fdc_82077) {
if (fdc_82077)
sun_fdc->dor_82077 = value;
}
}
static unsigned char sun_read_dir(void)
{
if (sparc_cpu_model == sun4c)
return (get_auxio() & AUXIO_FLPY_DCHG) ? 0x80 : 0;
else
return sun_fdc->dir_82077;
return sun_fdc->dir_82077;
}
static unsigned char sun_82072_fd_inb(int port)
@ -242,10 +229,7 @@ static inline void virtual_dma_init(void)
static inline void sun_fd_disable_dma(void)
{
doing_pdma = 0;
if (pdma_base) {
mmu_unlockarea(pdma_base, pdma_areasize);
pdma_base = NULL;
}
pdma_base = NULL;
}
static inline void sun_fd_set_dma_mode(int mode)
@ -275,7 +259,6 @@ static inline void sun_fd_set_dma_count(int length)
static inline void sun_fd_enable_dma(void)
{
pdma_vaddr = mmu_lockarea(pdma_vaddr, pdma_size);
pdma_base = pdma_vaddr;
pdma_areasize = pdma_size;
}
@ -301,38 +284,36 @@ static int sun_floppy_init(void)
{
struct platform_device *op;
struct device_node *dp;
char state[128];
phandle tnode, fd_node;
int num_regs;
struct resource r;
char state[128];
phandle fd_node;
phandle tnode;
int num_regs;
use_virtual_dma = 1;
/* Forget it if we aren't on a machine that could possibly
* ever have a floppy drive.
*/
if((sparc_cpu_model != sun4c && sparc_cpu_model != sun4m) ||
((idprom->id_machtype == (SM_SUN4C | SM_4C_SLC)) ||
(idprom->id_machtype == (SM_SUN4C | SM_4C_ELC)))) {
if (sparc_cpu_model != sun4m) {
/* We certainly don't have a floppy controller. */
goto no_sun_fdc;
}
/* Well, try to find one. */
tnode = prom_getchild(prom_root_node);
fd_node = prom_searchsiblings(tnode, "obio");
if(fd_node != 0) {
if (fd_node != 0) {
tnode = prom_getchild(fd_node);
fd_node = prom_searchsiblings(tnode, "SUNW,fdtwo");
} else {
fd_node = prom_searchsiblings(tnode, "fd");
}
if(fd_node == 0) {
if (fd_node == 0) {
goto no_sun_fdc;
}
/* The sun4m lets us know if the controller is actually usable. */
if(sparc_cpu_model == sun4m &&
prom_getproperty(fd_node, "status", state, sizeof(state)) != -1) {
if (prom_getproperty(fd_node, "status", state, sizeof(state)) != -1) {
if(!strcmp(state, "disabled")) {
goto no_sun_fdc;
}
@ -343,12 +324,12 @@ static int sun_floppy_init(void)
memset(&r, 0, sizeof(r));
r.flags = fd_regs[0].which_io;
r.start = fd_regs[0].phys_addr;
sun_fdc = (struct sun_flpy_controller *)
of_ioremap(&r, 0, fd_regs[0].reg_size, "floppy");
sun_fdc = of_ioremap(&r, 0, fd_regs[0].reg_size, "floppy");
/* Look up irq in platform_device.
* We try "SUNW,fdtwo" and "fd"
*/
op = NULL;
for_each_node_by_name(dp, "SUNW,fdtwo") {
op = of_find_device_by_node(dp);
if (op)
@ -367,7 +348,7 @@ static int sun_floppy_init(void)
FLOPPY_IRQ = op->archdata.irqs[0];
/* Last minute sanity check... */
if(sun_fdc->status_82072 == 0xff) {
if (sun_fdc->status_82072 == 0xff) {
sun_fdc = NULL;
goto no_sun_fdc;
}


@ -161,10 +161,7 @@ unsigned long pdma_areasize;
static void sun_fd_disable_dma(void)
{
doing_pdma = 0;
if (pdma_base) {
mmu_unlockarea(pdma_base, pdma_areasize);
pdma_base = NULL;
}
pdma_base = NULL;
}
static void sun_fd_set_dma_mode(int mode)
@ -194,7 +191,6 @@ static void sun_fd_set_dma_count(int length)
static void sun_fd_enable_dma(void)
{
pdma_vaddr = mmu_lockarea(pdma_vaddr, pdma_size);
pdma_base = pdma_vaddr;
pdma_areasize = pdma_size;
}


@ -2,15 +2,8 @@
#define __SPARC_HEAD_H
#define KERNBASE 0xf0000000 /* First address the kernel will eventually be */
#define LOAD_ADDR 0x4000 /* prom jumps to us here unless this is elf /boot */
#define SUN4C_SEGSZ (1 << 18)
#define SRMMU_L1_KBASE_OFFSET ((KERNBASE>>24)<<2) /* Used in boot remapping. */
#define INTS_ENAB 0x01 /* entry.S uses this. */
#define SUN4_PROM_VECTOR 0xFFE81000 /* SUN4 PROM needs to be hardwired */
#define WRITE_PAUSE nop; nop; nop; /* Have to do this after %wim/%psr chg */
#define NOP_INSN 0x01000000 /* Used to patch sparc_save_state */
/* Here are some trap goodies */
@ -18,9 +11,7 @@
#define TRAP_ENTRY(type, label) \
rd %psr, %l0; b label; rd %wim, %l3; nop;
/* Data/text faults. Defaults to sun4c version at boot time. */
#define SPARC_TFAULT rd %psr, %l0; rd %wim, %l3; b sun4c_fault; mov 1, %l7;
#define SPARC_DFAULT rd %psr, %l0; rd %wim, %l3; b sun4c_fault; mov 0, %l7;
/* Data/text faults */
#define SRMMU_TFAULT rd %psr, %l0; rd %wim, %l3; b srmmu_fault; mov 1, %l7;
#define SRMMU_DFAULT rd %psr, %l0; rd %wim, %l3; b srmmu_fault; mov 0, %l7;
@ -80,16 +71,6 @@
#define TRAP_ENTRY_INTERRUPT(int_level) \
mov int_level, %l7; rd %psr, %l0; b real_irq_entry; rd %wim, %l3;
/* NMI's (Non Maskable Interrupts) are special, you can't keep them
* from coming in, and basically if you get one, the show's over. ;(
* On the sun4c they are usually asynchronous memory errors, on the
* sun4m they could be either due to mem errors or a software
* initiated interrupt from the prom/kern on an SMP box saying "I
* command you to do CPU tricks, read your mailbox for more info."
*/
#define NMI_TRAP \
rd %wim, %l3; b linux_trap_nmi_sun4c; mov %psr, %l0; nop;
/* Window overflows/underflows are special and we need to try to be as
* efficient as possible here....
*/


@ -10,19 +10,6 @@
#ifdef CONFIG_SPARC_LEON
#define ASI_LEON_NOCACHE 0x01
#define ASI_LEON_DCACHE_MISS 0x1
#define ASI_LEON_CACHEREGS 0x02
#define ASI_LEON_IFLUSH 0x10
#define ASI_LEON_DFLUSH 0x11
#define ASI_LEON_MMUFLUSH 0x18
#define ASI_LEON_MMUREGS 0x19
#define ASI_LEON_BYPASS 0x1c
#define ASI_LEON_FLUSH_PAGE 0x10
/* mmu register access, ASI_LEON_MMUREGS */
#define LEON_CNR_CTRL 0x000
#define LEON_CNR_CTXP 0x100
@ -57,29 +44,6 @@
#define LEON_IRQMASK_R 0x0000fffe /* bit 15- 1 of lregs.irqmask */
#define LEON_IRQPRIO_R 0xfffe0000 /* bit 31-17 of lregs.irqmask */
/* leon uart register definitions */
#define LEON_OFF_UDATA 0x0
#define LEON_OFF_USTAT 0x4
#define LEON_OFF_UCTRL 0x8
#define LEON_OFF_USCAL 0xc
#define LEON_UCTRL_RE 0x01
#define LEON_UCTRL_TE 0x02
#define LEON_UCTRL_RI 0x04
#define LEON_UCTRL_TI 0x08
#define LEON_UCTRL_PS 0x10
#define LEON_UCTRL_PE 0x20
#define LEON_UCTRL_FL 0x40
#define LEON_UCTRL_LB 0x80
#define LEON_USTAT_DR 0x01
#define LEON_USTAT_TS 0x02
#define LEON_USTAT_TH 0x04
#define LEON_USTAT_BR 0x08
#define LEON_USTAT_OV 0x10
#define LEON_USTAT_PE 0x20
#define LEON_USTAT_FE 0x40
#define LEON_MCFG2_SRAMDIS 0x00002000
#define LEON_MCFG2_SDRAMEN 0x00004000
#define LEON_MCFG2_SRAMBANKSZ 0x00001e00 /* [12-9] */
@ -89,8 +53,6 @@
#define LEON_TCNT0_MASK 0x7fffff
#define LEON_USTAT_ERROR (LEON_USTAT_OV | LEON_USTAT_PE | LEON_USTAT_FE)
/* no break yet */
#define ASI_LEON3_SYSCTRL 0x02
#define ASI_LEON3_SYSCTRL_ICFG 0x08
@ -278,6 +240,8 @@ static inline int sparc_leon3_cpuid(void)
#define LEON2_CFG_SSIZE_MASK 0x00007000UL
#ifndef __ASSEMBLY__
struct vm_area_struct;
extern unsigned long srmmu_swprobe(unsigned long vaddr, unsigned long *paddr);
extern void leon_flush_icache_all(void);
extern void leon_flush_dcache_all(void);
@ -285,15 +249,6 @@ extern void leon_flush_cache_all(void);
extern void leon_flush_tlb_all(void);
extern int leon_flush_during_switch;
extern int leon_flush_needed(void);
struct vm_area_struct;
extern void leon_flush_icache_all(void);
extern void leon_flush_dcache_all(void);
extern void leon_flush_pcache_all(struct vm_area_struct *vma, unsigned long page);
extern void leon_flush_cache_all(void);
extern void leon_flush_tlb_all(void);
extern int leon_flush_during_switch;
extern int leon_flush_needed(void);
extern void leon_flush_pcache_all(struct vm_area_struct *vma, unsigned long page);
/* struct that hold LEON3 cache configuration registers */
@ -321,22 +276,12 @@ extern unsigned int leon_build_device_irq(unsigned int real_irq,
extern void leon_update_virq_handling(unsigned int virq,
irq_flow_handler_t flow_handler,
const char *name, int do_ack);
extern void leon_clear_clock_irq(void);
extern void leon_load_profile_irq(int cpu, unsigned int limit);
extern void leon_init_timers(irq_handler_t counter_fn);
extern void leon_clear_clock_irq(void);
extern void leon_load_profile_irq(int cpu, unsigned int limit);
extern void leon_init_timers(void);
extern void leon_trans_init(struct device_node *dp);
extern void leon_node_init(struct device_node *dp, struct device_node ***nextp);
extern void leon_init_IRQ(void);
extern void leon_init(void);
extern unsigned long srmmu_swprobe(unsigned long vaddr, unsigned long *paddr);
extern void init_leon(void);
extern void poke_leonsparc(void);
extern void leon3_getCacheRegs(struct leon3_cacheregs *regs);
extern int leon_flush_needed(void);
extern void leon_switch_mm(void);
extern int srmmu_swprobe_trace;
extern int leon3_ticker_irq;
#ifdef CONFIG_SMP


@ -12,11 +12,6 @@ struct Sun_Machine_Models {
unsigned char id_machtype;
};
/* Current number of machines we know about that has an IDPROM
* machtype entry including one entry for the 0x80 OBP machines.
*/
#define NUM_SUN_MACHINES 16
/* The machine type in the idprom area looks like this:
*
* ---------------
@ -24,36 +19,20 @@ struct Sun_Machine_Models {
* ---------------
* 7 4 3 0
*
* The ARCH field determines the architecture line (sun4, sun4c, etc).
* The ARCH field determines the architecture line (sun4m, etc).
* The MACH field determines the machine make within that architecture.
*/
#define SM_ARCH_MASK 0xf0
#define SM_SUN4 0x20
#define M_LEON 0x30
#define SM_SUN4C 0x50
#define SM_SUN4M 0x70
#define SM_SUN4M_OBP 0x80
#define SM_TYP_MASK 0x0f
/* Sun4 machines */
#define SM_4_260 0x01 /* Sun 4/200 series */
#define SM_4_110 0x02 /* Sun 4/100 series */
#define SM_4_330 0x03 /* Sun 4/300 series */
#define SM_4_470 0x04 /* Sun 4/400 series */
/* Leon machines */
#define M_LEON3_SOC 0x02 /* Leon3 SoC */
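Given the ARCH/MACH split described in the comment above, identifying a machine from the idprom machtype byte is a simple mask-and-compare. The helper below is only a hypothetical illustration of how the remaining constants combine; it is not part of the header.

/* Hypothetical example: true if the machtype byte describes a LEON3 SoC. */
static inline int idprom_is_leon3_soc(unsigned char machtype)
{
	return (machtype & SM_ARCH_MASK) == M_LEON &&
	       (machtype & SM_TYP_MASK) == M_LEON3_SOC;
}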
/* Sun4c machines Full Name - PROM NAME */
#define SM_4C_SS1 0x01 /* Sun4c SparcStation 1 - Sun 4/60 */
#define SM_4C_IPC 0x02 /* Sun4c SparcStation IPC - Sun 4/40 */
#define SM_4C_SS1PLUS 0x03 /* Sun4c SparcStation 1+ - Sun 4/65 */
#define SM_4C_SLC 0x04 /* Sun4c SparcStation SLC - Sun 4/20 */
#define SM_4C_SS2 0x05 /* Sun4c SparcStation 2 - Sun 4/75 */
#define SM_4C_ELC 0x06 /* Sun4c SparcStation ELC - Sun 4/25 */
#define SM_4C_IPX 0x07 /* Sun4c SparcStation IPX - Sun 4/50 */
/* Sun4m machines, these predate the OpenBoot. These values only mean
* something if the value in the ARCH field is SM_SUN4M, if it is
* SM_SUN4M_OBP then you have the following situation:


@ -8,14 +8,10 @@
#define _SPARC_MBUS_H
#include <asm/ross.h> /* HyperSparc stuff */
#include <asm/cypress.h> /* Cypress Chips */
#include <asm/viking.h> /* Ugh, bug city... */
enum mbus_module {
HyperSparc = 0,
Cypress = 1,
Cypress_vE = 2,
Cypress_vD = 3,
Swift_ok = 4,
Swift_bad_c = 5,
Swift_lots_o_bugs = 6,


@ -1,51 +0,0 @@
#ifndef _SPARC_MEMREG_H
#define _SPARC_MEMREG_H
/* memreg.h: Definitions of the values found in the synchronous
* and asynchronous memory error registers when a fault
* occurs on the sun4c.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
/* First the synchronous error codes, these are usually just
* normal page faults.
*/
#define SUN4C_SYNC_WDRESET 0x0001 /* watchdog reset */
#define SUN4C_SYNC_SIZE 0x0002 /* bad access size? whuz this? */
#define SUN4C_SYNC_PARITY 0x0008 /* bad ram chips caused a parity error */
#define SUN4C_SYNC_SBUS 0x0010 /* the SBUS had some problems... */
#define SUN4C_SYNC_NOMEM 0x0020 /* translation to non-existent ram */
#define SUN4C_SYNC_PROT 0x0040 /* access violated pte protections */
#define SUN4C_SYNC_NPRESENT 0x0080 /* pte said that page was not present */
#define SUN4C_SYNC_BADWRITE 0x8000 /* while writing something went bogus */
#define SUN4C_SYNC_BOLIXED \
(SUN4C_SYNC_WDRESET | SUN4C_SYNC_SIZE | SUN4C_SYNC_SBUS | \
SUN4C_SYNC_NOMEM | SUN4C_SYNC_PARITY)
/* Now the asynchronous error codes, these are almost always produced
* by the cache writing things back to memory and getting a bad translation.
* Bad DVMA transactions can cause these faults too.
*/
#define SUN4C_ASYNC_BADDVMA 0x0010 /* error during DVMA access */
#define SUN4C_ASYNC_NOMEM 0x0020 /* write back pointed to bad phys addr */
#define SUN4C_ASYNC_BADWB 0x0080 /* write back points to non-present page */
/* Memory parity error register with associated bit constants. */
#ifndef __ASSEMBLY__
extern __volatile__ unsigned long __iomem *sun4c_memerr_reg;
#endif
#define SUN4C_MPE_ERROR 0x80 /* Parity error detected. (ro) */
#define SUN4C_MPE_MULTI 0x40 /* Multiple parity errors detected. (ro) */
#define SUN4C_MPE_TEST 0x20 /* Write inverse parity. (rw) */
#define SUN4C_MPE_CHECK 0x10 /* Enable parity checking. (rw) */
#define SUN4C_MPE_ERR00 0x08 /* Parity error in bits 0-7. (ro) */
#define SUN4C_MPE_ERR08 0x04 /* Parity error in bits 8-15. (ro) */
#define SUN4C_MPE_ERR16 0x02 /* Parity error in bits 16-23. (ro) */
#define SUN4C_MPE_ERR24 0x01 /* Parity error in bits 24-31. (ro) */
#define SUN4C_MPE_ERRS 0x0F /* Bit mask for the error bits. (ro) */
#endif /* !(_SPARC_MEMREG_H) */


@ -1,8 +1,6 @@
#ifndef __SPARC_MMU_CONTEXT_H
#define __SPARC_MMU_CONTEXT_H
#include <asm/btfixup.h>
#ifndef __ASSEMBLY__
#include <asm-generic/mm_hooks.h>
@ -23,14 +21,11 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
* all the page tables have been flushed. Our job is to destroy
* any remaining processor-specific state.
*/
BTFIXUPDEF_CALL(void, destroy_context, struct mm_struct *)
#define destroy_context(mm) BTFIXUP_CALL(destroy_context)(mm)
void destroy_context(struct mm_struct *mm);
/* Switch the current MM context. */
BTFIXUPDEF_CALL(void, switch_mm, struct mm_struct *, struct mm_struct *, struct task_struct *)
#define switch_mm(old_mm, mm, tsk) BTFIXUP_CALL(switch_mm)(old_mm, mm, tsk)
void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
struct task_struct *tsk);
#define deactivate_mm(tsk,mm) do { } while (0)


@ -220,19 +220,6 @@ static inline void cc_set_igen(unsigned gen)
"i" (ASI_M_MXCC));
}
/* +-------+-------------+-----------+------------------------------------+
* | bcast | devid | sid | levels mask |
* +-------+-------------+-----------+------------------------------------+
* 31 30 23 22 15 14 0
*/
#define IGEN_MESSAGE(bcast, devid, sid, levels) \
(((bcast) << 31) | ((devid) << 23) | ((sid) << 15) | (levels))
static inline void sun4d_send_ipi(int cpu, int level)
{
cc_set_igen(IGEN_MESSAGE(0, cpu << 3, 6 + ((level >> 1) & 7), 1 << (level - 1)));
}
#endif /* !__ASSEMBLY__ */
#endif /* !(_SPARC_OBIO_H) */


@ -105,14 +105,6 @@ extern void prom_write(const char *buf, unsigned int len);
extern int prom_startcpu(int cpunode, struct linux_prom_registers *context_table,
int context, char *program_counter);
/* Sun4/sun4c specific memory-management startup hook. */
/* Map the passed segment in the given context at the passed
* virtual address.
*/
extern void prom_putsegment(int context, unsigned long virt_addr,
int physical_segment);
/* Initialize the memory lists based upon the prom version. */
void prom_meminit(void);


@ -14,8 +14,6 @@
#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
#include <asm/btfixup.h>
#ifndef __ASSEMBLY__
#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
@ -45,12 +43,6 @@ struct sparc_phys_banks {
extern struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS+1];
/* Cache alias structure. Entry is valid if context != -1. */
struct cache_palias {
unsigned long vaddr;
int context;
};
/* passing structs on the Sparc slow us down tremendously... */
/* #define STRICT_MM_TYPECHECKS */
@ -116,10 +108,7 @@ typedef unsigned long iopgprot_t;
typedef struct page *pgtable_t;
extern unsigned long sparc_unmapped_base;
BTFIXUPDEF_SETHI(sparc_unmapped_base)
#define TASK_UNMAPPED_BASE BTFIXUP_SETHI(sparc_unmapped_base)
#define TASK_UNMAPPED_BASE sparc_unmapped_base
#else /* !(__ASSEMBLY__) */


@ -4,8 +4,10 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/pgtsrmmu.h>
#include <asm/pgtable.h>
#include <asm/vaddrs.h>
#include <asm/page.h>
#include <asm/btfixup.h>
struct page;
@ -15,54 +17,74 @@ extern struct pgtable_cache_struct {
unsigned long pgtable_cache_sz;
unsigned long pgd_cache_sz;
} pgt_quicklists;
unsigned long srmmu_get_nocache(int size, int align);
void srmmu_free_nocache(unsigned long vaddr, int size);
#define pgd_quicklist (pgt_quicklists.pgd_cache)
#define pmd_quicklist ((unsigned long *)0)
#define pte_quicklist (pgt_quicklists.pte_cache)
#define pgtable_cache_size (pgt_quicklists.pgtable_cache_sz)
#define pgd_cache_size (pgt_quicklists.pgd_cache_sz)
extern void check_pgt_cache(void);
BTFIXUPDEF_CALL(void, do_check_pgt_cache, int, int)
#define do_check_pgt_cache(low,high) BTFIXUP_CALL(do_check_pgt_cache)(low,high)
#define check_pgt_cache() do { } while (0)
BTFIXUPDEF_CALL(pgd_t *, get_pgd_fast, void)
#define get_pgd_fast() BTFIXUP_CALL(get_pgd_fast)()
BTFIXUPDEF_CALL(void, free_pgd_fast, pgd_t *)
#define free_pgd_fast(pgd) BTFIXUP_CALL(free_pgd_fast)(pgd)
pgd_t *get_pgd_fast(void);
static inline void free_pgd_fast(pgd_t *pgd)
{
srmmu_free_nocache((unsigned long)pgd, SRMMU_PGD_TABLE_SIZE);
}
#define pgd_free(mm, pgd) free_pgd_fast(pgd)
#define pgd_alloc(mm) get_pgd_fast()
BTFIXUPDEF_CALL(void, pgd_set, pgd_t *, pmd_t *)
#define pgd_set(pgdp,pmdp) BTFIXUP_CALL(pgd_set)(pgdp,pmdp)
static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{
unsigned long pa = __nocache_pa((unsigned long)pmdp);
set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (pa >> 4)));
}
#define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
BTFIXUPDEF_CALL(pmd_t *, pmd_alloc_one, struct mm_struct *, unsigned long)
#define pmd_alloc_one(mm, address) BTFIXUP_CALL(pmd_alloc_one)(mm, address)
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
unsigned long address)
{
return (pmd_t *)srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE,
SRMMU_PMD_TABLE_SIZE);
}
BTFIXUPDEF_CALL(void, free_pmd_fast, pmd_t *)
#define free_pmd_fast(pmd) BTFIXUP_CALL(free_pmd_fast)(pmd)
static inline void free_pmd_fast(pmd_t * pmd)
{
srmmu_free_nocache((unsigned long)pmd, SRMMU_PMD_TABLE_SIZE);
}
#define pmd_free(mm, pmd) free_pmd_fast(pmd)
#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
BTFIXUPDEF_CALL(void, pmd_populate, pmd_t *, struct page *)
#define pmd_populate(MM, PMD, PTE) BTFIXUP_CALL(pmd_populate)(PMD, PTE)
void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep);
#define pmd_pgtable(pmd) pmd_page(pmd)
BTFIXUPDEF_CALL(void, pmd_set, pmd_t *, pte_t *)
#define pmd_populate_kernel(MM, PMD, PTE) BTFIXUP_CALL(pmd_set)(PMD, PTE)
BTFIXUPDEF_CALL(pgtable_t , pte_alloc_one, struct mm_struct *, unsigned long)
#define pte_alloc_one(mm, address) BTFIXUP_CALL(pte_alloc_one)(mm, address)
BTFIXUPDEF_CALL(pte_t *, pte_alloc_one_kernel, struct mm_struct *, unsigned long)
#define pte_alloc_one_kernel(mm, addr) BTFIXUP_CALL(pte_alloc_one_kernel)(mm, addr)
void pmd_set(pmd_t *pmdp, pte_t *ptep);
#define pmd_populate_kernel(MM, PMD, PTE) pmd_set(PMD, PTE)
BTFIXUPDEF_CALL(void, free_pte_fast, pte_t *)
#define pte_free_kernel(mm, pte) BTFIXUP_CALL(free_pte_fast)(pte)
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address);
BTFIXUPDEF_CALL(void, pte_free, pgtable_t )
#define pte_free(mm, pte) BTFIXUP_CALL(pte_free)(pte)
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
return (pte_t *)srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
}
static inline void free_pte_fast(pte_t *pte)
{
srmmu_free_nocache((unsigned long)pte, PTE_SIZE);
}
#define pte_free_kernel(mm, pte) free_pte_fast(pte)
void pte_free(struct mm_struct * mm, pgtable_t pte);
#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
#endif /* _SPARC_PGALLOC_H */


@ -16,11 +16,9 @@
#include <linux/spinlock.h>
#include <linux/swap.h>
#include <asm/types.h>
#include <asm/pgtsun4c.h>
#include <asm/pgtsrmmu.h>
#include <asm/vac-ops.h>
#include <asm/vaddrs.h>
#include <asm/oplib.h>
#include <asm/btfixup.h>
#include <asm/cpu_type.h>
@ -30,87 +28,55 @@ struct page;
extern void load_mmu(void);
extern unsigned long calc_highpages(void);
BTFIXUPDEF_SIMM13(pgdir_shift)
BTFIXUPDEF_SETHI(pgdir_size)
BTFIXUPDEF_SETHI(pgdir_mask)
BTFIXUPDEF_SIMM13(ptrs_per_pmd)
BTFIXUPDEF_SIMM13(ptrs_per_pgd)
BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
#define pte_ERROR(e) __builtin_trap()
#define pmd_ERROR(e) __builtin_trap()
#define pgd_ERROR(e) __builtin_trap()
BTFIXUPDEF_INT(page_none)
BTFIXUPDEF_INT(page_copy)
BTFIXUPDEF_INT(page_readonly)
BTFIXUPDEF_INT(page_kernel)
#define PMD_SHIFT SUN4C_PMD_SHIFT
#define PMD_SHIFT 22
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
#define PMD_ALIGN(__addr) (((__addr) + ~PMD_MASK) & PMD_MASK)
#define PGDIR_SHIFT BTFIXUP_SIMM13(pgdir_shift)
#define PGDIR_SIZE BTFIXUP_SETHI(pgdir_size)
#define PGDIR_MASK BTFIXUP_SETHI(pgdir_mask)
#define PGDIR_SHIFT SRMMU_PGDIR_SHIFT
#define PGDIR_SIZE SRMMU_PGDIR_SIZE
#define PGDIR_MASK SRMMU_PGDIR_MASK
#define PTRS_PER_PTE 1024
#define PTRS_PER_PMD BTFIXUP_SIMM13(ptrs_per_pmd)
#define PTRS_PER_PGD BTFIXUP_SIMM13(ptrs_per_pgd)
#define USER_PTRS_PER_PGD BTFIXUP_SIMM13(user_ptrs_per_pgd)
#define PTRS_PER_PMD SRMMU_PTRS_PER_PMD
#define PTRS_PER_PGD SRMMU_PTRS_PER_PGD
#define USER_PTRS_PER_PGD PAGE_OFFSET / SRMMU_PGDIR_SIZE
#define FIRST_USER_ADDRESS 0
#define PTE_SIZE (PTRS_PER_PTE*4)
#define PAGE_NONE __pgprot(BTFIXUP_INT(page_none))
extern pgprot_t PAGE_SHARED;
#define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
#define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
extern unsigned long page_kernel;
#ifdef MODULE
#define PAGE_KERNEL page_kernel
#else
#define PAGE_KERNEL __pgprot(BTFIXUP_INT(page_kernel))
#endif
#define PAGE_NONE SRMMU_PAGE_NONE
#define PAGE_SHARED SRMMU_PAGE_SHARED
#define PAGE_COPY SRMMU_PAGE_COPY
#define PAGE_READONLY SRMMU_PAGE_RDONLY
#define PAGE_KERNEL SRMMU_PAGE_KERNEL
/* Top-level page directory */
extern pgd_t swapper_pg_dir[1024];
extern void paging_init(void);
/* Page table for 0-4MB for everybody, on the Sparc this
* holds the same as on the i386.
*/
extern pte_t pg0[1024];
extern pte_t pg1[1024];
extern pte_t pg2[1024];
extern pte_t pg3[1024];
extern unsigned long ptr_in_current_pgd;
/* Here is a trick, since mmap.c need the initializer elements for
* protection_map[] to be constant at compile time, I set the following
* to all zeros. I set it to the real values after I link in the
* appropriate MMU page table routines at boot time.
*/
#define __P000 __pgprot(0)
#define __P001 __pgprot(0)
#define __P010 __pgprot(0)
#define __P011 __pgprot(0)
#define __P100 __pgprot(0)
#define __P101 __pgprot(0)
#define __P110 __pgprot(0)
#define __P111 __pgprot(0)
/* xwr */
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P010 PAGE_COPY
#define __P011 PAGE_COPY
#define __P100 PAGE_READONLY
#define __P101 PAGE_READONLY
#define __P110 PAGE_COPY
#define __P111 PAGE_COPY
#define __S000 __pgprot(0)
#define __S001 __pgprot(0)
#define __S010 __pgprot(0)
#define __S011 __pgprot(0)
#define __S100 __pgprot(0)
#define __S101 __pgprot(0)
#define __S110 __pgprot(0)
#define __S111 __pgprot(0)
#define __S000 PAGE_NONE
#define __S001 PAGE_READONLY
#define __S010 PAGE_SHARED
#define __S011 PAGE_SHARED
#define __S100 PAGE_READONLY
#define __S101 PAGE_READONLY
#define __S110 PAGE_SHARED
#define __S111 PAGE_SHARED
extern int num_contexts;
@ -137,82 +103,137 @@ extern unsigned long empty_zero_page;
#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))
/*
* In general all page table modifications should use the V8 atomic
* swap instruction. This ensures the mmu and the cpu are in sync
* with respect to ref/mod bits in the page tables.
*/
BTFIXUPDEF_CALL_CONST(struct page *, pmd_page, pmd_t)
BTFIXUPDEF_CALL_CONST(unsigned long, pgd_page_vaddr, pgd_t)
static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
{
__asm__ __volatile__("swap [%2], %0" : "=&r" (value) : "0" (value), "r" (addr));
return value;
}
#define pmd_page(pmd) BTFIXUP_CALL(pmd_page)(pmd)
#define pgd_page_vaddr(pgd) BTFIXUP_CALL(pgd_page_vaddr)(pgd)
/* Certain architectures need to do special things when pte's
* within a page table are directly modified. Thus, the following
* hook is made available.
*/
BTFIXUPDEF_CALL_CONST(int, pte_present, pte_t)
BTFIXUPDEF_CALL(void, pte_clear, pte_t *)
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
srmmu_swap((unsigned long *)ptep, pte_val(pteval));
}
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
static inline int srmmu_device_memory(unsigned long x)
{
return ((x & 0xF0000000) != 0);
}
static inline struct page *pmd_page(pmd_t pmd)
{
if (srmmu_device_memory(pmd_val(pmd)))
BUG();
return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4));
}
static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
if (srmmu_device_memory(pgd_val(pgd))) {
return ~0;
} else {
unsigned long v = pgd_val(pgd) & SRMMU_PTD_PMASK;
return (unsigned long)__nocache_va(v << 4);
}
}
static inline int pte_present(pte_t pte)
{
return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE);
}
static inline int pte_none(pte_t pte)
{
return !pte_val(pte);
}
#define pte_present(pte) BTFIXUP_CALL(pte_present)(pte)
#define pte_clear(mm,addr,pte) BTFIXUP_CALL(pte_clear)(pte)
static inline void __pte_clear(pte_t *ptep)
{
set_pte(ptep, __pte(0));
}
BTFIXUPDEF_CALL_CONST(int, pmd_bad, pmd_t)
BTFIXUPDEF_CALL_CONST(int, pmd_present, pmd_t)
BTFIXUPDEF_CALL(void, pmd_clear, pmd_t *)
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
__pte_clear(ptep);
}
static inline int pmd_bad(pmd_t pmd)
{
return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}
static inline int pmd_present(pmd_t pmd)
{
return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}
static inline int pmd_none(pmd_t pmd)
{
return !pmd_val(pmd);
}
#define pmd_bad(pmd) BTFIXUP_CALL(pmd_bad)(pmd)
#define pmd_present(pmd) BTFIXUP_CALL(pmd_present)(pmd)
#define pmd_clear(pmd) BTFIXUP_CALL(pmd_clear)(pmd)
static inline void pmd_clear(pmd_t *pmdp)
{
int i;
for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++)
set_pte((pte_t *)&pmdp->pmdv[i], __pte(0));
}
BTFIXUPDEF_CALL_CONST(int, pgd_none, pgd_t)
BTFIXUPDEF_CALL_CONST(int, pgd_bad, pgd_t)
BTFIXUPDEF_CALL_CONST(int, pgd_present, pgd_t)
BTFIXUPDEF_CALL(void, pgd_clear, pgd_t *)
static inline int pgd_none(pgd_t pgd)
{
return !(pgd_val(pgd) & 0xFFFFFFF);
}
#define pgd_none(pgd) BTFIXUP_CALL(pgd_none)(pgd)
#define pgd_bad(pgd) BTFIXUP_CALL(pgd_bad)(pgd)
#define pgd_present(pgd) BTFIXUP_CALL(pgd_present)(pgd)
#define pgd_clear(pgd) BTFIXUP_CALL(pgd_clear)(pgd)
static inline int pgd_bad(pgd_t pgd)
{
return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}
static inline int pgd_present(pgd_t pgd)
{
return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}
static inline void pgd_clear(pgd_t *pgdp)
{
set_pte((pte_t *)pgdp, __pte(0));
}
/*
* The following only work if pte_present() is true.
* Undefined behaviour if not..
*/
BTFIXUPDEF_HALF(pte_writei)
BTFIXUPDEF_HALF(pte_dirtyi)
BTFIXUPDEF_HALF(pte_youngi)
static int pte_write(pte_t pte) __attribute_const__;
static inline int pte_write(pte_t pte)
{
return pte_val(pte) & BTFIXUP_HALF(pte_writei);
return pte_val(pte) & SRMMU_WRITE;
}
static int pte_dirty(pte_t pte) __attribute_const__;
static inline int pte_dirty(pte_t pte)
{
return pte_val(pte) & BTFIXUP_HALF(pte_dirtyi);
return pte_val(pte) & SRMMU_DIRTY;
}
static int pte_young(pte_t pte) __attribute_const__;
static inline int pte_young(pte_t pte)
{
return pte_val(pte) & BTFIXUP_HALF(pte_youngi);
return pte_val(pte) & SRMMU_REF;
}
/*
* The following only work if pte_present() is not true.
*/
BTFIXUPDEF_HALF(pte_filei)
static int pte_file(pte_t pte) __attribute_const__;
static inline int pte_file(pte_t pte)
{
return pte_val(pte) & BTFIXUP_HALF(pte_filei);
return pte_val(pte) & SRMMU_FILE;
}
static inline int pte_special(pte_t pte)
@ -220,68 +241,85 @@ static inline int pte_special(pte_t pte)
return 0;
}
/*
*/
BTFIXUPDEF_HALF(pte_wrprotecti)
BTFIXUPDEF_HALF(pte_mkcleani)
BTFIXUPDEF_HALF(pte_mkoldi)
static pte_t pte_wrprotect(pte_t pte) __attribute_const__;
static inline pte_t pte_wrprotect(pte_t pte)
{
return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_wrprotecti));
return __pte(pte_val(pte) & ~SRMMU_WRITE);
}
static pte_t pte_mkclean(pte_t pte) __attribute_const__;
static inline pte_t pte_mkclean(pte_t pte)
{
return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_mkcleani));
return __pte(pte_val(pte) & ~SRMMU_DIRTY);
}
static pte_t pte_mkold(pte_t pte) __attribute_const__;
static inline pte_t pte_mkold(pte_t pte)
{
return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_mkoldi));
return __pte(pte_val(pte) & ~SRMMU_REF);
}
BTFIXUPDEF_CALL_CONST(pte_t, pte_mkwrite, pte_t)
BTFIXUPDEF_CALL_CONST(pte_t, pte_mkdirty, pte_t)
BTFIXUPDEF_CALL_CONST(pte_t, pte_mkyoung, pte_t)
static inline pte_t pte_mkwrite(pte_t pte)
{
return __pte(pte_val(pte) | SRMMU_WRITE);
}
#define pte_mkwrite(pte) BTFIXUP_CALL(pte_mkwrite)(pte)
#define pte_mkdirty(pte) BTFIXUP_CALL(pte_mkdirty)(pte)
#define pte_mkyoung(pte) BTFIXUP_CALL(pte_mkyoung)(pte)
static inline pte_t pte_mkdirty(pte_t pte)
{
return __pte(pte_val(pte) | SRMMU_DIRTY);
}
static inline pte_t pte_mkyoung(pte_t pte)
{
return __pte(pte_val(pte) | SRMMU_REF);
}
#define pte_mkspecial(pte) (pte)
#define pfn_pte(pfn, prot) mk_pte(pfn_to_page(pfn), prot)
BTFIXUPDEF_CALL(unsigned long, pte_pfn, pte_t)
#define pte_pfn(pte) BTFIXUP_CALL(pte_pfn)(pte)
static inline unsigned long pte_pfn(pte_t pte)
{
if (srmmu_device_memory(pte_val(pte))) {
/* Just return something that will cause
* pfn_valid() to return false. This makes
* copy_one_pte() just copy the PTE
* over directly.
*/
return ~0UL;
}
return (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT-4);
}
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*/
BTFIXUPDEF_CALL_CONST(pte_t, mk_pte, struct page *, pgprot_t)
static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot));
}
BTFIXUPDEF_CALL_CONST(pte_t, mk_pte_phys, unsigned long, pgprot_t)
BTFIXUPDEF_CALL_CONST(pte_t, mk_pte_io, unsigned long, pgprot_t, int)
BTFIXUPDEF_CALL_CONST(pgprot_t, pgprot_noncached, pgprot_t)
static inline pte_t mk_pte_phys(unsigned long page, pgprot_t pgprot)
{
return __pte(((page) >> 4) | pgprot_val(pgprot));
}
#define mk_pte(page,pgprot) BTFIXUP_CALL(mk_pte)(page,pgprot)
#define mk_pte_phys(page,pgprot) BTFIXUP_CALL(mk_pte_phys)(page,pgprot)
#define mk_pte_io(page,pgprot,space) BTFIXUP_CALL(mk_pte_io)(page,pgprot,space)
static inline pte_t mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
{
return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot));
}
#define pgprot_noncached(pgprot) BTFIXUP_CALL(pgprot_noncached)(pgprot)
BTFIXUPDEF_INT(pte_modify_mask)
#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
prot &= ~__pgprot(SRMMU_CACHE);
return prot;
}
static pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute_const__;
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
return __pte((pte_val(pte) & BTFIXUP_INT(pte_modify_mask)) |
return __pte((pte_val(pte) & SRMMU_CHG_MASK) |
pgprot_val(newprot));
}
@ -294,74 +332,69 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
/* Find an entry in the second-level page table.. */
BTFIXUPDEF_CALL(pmd_t *, pmd_offset, pgd_t *, unsigned long)
#define pmd_offset(dir,addr) BTFIXUP_CALL(pmd_offset)(dir,addr)
static inline pmd_t *pmd_offset(pgd_t * dir, unsigned long address)
{
return (pmd_t *) pgd_page_vaddr(*dir) +
((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
}
/* Find an entry in the third-level page table.. */
BTFIXUPDEF_CALL(pte_t *, pte_offset_kernel, pmd_t *, unsigned long)
#define pte_offset_kernel(dir,addr) BTFIXUP_CALL(pte_offset_kernel)(dir,addr)
pte_t *pte_offset_kernel(pmd_t * dir, unsigned long address);
/*
* This shortcut works on sun4m (and sun4d) because the nocache area is static,
* and sun4c is guaranteed to have no highmem anyway.
* This shortcut works on sun4m (and sun4d) because the nocache area is static.
*/
#define pte_offset_map(d, a) pte_offset_kernel(d,a)
#define pte_unmap(pte) do{}while(0)
/* Certain architectures need to do special things when pte's
* within a page table are directly modified. Thus, the following
* hook is made available.
*/
BTFIXUPDEF_CALL(void, set_pte, pte_t *, pte_t)
#define set_pte(ptep,pteval) BTFIXUP_CALL(set_pte)(ptep,pteval)
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
struct seq_file;
BTFIXUPDEF_CALL(void, mmu_info, struct seq_file *)
#define mmu_info(p) BTFIXUP_CALL(mmu_info)(p)
void mmu_info(struct seq_file *m);
/* Fault handler stuff... */
#define FAULT_CODE_PROT 0x1
#define FAULT_CODE_WRITE 0x2
#define FAULT_CODE_USER 0x4
BTFIXUPDEF_CALL(void, update_mmu_cache, struct vm_area_struct *, unsigned long, pte_t *)
#define update_mmu_cache(vma, address, ptep) do { } while (0)
#define update_mmu_cache(vma,addr,ptep) BTFIXUP_CALL(update_mmu_cache)(vma,addr,ptep)
BTFIXUPDEF_CALL(void, sparc_mapiorange, unsigned int, unsigned long,
unsigned long, unsigned int)
BTFIXUPDEF_CALL(void, sparc_unmapiorange, unsigned long, unsigned int)
#define sparc_mapiorange(bus,pa,va,len) BTFIXUP_CALL(sparc_mapiorange)(bus,pa,va,len)
#define sparc_unmapiorange(va,len) BTFIXUP_CALL(sparc_unmapiorange)(va,len)
extern int invalid_segment;
void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
unsigned long xva, unsigned int len);
void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len);
/* Encode and de-code a swap entry */
BTFIXUPDEF_CALL(unsigned long, __swp_type, swp_entry_t)
BTFIXUPDEF_CALL(unsigned long, __swp_offset, swp_entry_t)
BTFIXUPDEF_CALL(swp_entry_t, __swp_entry, unsigned long, unsigned long)
static inline unsigned long __swp_type(swp_entry_t entry)
{
return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
}
#define __swp_type(__x) BTFIXUP_CALL(__swp_type)(__x)
#define __swp_offset(__x) BTFIXUP_CALL(__swp_offset)(__x)
#define __swp_entry(__type,__off) BTFIXUP_CALL(__swp_entry)(__type,__off)
static inline unsigned long __swp_offset(swp_entry_t entry)
{
return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK;
}
static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
return (swp_entry_t) {
(type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT
| (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT };
}
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
/* file-offset-in-pte helpers */
BTFIXUPDEF_CALL(unsigned long, pte_to_pgoff, pte_t pte);
BTFIXUPDEF_CALL(pte_t, pgoff_to_pte, unsigned long pgoff);
static inline unsigned long pte_to_pgoff(pte_t pte)
{
return pte_val(pte) >> SRMMU_PTE_FILE_SHIFT;
}
#define pte_to_pgoff(pte) BTFIXUP_CALL(pte_to_pgoff)(pte)
#define pgoff_to_pte(off) BTFIXUP_CALL(pgoff_to_pte)(off)
static inline pte_t pgoff_to_pte(unsigned long pgoff)
{
return __pte((pgoff << SRMMU_PTE_FILE_SHIFT) | SRMMU_FILE);
}
/*
* This is made a constant because mm/fremap.c required a constant.
* Note that layout of these bits is different between sun4c.c and srmmu.c.
*/
#define PTE_FILE_MAX_BITS 24
@ -399,9 +432,6 @@ static inline unsigned long
__get_phys (unsigned long addr)
{
switch (sparc_cpu_model){
case sun4:
case sun4c:
return sun4c_get_pte (addr) << PAGE_SHIFT;
case sun4m:
case sun4d:
return ((srmmu_get_pte (addr) & 0xffffff00) << 4);
@ -414,9 +444,6 @@ static inline int
__get_iospace (unsigned long addr)
{
switch (sparc_cpu_model){
case sun4:
case sun4c:
return -1; /* Don't check iospace on sun4c */
case sun4m:
case sun4d:
return (srmmu_get_pte (addr) >> 28);
@ -463,7 +490,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
flush_tlb_page(__vma, __address); \
} \
(sparc_cpu_model == sun4c) || __changed; \
__changed; \
})
#include <asm-generic/pgtable.h>
@ -471,10 +498,8 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
#endif /* !(__ASSEMBLY__) */
#define VMALLOC_START _AC(0xfe600000,UL)
/* XXX Alter this when I get around to fixing sun4c - Anton */
#define VMALLOC_END _AC(0xffc00000,UL)
/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA


@ -717,10 +717,6 @@ extern unsigned long find_ecache_flush_span(unsigned long size);
struct seq_file;
extern void mmu_info(struct seq_file *);
/* These do nothing with the way I have things setup. */
#define mmu_lockarea(vaddr, len) (vaddr)
#define mmu_unlockarea(vaddr, len) do { } while(0)
struct vm_area_struct;
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);


@ -173,17 +173,6 @@ static inline void srmmu_set_ctable_ptr(unsigned long paddr)
"memory");
}
static inline unsigned long srmmu_get_ctable_ptr(void)
{
unsigned int retval;
__asm__ __volatile__("lda [%1] %2, %0\n\t" :
"=r" (retval) :
"r" (SRMMU_CTXTBL_PTR),
"i" (ASI_M_MMUREGS));
return (retval & SRMMU_CTX_PMASK) << 4;
}
static inline void srmmu_set_context(int context)
{
__asm__ __volatile__("sta %0, [%1] %2\n\t" : :
@ -231,42 +220,6 @@ static inline void srmmu_flush_whole_tlb(void)
}
/* These flush types are not available on all chips... */
static inline void srmmu_flush_tlb_ctx(void)
{
__asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
"r" (0x300), /* Flush TLB ctx.. */
"i" (ASI_M_FLUSH_PROBE) : "memory");
}
static inline void srmmu_flush_tlb_region(unsigned long addr)
{
addr &= SRMMU_PGDIR_MASK;
__asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
"r" (addr | 0x200), /* Flush TLB region.. */
"i" (ASI_M_FLUSH_PROBE) : "memory");
}
static inline void srmmu_flush_tlb_segment(unsigned long addr)
{
addr &= SRMMU_REAL_PMD_MASK;
__asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
"r" (addr | 0x100), /* Flush TLB segment.. */
"i" (ASI_M_FLUSH_PROBE) : "memory");
}
static inline void srmmu_flush_tlb_page(unsigned long page)
{
page &= PAGE_MASK;
__asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
"r" (page), /* Flush TLB page.. */
"i" (ASI_M_FLUSH_PROBE) : "memory");
}
#ifndef CONFIG_SPARC_LEON
static inline unsigned long srmmu_hwprobe(unsigned long vaddr)
{
@ -294,9 +247,6 @@ srmmu_get_pte (unsigned long addr)
return entry;
}
extern unsigned long (*srmmu_read_physical)(unsigned long paddr);
extern void (*srmmu_write_physical)(unsigned long paddr, unsigned long word);
#endif /* !(__ASSEMBLY__) */
#endif /* !(_SPARC_PGTSRMMU_H) */


@ -1,172 +0,0 @@
/*
* pgtsun4c.h: Sun4c specific pgtable.h defines and code.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
#ifndef _SPARC_PGTSUN4C_H
#define _SPARC_PGTSUN4C_H
#include <asm/contregs.h>
/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define SUN4C_PMD_SHIFT 22
/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define SUN4C_PGDIR_SHIFT 22
#define SUN4C_PGDIR_SIZE (1UL << SUN4C_PGDIR_SHIFT)
#define SUN4C_PGDIR_MASK (~(SUN4C_PGDIR_SIZE-1))
#define SUN4C_PGDIR_ALIGN(addr) (((addr)+SUN4C_PGDIR_SIZE-1)&SUN4C_PGDIR_MASK)
/* To represent how the sun4c mmu really lays things out. */
#define SUN4C_REAL_PGDIR_SHIFT 18
#define SUN4C_REAL_PGDIR_SIZE (1UL << SUN4C_REAL_PGDIR_SHIFT)
#define SUN4C_REAL_PGDIR_MASK (~(SUN4C_REAL_PGDIR_SIZE-1))
#define SUN4C_REAL_PGDIR_ALIGN(addr) (((addr)+SUN4C_REAL_PGDIR_SIZE-1)&SUN4C_REAL_PGDIR_MASK)
/* 16 bit PFN on sun4c */
#define SUN4C_PFN_MASK 0xffff
/* Don't increase these unless the structures in sun4c.c are fixed */
#define SUN4C_MAX_SEGMAPS 256
#define SUN4C_MAX_CONTEXTS 16
/*
* To be efficient, and not have to worry about allocating such
* a huge pgd, we make the kernel sun4c tables each hold 1024
* entries and the pgd similarly just like the i386 tables.
*/
#define SUN4C_PTRS_PER_PTE 1024
#define SUN4C_PTRS_PER_PMD 1
#define SUN4C_PTRS_PER_PGD 1024
/*
* Sparc SUN4C pte fields.
*/
#define _SUN4C_PAGE_VALID 0x80000000
#define _SUN4C_PAGE_SILENT_READ 0x80000000 /* synonym */
#define _SUN4C_PAGE_DIRTY 0x40000000
#define _SUN4C_PAGE_SILENT_WRITE 0x40000000 /* synonym */
#define _SUN4C_PAGE_PRIV 0x20000000 /* privileged page */
#define _SUN4C_PAGE_NOCACHE 0x10000000 /* non-cacheable page */
#define _SUN4C_PAGE_PRESENT 0x08000000 /* implemented in software */
#define _SUN4C_PAGE_IO 0x04000000 /* I/O page */
#define _SUN4C_PAGE_FILE 0x02000000 /* implemented in software */
#define _SUN4C_PAGE_READ 0x00800000 /* implemented in software */
#define _SUN4C_PAGE_WRITE 0x00400000 /* implemented in software */
#define _SUN4C_PAGE_ACCESSED 0x00200000 /* implemented in software */
#define _SUN4C_PAGE_MODIFIED 0x00100000 /* implemented in software */
#define _SUN4C_READABLE (_SUN4C_PAGE_READ|_SUN4C_PAGE_SILENT_READ|\
_SUN4C_PAGE_ACCESSED)
#define _SUN4C_WRITEABLE (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_SILENT_WRITE|\
_SUN4C_PAGE_MODIFIED)
#define _SUN4C_PAGE_CHG_MASK (0xffff|_SUN4C_PAGE_ACCESSED|_SUN4C_PAGE_MODIFIED)
#define SUN4C_PAGE_NONE __pgprot(_SUN4C_PAGE_PRESENT)
#define SUN4C_PAGE_SHARED __pgprot(_SUN4C_PAGE_PRESENT|_SUN4C_READABLE|\
_SUN4C_PAGE_WRITE)
#define SUN4C_PAGE_COPY __pgprot(_SUN4C_PAGE_PRESENT|_SUN4C_READABLE)
#define SUN4C_PAGE_READONLY __pgprot(_SUN4C_PAGE_PRESENT|_SUN4C_READABLE)
#define SUN4C_PAGE_KERNEL __pgprot(_SUN4C_READABLE|_SUN4C_WRITEABLE|\
_SUN4C_PAGE_DIRTY|_SUN4C_PAGE_PRIV)
/* SUN4C swap entry encoding
*
* We use 5 bits for the type and 19 for the offset. This gives us
* 32 swapfiles of 4GB each. Encoding looks like:
*
* RRRRRRRRooooooooooooooooooottttt
* fedcba9876543210fedcba9876543210
*
* The top 8 bits are reserved for protection and status bits, especially
* FILE and PRESENT.
*/
#define SUN4C_SWP_TYPE_MASK 0x1f
#define SUN4C_SWP_OFF_MASK 0x7ffff
#define SUN4C_SWP_OFF_SHIFT 5
#ifndef __ASSEMBLY__
static inline unsigned long sun4c_get_synchronous_error(void)
{
unsigned long sync_err;
__asm__ __volatile__("lda [%1] %2, %0\n\t" :
"=r" (sync_err) :
"r" (AC_SYNC_ERR), "i" (ASI_CONTROL));
return sync_err;
}
static inline unsigned long sun4c_get_synchronous_address(void)
{
unsigned long sync_addr;
__asm__ __volatile__("lda [%1] %2, %0\n\t" :
"=r" (sync_addr) :
"r" (AC_SYNC_VA), "i" (ASI_CONTROL));
return sync_addr;
}
/* SUN4C pte, segmap, and context manipulation */
static inline unsigned long sun4c_get_segmap(unsigned long addr)
{
register unsigned long entry;
__asm__ __volatile__("\n\tlduba [%1] %2, %0\n\t" :
"=r" (entry) :
"r" (addr), "i" (ASI_SEGMAP));
return entry;
}
static inline void sun4c_put_segmap(unsigned long addr, unsigned long entry)
{
__asm__ __volatile__("\n\tstba %1, [%0] %2; nop; nop; nop;\n\t" : :
"r" (addr), "r" (entry),
"i" (ASI_SEGMAP)
: "memory");
}
static inline unsigned long sun4c_get_pte(unsigned long addr)
{
register unsigned long entry;
__asm__ __volatile__("\n\tlda [%1] %2, %0\n\t" :
"=r" (entry) :
"r" (addr), "i" (ASI_PTE));
return entry;
}
static inline void sun4c_put_pte(unsigned long addr, unsigned long entry)
{
__asm__ __volatile__("\n\tsta %1, [%0] %2; nop; nop; nop;\n\t" : :
"r" (addr),
"r" ((entry & ~(_SUN4C_PAGE_PRESENT))), "i" (ASI_PTE)
: "memory");
}
static inline int sun4c_get_context(void)
{
register int ctx;
__asm__ __volatile__("\n\tlduba [%1] %2, %0\n\t" :
"=r" (ctx) :
"r" (AC_CONTEXT), "i" (ASI_CONTROL));
return ctx;
}
static inline int sun4c_set_context(int ctx)
{
__asm__ __volatile__("\n\tstba %0, [%1] %2; nop; nop; nop;\n\t" : :
"r" (ctx), "r" (AC_CONTEXT), "i" (ASI_CONTROL)
: "memory");
return ctx;
}
#endif /* !(__ASSEMBLY__) */
#endif /* !(_SPARC_PGTSUN4C_H) */


@ -16,7 +16,6 @@
#include <asm/ptrace.h>
#include <asm/head.h>
#include <asm/signal.h>
#include <asm/btfixup.h>
#include <asm/page.h>
/*


@ -20,10 +20,7 @@ extern char reboot_command[];
* Only sun4d + leon may have boot_cpu_id != 0
*/
extern unsigned char boot_cpu_id;
extern unsigned char boot_cpu_id4;
extern unsigned long empty_bad_page;
extern unsigned long empty_bad_page_table;
extern unsigned long empty_zero_page;
extern int serial_console;


@ -4,8 +4,6 @@
#define __ARCH_FORCE_SHMLBA 1
extern int vac_cache_size;
#define SHMLBA (vac_cache_size ? vac_cache_size : \
(sparc_cpu_model == sun4c ? (64 * 1024) : \
(sparc_cpu_model == sun4 ? (128 * 1024) : PAGE_SIZE)))
#define SHMLBA (vac_cache_size ? vac_cache_size : PAGE_SIZE)
#endif /* _ASMSPARC_SHMPARAM_H */


@ -8,7 +8,6 @@
#include <linux/threads.h>
#include <asm/head.h>
#include <asm/btfixup.h>
#ifndef __ASSEMBLY__
@ -58,27 +57,43 @@ struct seq_file;
void smp_bogo(struct seq_file *);
void smp_info(struct seq_file *);
BTFIXUPDEF_CALL(void, smp_cross_call, smpfunc_t, cpumask_t, unsigned long, unsigned long, unsigned long, unsigned long)
BTFIXUPDEF_CALL(int, __hard_smp_processor_id, void)
BTFIXUPDEF_CALL(void, smp_ipi_resched, int);
BTFIXUPDEF_CALL(void, smp_ipi_single, int);
BTFIXUPDEF_CALL(void, smp_ipi_mask_one, int);
BTFIXUPDEF_BLACKBOX(hard_smp_processor_id)
BTFIXUPDEF_BLACKBOX(load_current)
struct sparc32_ipi_ops {
void (*cross_call)(smpfunc_t func, cpumask_t mask, unsigned long arg1,
unsigned long arg2, unsigned long arg3,
unsigned long arg4);
void (*resched)(int cpu);
void (*single)(int cpu);
void (*mask_one)(int cpu);
};
extern const struct sparc32_ipi_ops *sparc32_ipi_ops;
#define smp_cross_call(func,mask,arg1,arg2,arg3,arg4) BTFIXUP_CALL(smp_cross_call)(func,mask,arg1,arg2,arg3,arg4)
static inline void xc0(smpfunc_t func)
{
sparc32_ipi_ops->cross_call(func, *cpu_online_mask, 0, 0, 0, 0);
}
static inline void xc0(smpfunc_t func) { smp_cross_call(func, *cpu_online_mask, 0, 0, 0, 0); }
static inline void xc1(smpfunc_t func, unsigned long arg1)
{ smp_cross_call(func, *cpu_online_mask, arg1, 0, 0, 0); }
{
sparc32_ipi_ops->cross_call(func, *cpu_online_mask, arg1, 0, 0, 0);
}
static inline void xc2(smpfunc_t func, unsigned long arg1, unsigned long arg2)
{ smp_cross_call(func, *cpu_online_mask, arg1, arg2, 0, 0); }
{
sparc32_ipi_ops->cross_call(func, *cpu_online_mask, arg1, arg2, 0, 0);
}
static inline void xc3(smpfunc_t func, unsigned long arg1, unsigned long arg2,
unsigned long arg3)
{ smp_cross_call(func, *cpu_online_mask, arg1, arg2, arg3, 0); }
unsigned long arg3)
{
sparc32_ipi_ops->cross_call(func, *cpu_online_mask,
arg1, arg2, arg3, 0);
}
static inline void xc4(smpfunc_t func, unsigned long arg1, unsigned long arg2,
unsigned long arg3, unsigned long arg4)
{ smp_cross_call(func, *cpu_online_mask, arg1, arg2, arg3, arg4); }
unsigned long arg3, unsigned long arg4)
{
sparc32_ipi_ops->cross_call(func, *cpu_online_mask,
arg1, arg2, arg3, arg4);
}
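The BTFIXUP cross-call hooks are replaced by an ordinary ops table: each platform fills in a struct sparc32_ipi_ops and points sparc32_ipi_ops at it during setup, and the xc0()..xc4() wrappers above simply call through it. A minimal sketch of what a back-end registration might look like follows; the sun4m_* names and the empty bodies are placeholders, not the actual implementations.

/* Sketch only: the real tables live in the sun4m/sun4d/leon SMP code. */
static void sun4m_cross_call(smpfunc_t func, cpumask_t mask, unsigned long a1,
			     unsigned long a2, unsigned long a3, unsigned long a4)
{
	/* raise the cross-call IPI on every cpu in "mask" */
}

static void sun4m_ipi_resched(int cpu)  { /* post a reschedule IPI */ }
static void sun4m_ipi_single(int cpu)   { /* post a single-call IPI */ }
static void sun4m_ipi_mask_one(int cpu) { /* post a call-function IPI */ }

static const struct sparc32_ipi_ops sun4m_ipi_ops = {
	.cross_call = sun4m_cross_call,
	.resched    = sun4m_ipi_resched,
	.single     = sun4m_ipi_single,
	.mask_one   = sun4m_ipi_mask_one,
};

/* and during platform setup: sparc32_ipi_ops = &sun4m_ipi_ops; */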
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
@ -88,74 +103,7 @@ static inline int cpu_logical_map(int cpu)
return cpu;
}
static inline int hard_smp4m_processor_id(void)
{
int cpuid;
__asm__ __volatile__("rd %%tbr, %0\n\t"
"srl %0, 12, %0\n\t"
"and %0, 3, %0\n\t" :
"=&r" (cpuid));
return cpuid;
}
static inline int hard_smp4d_processor_id(void)
{
int cpuid;
__asm__ __volatile__("lda [%%g0] %1, %0\n\t" :
"=&r" (cpuid) : "i" (ASI_M_VIKING_TMP1));
return cpuid;
}
extern inline int hard_smpleon_processor_id(void)
{
int cpuid;
__asm__ __volatile__("rd %%asr17,%0\n\t"
"srl %0,28,%0" :
"=&r" (cpuid) : );
return cpuid;
}
#ifndef MODULE
static inline int hard_smp_processor_id(void)
{
int cpuid;
/* Black box - sun4m
__asm__ __volatile__("rd %%tbr, %0\n\t"
"srl %0, 12, %0\n\t"
"and %0, 3, %0\n\t" :
"=&r" (cpuid));
- sun4d
__asm__ __volatile__("lda [%g0] ASI_M_VIKING_TMP1, %0\n\t"
"nop; nop" :
"=&r" (cpuid));
- leon
__asm__ __volatile__( "rd %asr17, %0\n\t"
"srl %0, 0x1c, %0\n\t"
"nop\n\t" :
"=&r" (cpuid));
See btfixup.h and btfixupprep.c to understand how a blackbox works.
*/
__asm__ __volatile__("sethi %%hi(___b_hard_smp_processor_id), %0\n\t"
"sethi %%hi(boot_cpu_id), %0\n\t"
"ldub [%0 + %%lo(boot_cpu_id)], %0\n\t" :
"=&r" (cpuid));
return cpuid;
}
#else
static inline int hard_smp_processor_id(void)
{
int cpuid;
__asm__ __volatile__("mov %%o7, %%g1\n\t"
"call ___f___hard_smp_processor_id\n\t"
" nop\n\t"
"mov %%g2, %0\n\t" : "=r"(cpuid) : : "g1", "g2");
return cpuid;
}
#endif
extern int hard_smp_processor_id(void);
#define raw_smp_processor_id() (current_thread_info()->cpu)


@ -1,54 +0,0 @@
/*
* smpprim.h: SMP locking primitives on the Sparc
*
* God knows we won't be actually using this code for some time
* but I thought I'd write it since I knew how.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
#ifndef __SPARC_SMPPRIM_H
#define __SPARC_SMPPRIM_H
/* Test and set the unsigned byte at ADDR to 1. Returns the previous
* value. On the Sparc we use the ldstub instruction since it is
* atomic.
*/
static inline __volatile__ char test_and_set(void *addr)
{
char state = 0;
__asm__ __volatile__("ldstub [%0], %1 ! test_and_set\n\t"
"=r" (addr), "=r" (state) :
"0" (addr), "1" (state) : "memory");
return state;
}
/* Initialize a spin-lock. */
static inline __volatile__ smp_initlock(void *spinlock)
{
/* Unset the lock. */
*((unsigned char *) spinlock) = 0;
return;
}
/* This routine spins until it acquires the lock at ADDR. */
static inline __volatile__ smp_lock(void *addr)
{
while(test_and_set(addr) == 0xff)
;
/* We now have the lock */
return;
}
/* This routine releases the lock at ADDR. */
static inline __volatile__ smp_unlock(void *addr)
{
*((unsigned char *) addr) = 0;
}
#endif /* !(__SPARC_SMPPRIM_H) */


@ -61,68 +61,7 @@ extern int memcmp(const void *,const void *,__kernel_size_t);
extern __kernel_size_t strlen(const char *);
#define __HAVE_ARCH_STRNCMP
extern int __strncmp(const char *, const char *, __kernel_size_t);
static inline int __constant_strncmp(const char *src, const char *dest, __kernel_size_t count)
{
register int retval;
switch(count) {
case 0: return 0;
case 1: return (src[0] - dest[0]);
case 2: retval = (src[0] - dest[0]);
if(!retval && src[0])
retval = (src[1] - dest[1]);
return retval;
case 3: retval = (src[0] - dest[0]);
if(!retval && src[0]) {
retval = (src[1] - dest[1]);
if(!retval && src[1])
retval = (src[2] - dest[2]);
}
return retval;
case 4: retval = (src[0] - dest[0]);
if(!retval && src[0]) {
retval = (src[1] - dest[1]);
if(!retval && src[1]) {
retval = (src[2] - dest[2]);
if (!retval && src[2])
retval = (src[3] - dest[3]);
}
}
return retval;
case 5: retval = (src[0] - dest[0]);
if(!retval && src[0]) {
retval = (src[1] - dest[1]);
if(!retval && src[1]) {
retval = (src[2] - dest[2]);
if (!retval && src[2]) {
retval = (src[3] - dest[3]);
if (!retval && src[3])
retval = (src[4] - dest[4]);
}
}
}
return retval;
default:
retval = (src[0] - dest[0]);
if(!retval && src[0]) {
retval = (src[1] - dest[1]);
if(!retval && src[1]) {
retval = (src[2] - dest[2]);
if(!retval && src[2])
retval = __strncmp(src+3,dest+3,count-3);
}
}
return retval;
}
}
#undef strncmp
#define strncmp(__arg0, __arg1, __arg2) \
(__builtin_constant_p(__arg2) ? \
__constant_strncmp(__arg0, __arg1, __arg2) : \
__strncmp(__arg0, __arg1, __arg2))
extern int strncmp(const char *, const char *, __kernel_size_t);
#endif /* !EXPORT_SYMTAB_STROPS */


@ -1,15 +0,0 @@
/*
* sysen.h: Bit fields within the "System Enable" register accessed via
* the ASI_CONTROL address space at address AC_SYSENABLE.
*
* Copyright (C) 1994 David S. Miller (davem@caip.rutgers.edu)
*/
#ifndef _SPARC_SYSEN_H
#define _SPARC_SYSEN_H
#define SENABLE_DVMA 0x20 /* enable dvma transfers */
#define SENABLE_CACHE 0x10 /* enable VAC cache */
#define SENABLE_RESET 0x04 /* reset whole machine, danger Will Robinson */
#endif /* _SPARC_SYSEN_H */


@ -15,7 +15,6 @@
#ifndef __ASSEMBLY__
#include <asm/btfixup.h>
#include <asm/ptrace.h>
#include <asm/page.h>
@ -82,11 +81,8 @@ register struct thread_info *current_thread_info_reg asm("g6");
#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
BTFIXUPDEF_CALL(struct thread_info *, alloc_thread_info_node, int)
#define alloc_thread_info_node(tsk, node) BTFIXUP_CALL(alloc_thread_info_node)(node)
BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
#define free_thread_info(ti) BTFIXUP_CALL(free_thread_info)(ti)
struct thread_info * alloc_thread_info_node(struct task_struct *tsk, int node);
void free_thread_info(struct thread_info *);
#endif /* __ASSEMBLY__ */


@ -8,14 +8,37 @@
#ifndef _SPARC_TIMER_H
#define _SPARC_TIMER_H
#include <linux/clocksource.h>
#include <linux/irqreturn.h>
#include <asm-generic/percpu.h>
#include <asm/cpu_type.h> /* For SUN4M_NCPUS */
#include <asm/btfixup.h>
#define SBUS_CLOCK_RATE 2000000 /* 2MHz */
#define TIMER_VALUE_SHIFT 9
#define TIMER_VALUE_MASK 0x3fffff
#define TIMER_LIMIT_BIT (1 << 31) /* Bit 31 in Counter-Timer register */
/* The counter timer register has the value offset by 9 bits.
* From sun4m manual:
* When a counter reaches the value in the corresponding limit register,
* the Limit bit is set and the counter is set to 500 nS (i.e. 0x00000200).
*
* To compensate for this add one to the value.
*/
static inline unsigned int timer_value(unsigned int value)
{
return (value + 1) << TIMER_VALUE_SHIFT;
}
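Because the counter runs at SBUS_CLOCK_RATE (2 MHz, i.e. 500 ns per count) and timer_value() compensates for the hardware's reload behaviour described above, converting a period to a limit-register value is just a multiply followed by the helper. The calculation below is illustrative only and is not taken from the timer code itself.

/* Example: compute the limit-register value for a 10 ms tick. */
static unsigned int example_10ms_limit(void)
{
	unsigned int counts = 10000 * (SBUS_CLOCK_RATE / 1000000); /* 10 000 us x 2 counts/us */

	return timer_value(counts); /* (counts + 1) << TIMER_VALUE_SHIFT */
}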
extern __volatile__ unsigned int *master_l10_counter;
/* FIXME: Make do_[gs]ettimeofday btfixup calls */
struct timespec;
BTFIXUPDEF_CALL(int, bus_do_settimeofday, struct timespec *tv)
#define bus_do_settimeofday(tv) BTFIXUP_CALL(bus_do_settimeofday)(tv)
extern irqreturn_t notrace timer_interrupt(int dummy, void *dev_id);
#ifdef CONFIG_SMP
DECLARE_PER_CPU(struct clock_event_device, sparc32_clockevent);
extern void register_percpu_ce(int cpu);
#endif
#endif /* !(_SPARC_TIMER_H) */


@ -12,5 +12,4 @@
typedef unsigned long cycles_t;
#define get_cycles() (0)
extern u32 (*do_arch_gettimeoffset)(void);
#endif


@ -1,52 +1,16 @@
#ifndef _SPARC_TLBFLUSH_H
#define _SPARC_TLBFLUSH_H
#include <linux/mm.h>
// #include <asm/processor.h>
#include <asm/cachetlb_32.h>
/*
* TLB flushing:
*
* - flush_tlb() flushes the current mm struct TLBs XXX Exists?
* - flush_tlb_all() flushes all processes TLBs
* - flush_tlb_mm(mm) flushes the specified mm context TLB's
* - flush_tlb_page(vma, vmaddr) flushes one page
* - flush_tlb_range(vma, start, end) flushes a range of pages
* - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
*/
#ifdef CONFIG_SMP
BTFIXUPDEF_CALL(void, local_flush_tlb_all, void)
BTFIXUPDEF_CALL(void, local_flush_tlb_mm, struct mm_struct *)
BTFIXUPDEF_CALL(void, local_flush_tlb_range, struct vm_area_struct *, unsigned long, unsigned long)
BTFIXUPDEF_CALL(void, local_flush_tlb_page, struct vm_area_struct *, unsigned long)
#define local_flush_tlb_all() BTFIXUP_CALL(local_flush_tlb_all)()
#define local_flush_tlb_mm(mm) BTFIXUP_CALL(local_flush_tlb_mm)(mm)
#define local_flush_tlb_range(vma,start,end) BTFIXUP_CALL(local_flush_tlb_range)(vma,start,end)
#define local_flush_tlb_page(vma,addr) BTFIXUP_CALL(local_flush_tlb_page)(vma,addr)
extern void smp_flush_tlb_all(void);
extern void smp_flush_tlb_mm(struct mm_struct *mm);
extern void smp_flush_tlb_range(struct vm_area_struct *vma,
unsigned long start,
unsigned long end);
extern void smp_flush_tlb_page(struct vm_area_struct *mm, unsigned long page);
#endif /* CONFIG_SMP */
BTFIXUPDEF_CALL(void, flush_tlb_all, void)
BTFIXUPDEF_CALL(void, flush_tlb_mm, struct mm_struct *)
BTFIXUPDEF_CALL(void, flush_tlb_range, struct vm_area_struct *, unsigned long, unsigned long)
BTFIXUPDEF_CALL(void, flush_tlb_page, struct vm_area_struct *, unsigned long)
#define flush_tlb_all() BTFIXUP_CALL(flush_tlb_all)()
#define flush_tlb_mm(mm) BTFIXUP_CALL(flush_tlb_mm)(mm)
#define flush_tlb_range(vma,start,end) BTFIXUP_CALL(flush_tlb_range)(vma,start,end)
#define flush_tlb_page(vma,addr) BTFIXUP_CALL(flush_tlb_page)(vma,addr)
// #define flush_tlb() flush_tlb_mm(current->active_mm) /* XXX Sure? */
#define flush_tlb_all() \
sparc32_cachetlb_ops->tlb_all()
#define flush_tlb_mm(mm) \
sparc32_cachetlb_ops->tlb_mm(mm)
#define flush_tlb_range(vma, start, end) \
sparc32_cachetlb_ops->tlb_range(vma, start, end)
#define flush_tlb_page(vma, addr) \
sparc32_cachetlb_ops->tlb_page(vma, addr)
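All of the flush_tlb_*() entry points listed in the comment above now dispatch through a single sparc32_cachetlb_ops pointer (from asm/cachetlb_32.h) instead of BTFIXUP-patched calls. A back-end supplies the hooks roughly as sketched below; the example_* names are placeholders, and the real tables are filled in by the SRMMU code.

/* Sketch only: how an MMU back-end might supply the TLB hooks. */
static void example_tlb_all(void) { /* flush the entire TLB */ }
static void example_tlb_mm(struct mm_struct *mm) { /* flush one context */ }
static void example_tlb_range(struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{ /* flush a range of pages */ }
static void example_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{ /* flush a single page */ }

static const struct sparc32_cachetlb_ops example_ops = {
	.tlb_all   = example_tlb_all,
	.tlb_mm    = example_tlb_mm,
	.tlb_range = example_tlb_range,
	.tlb_page  = example_tlb_page,
	/* the cache_* hooks are omitted from this sketch */
};

/* installed once at boot, e.g.: sparc32_cachetlb_ops = &example_ops; */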
/*
* This is a kludge, until I know better. --zaitcev XXX


@ -12,7 +12,6 @@
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <asm/vac-ops.h>
#endif
#ifndef __ASSEMBLY__


@ -1,127 +0,0 @@
#ifndef _SPARC_VAC_OPS_H
#define _SPARC_VAC_OPS_H
/* vac-ops.h: Inline assembly routines to do operations on the Sparc
* VAC (virtual address cache) for the sun4c.
*
* Copyright (C) 1994, David S. Miller (davem@caip.rutgers.edu)
*/
#include <asm/sysen.h>
#include <asm/contregs.h>
#include <asm/asi.h>
/* The SUN4C models have a virtually addressed write-through
* cache.
*
* The cache tags are directly accessible through an ASI and
* each have the form:
*
* ------------------------------------------------------------
* | MBZ | CONTEXT | WRITE | PRIV | VALID | MBZ | TagID | MBZ |
* ------------------------------------------------------------
* 31 25 24 22 21 20 19 18 16 15 2 1 0
*
* MBZ: These bits are either unused and/or reserved and should
* be written as zeroes.
*
* CONTEXT: Records the context to which this cache line belongs.
*
* WRITE: A copy of the writable bit from the mmu pte access bits.
*
* PRIV: A copy of the privileged bit from the pte access bits.
*
* VALID: If set, this line is valid, else invalid.
*
* TagID: Fourteen bits of tag ID.
*
* Every virtual address is seen by the cache like this:
*
* ----------------------------------------
* | RESV | TagID | LINE | BYTE-in-LINE |
* ----------------------------------------
* 31 30 29 16 15 4 3 0
*
* RESV: Unused/reserved.
*
* TagID: Used to match the Tag-ID in that vac tags.
*
* LINE: Which line within the cache
*
* BYTE-in-LINE: Which byte within the cache line.
*/
/* Sun4c VAC Tags */
#define S4CVACTAG_CID 0x01c00000
#define S4CVACTAG_W 0x00200000
#define S4CVACTAG_P 0x00100000
#define S4CVACTAG_V 0x00080000
#define S4CVACTAG_TID 0x0000fffc
/* Sun4c VAC Virtual Address */
/* These aren't used, why bother? (Anton) */
#if 0
#define S4CVACVA_TID 0x3fff0000
#define S4CVACVA_LINE 0x0000fff0
#define S4CVACVA_BIL 0x0000000f
#endif
/* The indexing of cache lines creates a problem. Because the line
* field of a virtual address extends past the page offset within
* the virtual address it is possible to have what are called
* 'bad aliases' which will create inconsistencies. So we must make
* sure that within a context that if a physical page is mapped
* more than once, that 'extra' line bits are the same. If this is
* not the case, and thus is a 'bad alias' we must turn off the
* cacheable bit in the pte's of all such pages.
*/
#define S4CVAC_BADBITS 0x0000f000
/* The following is true if vaddr1 and vaddr2 would cause
* a 'bad alias'.
*/
#define S4CVAC_BADALIAS(vaddr1, vaddr2) \
((((unsigned long) (vaddr1)) ^ ((unsigned long) (vaddr2))) & \
(S4CVAC_BADBITS))
/* The following structure describes the characteristics of a sun4c
* VAC as probed from the prom during boot time.
*/
struct sun4c_vac_props {
unsigned int num_bytes; /* Size of the cache */
unsigned int do_hwflushes; /* Hardware flushing available? */
unsigned int linesize; /* Size of each line in bytes */
unsigned int log2lsize; /* log2(linesize) */
unsigned int on; /* VAC is enabled */
};
extern struct sun4c_vac_props sun4c_vacinfo;
/* sun4c_enable_vac() enables the sun4c virtual address cache. */
static inline void sun4c_enable_vac(void)
{
__asm__ __volatile__("lduba [%0] %1, %%g1\n\t"
"or %%g1, %2, %%g1\n\t"
"stba %%g1, [%0] %1\n\t"
: /* no outputs */
: "r" ((unsigned int) AC_SENABLE),
"i" (ASI_CONTROL), "i" (SENABLE_CACHE)
: "g1", "memory");
sun4c_vacinfo.on = 1;
}
/* sun4c_disable_vac() disables the virtual address cache. */
static inline void sun4c_disable_vac(void)
{
__asm__ __volatile__("lduba [%0] %1, %%g1\n\t"
"andn %%g1, %2, %%g1\n\t"
"stba %%g1, [%0] %1\n\t"
: /* no outputs */
: "r" ((unsigned int) AC_SENABLE),
"i" (ASI_CONTROL), "i" (SENABLE_CACHE)
: "g1", "memory");
sun4c_vacinfo.on = 0;
}
#endif /* !(_SPARC_VAC_OPS_H) */


@ -34,22 +34,6 @@
#define IOBASE_VADDR 0xfe000000
#define IOBASE_END 0xfe600000
/*
* On the sun4/4c we need a place
* to reliably map locked down kernel data. This includes the
* task_struct and kernel stack pages of each process plus the
* scsi buffers during dvma IO transfers, also the floppy buffers
* during pseudo dma which runs with traps off (no faults allowed).
* Some quick calculations yield:
* NR_TASKS <512> * (3 * PAGE_SIZE) == 0x600000
* Subtract this from 0xc00000 and you get 0x927C0 of vm left
* over to map SCSI dvma + floppy pseudo-dma buffers. So be
* careful if you change NR_TASKS or else there won't be enough
* room for it all.
*/
#define SUN4C_LOCK_VADDR 0xff000000
#define SUN4C_LOCK_END 0xffc00000
#define KADB_DEBUGGER_BEGVM 0xffc00000 /* Where kern debugger is in virt-mem */
#define KADB_DEBUGGER_ENDVM 0xffd00000
#define DEBUG_FIRSTVADDR KADB_DEBUGGER_BEGVM


@ -103,37 +103,24 @@
st %scratch, [%cur_reg + TI_W_SAVED];
#ifdef CONFIG_SMP
/* Results of LOAD_CURRENT() after BTFIXUP for SUN4M, SUN4D & LEON (comments) */
#define LOAD_CURRENT4M(dest_reg, idreg) \
rd %tbr, %idreg; \
sethi %hi(current_set), %dest_reg; \
srl %idreg, 10, %idreg; \
or %dest_reg, %lo(current_set), %dest_reg; \
and %idreg, 0xc, %idreg; \
ld [%idreg + %dest_reg], %dest_reg;
#define LOAD_CURRENT4D(dest_reg, idreg) \
lda [%g0] ASI_M_VIKING_TMP1, %idreg; \
sethi %hi(C_LABEL(current_set)), %dest_reg; \
sll %idreg, 2, %idreg; \
or %dest_reg, %lo(C_LABEL(current_set)), %dest_reg; \
ld [%idreg + %dest_reg], %dest_reg;
#define LOAD_CURRENT_LEON(dest_reg, idreg) \
rd %asr17, %idreg; \
sethi %hi(current_set), %dest_reg; \
srl %idreg, 0x1c, %idreg; \
or %dest_reg, %lo(current_set), %dest_reg; \
sll %idreg, 0x2, %idreg; \
ld [%idreg + %dest_reg], %dest_reg;
/* Blackbox - take care with this... - check smp4m and smp4d before changing this. */
#define LOAD_CURRENT(dest_reg, idreg) \
sethi %hi(___b_load_current), %idreg; \
sethi %hi(current_set), %dest_reg; \
sethi %hi(boot_cpu_id4), %idreg; \
or %dest_reg, %lo(current_set), %dest_reg; \
ldub [%idreg + %lo(boot_cpu_id4)], %idreg; \
#define LOAD_CURRENT(dest_reg, idreg) \
661: rd %tbr, %idreg; \
srl %idreg, 10, %idreg; \
and %idreg, 0xc, %idreg; \
.section .cpuid_patch, "ax"; \
/* Instruction location. */ \
.word 661b; \
/* SUN4D implementation. */ \
lda [%g0] ASI_M_VIKING_TMP1, %idreg; \
sll %idreg, 2, %idreg; \
nop; \
/* LEON implementation. */ \
rd %asr17, %idreg; \
srl %idreg, 0x1c, %idreg; \
sll %idreg, 0x02, %idreg; \
.previous; \
sethi %hi(current_set), %dest_reg; \
or %dest_reg, %lo(current_set), %dest_reg;\
ld [%idreg + %dest_reg], %dest_reg;
#else
#define LOAD_CURRENT(dest_reg, idreg) \
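The old LOAD_CURRENT blackbox is gone: the default (sun4m) instruction sequence is emitted inline, and a .cpuid_patch entry records its address together with the sun4d and LEON replacement sequences. At boot, per_cpu_patch() walks that table and overwrites the three instructions for the platform it is running on. The struct and function below are only a rough sketch of that mechanism; the names and exact layout are assumptions, not the real code.

/* Sketch of consuming the .cpuid_patch table built by LOAD_CURRENT above. */
struct cpuid_patch_entry {
	unsigned int addr;	/* address of the default 3-instruction sequence */
	unsigned int sun4d[3];	/* replacement instructions for sun4d */
	unsigned int leon[3];	/* replacement instructions for LEON */
};

extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end;

static void apply_cpuid_patches(int is_sun4d, int is_leon)
{
	struct cpuid_patch_entry *p;

	for (p = &__cpuid_patch; p < &__cpuid_patch_end; p++) {
		unsigned int *insns = (unsigned int *)(unsigned long)p->addr;
		const unsigned int *repl;

		if (is_sun4d)
			repl = p->sun4d;
		else if (is_leon)
			repl = p->leon;
		else
			continue;	/* sun4m keeps the default sequence */

		insns[0] = repl[0];
		insns[1] = repl[1];
		insns[2] = repl[2];
		/* the real code also executes "flushi" to flush the I-cache */
	}
}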


@ -28,7 +28,7 @@ obj-y += traps_$(BITS).o
# IRQ
obj-y += irq_$(BITS).o
obj-$(CONFIG_SPARC32) += sun4m_irq.o sun4c_irq.o sun4d_irq.o
obj-$(CONFIG_SPARC32) += sun4m_irq.o sun4d_irq.o
obj-y += process_$(BITS).o
obj-y += signal_$(BITS).o
@ -46,7 +46,6 @@ obj-$(CONFIG_SPARC32) += tadpole.o
obj-y += ptrace_$(BITS).o
obj-y += unaligned_$(BITS).o
obj-y += una_asm_$(BITS).o
obj-$(CONFIG_SPARC32) += muldiv.o
obj-y += prom_common.o
obj-y += prom_$(BITS).o
obj-y += of_device_common.o


@ -32,7 +32,6 @@ void __init auxio_probe(void)
switch (sparc_cpu_model) {
case sparc_leon:
case sun4d:
case sun4:
return;
default:
break;
@ -65,9 +64,8 @@ void __init auxio_probe(void)
r.start = auxregs[0].phys_addr;
r.end = auxregs[0].phys_addr + auxregs[0].reg_size - 1;
auxio_register = of_ioremap(&r, 0, auxregs[0].reg_size, "auxio");
/* Fix the address on sun4m and sun4c. */
if((((unsigned long) auxregs[0].phys_addr) & 3) == 3 ||
sparc_cpu_model == sun4c)
/* Fix the address on sun4m. */
if ((((unsigned long) auxregs[0].phys_addr) & 3) == 3)
auxio_register += (3 - ((unsigned long)auxio_register & 3));
set_auxio(AUXIO_LED, 0);
@ -86,12 +84,7 @@ void set_auxio(unsigned char bits_on, unsigned char bits_off)
unsigned char regval;
unsigned long flags;
spin_lock_irqsave(&auxio_lock, flags);
switch(sparc_cpu_model) {
case sun4c:
regval = sbus_readb(auxio_register);
sbus_writeb(((regval | bits_on) & ~bits_off) | AUXIO_ORMEIN,
auxio_register);
break;
switch (sparc_cpu_model) {
case sun4m:
if(!auxio_register)
break; /* VME chassis sun4m, no auxio. */


@ -21,7 +21,6 @@
#include <asm/cpu_type.h>
extern void clock_stop_probe(void); /* tadpole.c */
extern void sun4c_probe_memerr_reg(void);
static char *cpu_mid_prop(void)
{
@ -139,7 +138,4 @@ void __init device_scan(void)
auxio_power_probe();
}
clock_stop_probe();
if (ARCH_SUN4C)
sun4c_probe_memerr_reg();
}


@ -868,7 +868,7 @@ void ldom_power_off(void)
static void ds_conn_reset(struct ds_info *dp)
{
printk(KERN_ERR "ds-%llu: ds_conn_reset() from %p\n",
printk(KERN_ERR "ds-%llu: ds_conn_reset() from %pf\n",
dp->id, __builtin_return_address(0));
}


@ -7,6 +7,7 @@
* Copyright (C) 1997 Anton Blanchard (anton@progsoc.uts.edu.au)
*/
#include <linux/linkage.h>
#include <linux/errno.h>
#include <asm/head.h>
@ -17,10 +18,8 @@
#include <asm/asm-offsets.h>
#include <asm/psr.h>
#include <asm/vaddrs.h>
#include <asm/memreg.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pgtsun4c.h>
#include <asm/winmacro.h>
#include <asm/signal.h>
#include <asm/obio.h>
@ -125,22 +124,11 @@ floppy_tdone:
set auxio_register, %l7
ld [%l7], %l7
set sparc_cpu_model, %l5
ld [%l5], %l5
subcc %l5, 1, %g0 /* enum { sun4c = 1 }; */
be 1f
ldub [%l7], %l5
ldub [%l7], %l5
or %l5, 0xc2, %l5
stb %l5, [%l7]
andn %l5, 0x02, %l5
b 2f
nop
1:
or %l5, 0xf4, %l5
stb %l5, [%l7]
andn %l5, 0x04, %l5
2:
/* Kill some time so the bits set */
@ -266,6 +254,11 @@ smp4m_ticker:
WRITE_PAUSE
RESTORE_ALL
#define GET_PROCESSOR4M_ID(reg) \
rd %tbr, %reg; \
srl %reg, 12, %reg; \
and %reg, 3, %reg;
/* Here is where we check for possible SMP IPI passed to us
* on some level other than 15 which is the NMI and only used
* for cross calls. That has a separate entry point below.
@ -328,7 +321,7 @@ linux_trap_ipi15_sun4m:
ld [%o5 + %o0], %o5
ld [%o5 + 0x00], %o3 ! sun4m_irq_percpu[cpu]->pending
andcc %o3, %o2, %g0
be 1f ! Must be an NMI async memory error
be sun4m_nmi_error ! Must be an NMI async memory error
st %o2, [%o5 + 0x04] ! sun4m_irq_percpu[cpu]->clear=0x80000000
WRITE_PAUSE
ld [%o5 + 0x00], %g0 ! sun4m_irq_percpu[cpu]->pending
@ -342,27 +335,6 @@ linux_trap_ipi15_sun4m:
nop
b ret_trap_lockless_ipi
clr %l6
1:
/* NMI async memory error handling. */
sethi %hi(0x80000000), %l4
sethi %hi(sun4m_irq_global), %o5
ld [%o5 + %lo(sun4m_irq_global)], %l5
st %l4, [%l5 + 0x0c] ! sun4m_irq_global->mask_set=0x80000000
WRITE_PAUSE
ld [%l5 + 0x00], %g0 ! sun4m_irq_global->pending
WRITE_PAUSE
or %l0, PSR_PIL, %l4
wr %l4, 0x0, %psr
WRITE_PAUSE
wr %l4, PSR_ET, %psr
WRITE_PAUSE
call sun4m_nmi
nop
st %l4, [%l5 + 0x08] ! sun4m_irq_global->mask_clear=0x80000000
WRITE_PAUSE
ld [%l5 + 0x00], %g0 ! sun4m_irq_global->pending
WRITE_PAUSE
RESTORE_ALL
.globl smp4d_ticker
/* SMP per-cpu ticker interrupts are handled specially. */
@ -760,327 +732,38 @@ setcc_trap_handler:
jmp %l2 ! advance over trap instruction
rett %l2 + 0x4 ! like this...
.align 4
.globl linux_trap_nmi_sun4c
linux_trap_nmi_sun4c:
SAVE_ALL
/* Ugh, we need to clear the IRQ line. This is now
* a very sun4c specific trap handler...
*/
sethi %hi(interrupt_enable), %l5
ld [%l5 + %lo(interrupt_enable)], %l5
ldub [%l5], %l6
andn %l6, INTS_ENAB, %l6
stb %l6, [%l5]
/* Now it is safe to re-enable traps without recursion. */
or %l0, PSR_PIL, %l0
wr %l0, PSR_ET, %psr
sun4m_nmi_error:
/* NMI async memory error handling. */
sethi %hi(0x80000000), %l4
sethi %hi(sun4m_irq_global), %o5
ld [%o5 + %lo(sun4m_irq_global)], %l5
st %l4, [%l5 + 0x0c] ! sun4m_irq_global->mask_set=0x80000000
WRITE_PAUSE
/* Now call the c-code with the pt_regs frame ptr and the
* memory error registers as arguments. The ordering chosen
* here is due to unlatching semantics.
*/
sethi %hi(AC_SYNC_ERR), %o0
add %o0, 0x4, %o0
lda [%o0] ASI_CONTROL, %o2 ! sync vaddr
sub %o0, 0x4, %o0
lda [%o0] ASI_CONTROL, %o1 ! sync error
add %o0, 0xc, %o0
lda [%o0] ASI_CONTROL, %o4 ! async vaddr
sub %o0, 0x4, %o0
lda [%o0] ASI_CONTROL, %o3 ! async error
call sparc_lvl15_nmi
add %sp, STACKFRAME_SZ, %o0
RESTORE_ALL
.align 4
.globl invalid_segment_patch1_ff
.globl invalid_segment_patch2_ff
invalid_segment_patch1_ff: cmp %l4, 0xff
invalid_segment_patch2_ff: mov 0xff, %l3
.align 4
.globl invalid_segment_patch1_1ff
.globl invalid_segment_patch2_1ff
invalid_segment_patch1_1ff: cmp %l4, 0x1ff
invalid_segment_patch2_1ff: mov 0x1ff, %l3
.align 4
.globl num_context_patch1_16, num_context_patch2_16
num_context_patch1_16: mov 0x10, %l7
num_context_patch2_16: mov 0x10, %l7
.align 4
.globl vac_linesize_patch_32
vac_linesize_patch_32: subcc %l7, 32, %l7
.align 4
.globl vac_hwflush_patch1_on, vac_hwflush_patch2_on
/*
* Ugly, but we can't use hardware flushing on the sun4 and we'd require
* two instructions (Anton)
*/
vac_hwflush_patch1_on: addcc %l7, -PAGE_SIZE, %l7
vac_hwflush_patch2_on: sta %g0, [%l3 + %l7] ASI_HWFLUSHSEG
.globl invalid_segment_patch1, invalid_segment_patch2
.globl num_context_patch1
.globl vac_linesize_patch, vac_hwflush_patch1
.globl vac_hwflush_patch2
.align 4
.globl sun4c_fault
! %l0 = %psr
! %l1 = %pc
! %l2 = %npc
! %l3 = %wim
! %l7 = 1 for textfault
! We want error in %l5, vaddr in %l6
sun4c_fault:
sethi %hi(AC_SYNC_ERR), %l4
add %l4, 0x4, %l6 ! AC_SYNC_VA in %l6
lda [%l6] ASI_CONTROL, %l5 ! Address
lda [%l4] ASI_CONTROL, %l6 ! Error, retained for a bit
andn %l5, 0xfff, %l5 ! Encode all info into l7
srl %l6, 14, %l4
and %l4, 2, %l4
or %l5, %l4, %l4
or %l4, %l7, %l7 ! l7 = [addr,write,txtfault]
andcc %l0, PSR_PS, %g0
be sun4c_fault_fromuser
andcc %l7, 1, %g0 ! Text fault?
be 1f
sethi %hi(KERNBASE), %l4
mov %l1, %l5 ! PC
1:
cmp %l5, %l4
blu sun4c_fault_fromuser
sethi %hi(~((1 << SUN4C_REAL_PGDIR_SHIFT) - 1)), %l4
/* If the kernel references a bum kernel pointer, or a pte which
* points to a non existent page in ram, we will run this code
* _forever_ and lock up the machine!!!!! So we must check for
* this condition, the AC_SYNC_ERR bits are what we must examine.
* Also a parity error would make this happen as well. So we just
* check that we are in fact servicing a tlb miss and not some
* other type of fault for the kernel.
*/
andcc %l6, 0x80, %g0
be sun4c_fault_fromuser
and %l5, %l4, %l5
/* Test for NULL pte_t * in vmalloc area. */
sethi %hi(VMALLOC_START), %l4
cmp %l5, %l4
blu,a invalid_segment_patch1
lduXa [%l5] ASI_SEGMAP, %l4
sethi %hi(swapper_pg_dir), %l4
srl %l5, SUN4C_PGDIR_SHIFT, %l6
or %l4, %lo(swapper_pg_dir), %l4
sll %l6, 2, %l6
ld [%l4 + %l6], %l4
andcc %l4, PAGE_MASK, %g0
be sun4c_fault_fromuser
lduXa [%l5] ASI_SEGMAP, %l4
invalid_segment_patch1:
cmp %l4, 0x7f
bne 1f
sethi %hi(sun4c_kfree_ring), %l4
or %l4, %lo(sun4c_kfree_ring), %l4
ld [%l4 + 0x18], %l3
deccc %l3 ! do we have a free entry?
bcs,a 2f ! no, unmap one.
sethi %hi(sun4c_kernel_ring), %l4
st %l3, [%l4 + 0x18] ! sun4c_kfree_ring.num_entries--
ld [%l4 + 0x00], %l6 ! entry = sun4c_kfree_ring.ringhd.next
st %l5, [%l6 + 0x08] ! entry->vaddr = address
ld [%l6 + 0x00], %l3 ! next = entry->next
ld [%l6 + 0x04], %l7 ! entry->prev
st %l7, [%l3 + 0x04] ! next->prev = entry->prev
st %l3, [%l7 + 0x00] ! entry->prev->next = next
sethi %hi(sun4c_kernel_ring), %l4
or %l4, %lo(sun4c_kernel_ring), %l4
! head = &sun4c_kernel_ring.ringhd
ld [%l4 + 0x00], %l7 ! head->next
st %l4, [%l6 + 0x04] ! entry->prev = head
st %l7, [%l6 + 0x00] ! entry->next = head->next
st %l6, [%l7 + 0x04] ! head->next->prev = entry
st %l6, [%l4 + 0x00] ! head->next = entry
ld [%l4 + 0x18], %l3
inc %l3 ! sun4c_kernel_ring.num_entries++
st %l3, [%l4 + 0x18]
b 4f
ld [%l6 + 0x08], %l5
2:
or %l4, %lo(sun4c_kernel_ring), %l4
! head = &sun4c_kernel_ring.ringhd
ld [%l4 + 0x04], %l6 ! entry = head->prev
ld [%l6 + 0x08], %l3 ! tmp = entry->vaddr
! Flush segment from the cache.
sethi %hi((64 * 1024)), %l7
9:
vac_hwflush_patch1:
vac_linesize_patch:
subcc %l7, 16, %l7
bne 9b
vac_hwflush_patch2:
sta %g0, [%l3 + %l7] ASI_FLUSHSEG
st %l5, [%l6 + 0x08] ! entry->vaddr = address
ld [%l6 + 0x00], %l5 ! next = entry->next
ld [%l6 + 0x04], %l7 ! entry->prev
st %l7, [%l5 + 0x04] ! next->prev = entry->prev
st %l5, [%l7 + 0x00] ! entry->prev->next = next
st %l4, [%l6 + 0x04] ! entry->prev = head
ld [%l4 + 0x00], %l7 ! head->next
st %l7, [%l6 + 0x00] ! entry->next = head->next
st %l6, [%l7 + 0x04] ! head->next->prev = entry
st %l6, [%l4 + 0x00] ! head->next = entry
mov %l3, %l5 ! address = tmp
4:
num_context_patch1:
mov 0x08, %l7
ld [%l6 + 0x08], %l4
ldub [%l6 + 0x0c], %l3
or %l4, %l3, %l4 ! encode new vaddr/pseg into l4
sethi %hi(AC_CONTEXT), %l3
lduba [%l3] ASI_CONTROL, %l6
/* Invalidate old mapping, instantiate new mapping,
* for each context. Registers l6/l7 are live across
* this loop.
*/
3: deccc %l7
sethi %hi(AC_CONTEXT), %l3
stba %l7, [%l3] ASI_CONTROL
invalid_segment_patch2:
mov 0x7f, %l3
stXa %l3, [%l5] ASI_SEGMAP
andn %l4, 0x1ff, %l3
bne 3b
stXa %l4, [%l3] ASI_SEGMAP
sethi %hi(AC_CONTEXT), %l3
stba %l6, [%l3] ASI_CONTROL
andn %l4, 0x1ff, %l5
1:
sethi %hi(VMALLOC_START), %l4
cmp %l5, %l4
bgeu 1f
mov 1 << (SUN4C_REAL_PGDIR_SHIFT - PAGE_SHIFT), %l7
sethi %hi(KERNBASE), %l6
sub %l5, %l6, %l4
srl %l4, PAGE_SHIFT, %l4
sethi %hi((SUN4C_PAGE_KERNEL & 0xf4000000)), %l3
or %l3, %l4, %l3
sethi %hi(PAGE_SIZE), %l4
2:
sta %l3, [%l5] ASI_PTE
deccc %l7
inc %l3
bne 2b
add %l5, %l4, %l5
b 7f
sethi %hi(sun4c_kernel_faults), %l4
1:
srl %l5, SUN4C_PGDIR_SHIFT, %l3
sethi %hi(swapper_pg_dir), %l4
or %l4, %lo(swapper_pg_dir), %l4
sll %l3, 2, %l3
ld [%l4 + %l3], %l4
and %l4, PAGE_MASK, %l4
srl %l5, (PAGE_SHIFT - 2), %l6
and %l6, ((SUN4C_PTRS_PER_PTE - 1) << 2), %l6
add %l6, %l4, %l6
sethi %hi(PAGE_SIZE), %l4
2:
ld [%l6], %l3
deccc %l7
sta %l3, [%l5] ASI_PTE
add %l6, 0x4, %l6
bne 2b
add %l5, %l4, %l5
sethi %hi(sun4c_kernel_faults), %l4
7:
ld [%l4 + %lo(sun4c_kernel_faults)], %l3
inc %l3
st %l3, [%l4 + %lo(sun4c_kernel_faults)]
/* Restore condition codes */
wr %l0, 0x0, %psr
ld [%l5 + 0x00], %g0 ! sun4m_irq_global->pending
WRITE_PAUSE
jmp %l1
rett %l2
sun4c_fault_fromuser:
SAVE_ALL
or %l0, PSR_PIL, %l4
wr %l4, 0x0, %psr
WRITE_PAUSE
wr %l4, PSR_ET, %psr
WRITE_PAUSE
call sun4m_nmi
nop
mov %l7, %o1 ! Decode the info from %l7
mov %l7, %o2
and %o1, 1, %o1 ! arg2 = text_faultp
mov %l7, %o3
and %o2, 2, %o2 ! arg3 = writep
andn %o3, 0xfff, %o3 ! arg4 = faulting address
wr %l0, PSR_ET, %psr
st %l4, [%l5 + 0x08] ! sun4m_irq_global->mask_clear=0x80000000
WRITE_PAUSE
ld [%l5 + 0x00], %g0 ! sun4m_irq_global->pending
WRITE_PAUSE
call do_sun4c_fault
add %sp, STACKFRAME_SZ, %o0 ! arg1 = pt_regs ptr
RESTORE_ALL
#ifndef CONFIG_SMP
.align 4
.globl linux_trap_ipi15_sun4m
linux_trap_ipi15_sun4m:
SAVE_ALL
ba sun4m_nmi_error
nop
#endif /* CONFIG_SMP */
.align 4
.globl srmmu_fault
srmmu_fault:
@ -1483,11 +1166,13 @@ fpload:
.globl __ndelay
__ndelay:
save %sp, -STACKFRAME_SZ, %sp
mov %i0, %o0
call .umul ! round multiplier up so large ns ok
mov 0x1ae, %o1 ! 2**32 / (1 000 000 000 / HZ)
call .umul
mov %i1, %o1 ! udelay_val
mov %i0, %o0 ! round multiplier up so large ns ok
mov 0x1ae, %o1 ! 2**32 / (1 000 000 000 / HZ)
umul %o0, %o1, %o0
rd %y, %o1
mov %i1, %o1 ! udelay_val
umul %o0, %o1, %o0
rd %y, %o1
ba delay_continue
mov %o1, %o0 ! >>32 later for better resolution
@ -1496,18 +1181,21 @@ __udelay:
save %sp, -STACKFRAME_SZ, %sp
mov %i0, %o0
sethi %hi(0x10c7), %o1 ! round multiplier up so large us ok
call .umul
or %o1, %lo(0x10c7), %o1 ! 2**32 / 1 000 000
call .umul
mov %i1, %o1 ! udelay_val
or %o1, %lo(0x10c7), %o1 ! 2**32 / 1 000 000
umul %o0, %o1, %o0
rd %y, %o1
mov %i1, %o1 ! udelay_val
umul %o0, %o1, %o0
rd %y, %o1
sethi %hi(0x028f4b62), %l0 ! Add in rounding constant * 2**32,
or %g0, %lo(0x028f4b62), %l0
addcc %o0, %l0, %o0 ! 2**32 * 0.009 999
bcs,a 3f
add %o1, 0x01, %o1
3:
call .umul
mov HZ, %o0 ! >>32 earlier for wider range
mov HZ, %o0 ! >>32 earlier for wider range
umul %o0, %o1, %o0
rd %y, %o1
delay_continue:
cmp %o0, 0x0
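
[Editor's sketch] The rewritten __ndelay/__udelay keep the old fixed-point scaling but now do it with inline umul/rd %y instead of calling .umul. The constants are 32.32 fixed-point reciprocals: 0x10c7 = 4295, roughly 2^32 / 10^6 rounded up, for microseconds; 0x1ae = 430, roughly 2^32 / (10^9 / HZ) with HZ = 100, for nanoseconds; and 0x028f4b62 is about 0.009999 * 2^32, the rounding term added before the final shift. A rough C model of the __udelay path, assuming udelay_val is loops per jiffy (the meaning of the second argument is not shown in this hunk):

#include <stdint.h>
#include <stdio.h>

#define HZ 100				/* sparc32 */

static uint32_t udelay_loops(uint32_t usecs, uint32_t udelay_val)
{
	uint64_t t;

	t = (uint64_t)usecs * 0x10c7;		/* ~2^32 / 1 000 000, rounded up   */
	t = (uint32_t)t * (uint64_t)udelay_val;	/* keep only the low word, as umul  */
	t += 0x028f4b62;			/* ~0.009999 * 2^32 rounding        */
	return (uint32_t)(t >> 32) * HZ;	/* take the high word, then * HZ    */
}

int main(void)
{
	/* e.g. udelay_val = 500000 loops/jiffy (~50M loops/s): 100 us ~ 5000 loops */
	printf("%u\n", udelay_loops(100, 500000));
	return 0;
}
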
@ -1670,4 +1358,26 @@ flushw_all:
ret
restore
#ifdef CONFIG_SMP
ENTRY(hard_smp_processor_id)
661: rd %tbr, %g1
srl %g1, 12, %o0
and %o0, 3, %o0
.section .cpuid_patch, "ax"
/* Instruction location. */
.word 661b
/* SUN4D implementation. */
lda [%g0] ASI_M_VIKING_TMP1, %o0
nop
nop
/* LEON implementation. */
rd %asr17, %o0
srl %o0, 0x1c, %o0
nop
.previous
retl
nop
ENDPROC(hard_smp_processor_id)
#endif
/* End of entry.S */
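
[Editor's sketch] hard_smp_processor_id() above shows the run-time patching pattern this commit relies on: the default sun4m sequence sits at label 661, and a .cpuid_patch entry records that address plus three replacement words each for SUN4D and LEON, which boot code copies over the default on those platforms (see the per_cpu_patch fixes in the shortlog). A self-contained simulation of the idea follows; the instruction encodings are made up and the hand-built table stands in for the linker-collected section:

#include <stdio.h>
#include <string.h>

#define PATCH_INSNS 3

struct cpuid_patch_entry {
	unsigned int *addr;			/* patch site (default insns)  */
	unsigned int sun4d[PATCH_INSNS];	/* SUN4D replacement           */
	unsigned int leon[PATCH_INSNS];		/* LEON replacement            */
};

/* Stand-in for the default "rd %tbr; srl; and" words; encodings are fake. */
static unsigned int patch_site[PATCH_INSNS] = { 0x81480000, 0x9130600c, 0x900a2003 };

static struct cpuid_patch_entry cpuid_patch_table[] = {
	{ patch_site,
	  { 0xd0802000, 0x01000000, 0x01000000 },	/* lda; nop; nop         */
	  { 0x9144c000, 0x9132701c, 0x01000000 } },	/* rd %asr17; srl; nop   */
};

static void per_cpu_patch(int is_leon)
{
	unsigned int i;

	for (i = 0; i < sizeof(cpuid_patch_table) / sizeof(cpuid_patch_table[0]); i++) {
		struct cpuid_patch_entry *p = &cpuid_patch_table[i];

		memcpy(p->addr, is_leon ? p->leon : p->sun4d,
		       PATCH_INSNS * sizeof(unsigned int));
		/* The kernel additionally issues "flushi" for each patched
		 * word so stale copies are not executed from the I-cache. */
	}
}

int main(void)
{
	per_cpu_patch(1);
	printf("first patched word: 0x%08x\n", patch_site[0]);
	return 0;
}
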


@ -216,9 +216,7 @@ tsetup_patch6:
/* Call MMU-architecture dependent stack checking
* routine.
*/
.globl tsetup_mmu_patchme
tsetup_mmu_patchme:
b tsetup_sun4c_stackchk
b tsetup_srmmu_stackchk
andcc %sp, 0x7, %g0
/* Architecture specific stack checking routines. When either
@ -228,52 +226,6 @@ tsetup_mmu_patchme:
*/
#define glob_tmp g1
tsetup_sun4c_stackchk:
/* Done by caller: andcc %sp, 0x7, %g0 */
bne trap_setup_user_stack_is_bolixed
sra %sp, 29, %glob_tmp
add %glob_tmp, 0x1, %glob_tmp
andncc %glob_tmp, 0x1, %g0
bne trap_setup_user_stack_is_bolixed
and %sp, 0xfff, %glob_tmp ! delay slot
/* See if our dump area will be on more than one
* page.
*/
add %glob_tmp, 0x38, %glob_tmp
andncc %glob_tmp, 0xff8, %g0
be tsetup_sun4c_onepage ! only one page to check
lda [%sp] ASI_PTE, %glob_tmp ! have to check first page anyways
tsetup_sun4c_twopages:
/* Is first page ok permission wise? */
srl %glob_tmp, 29, %glob_tmp
cmp %glob_tmp, 0x6
bne trap_setup_user_stack_is_bolixed
add %sp, 0x38, %glob_tmp /* Is second page in vma hole? */
sra %glob_tmp, 29, %glob_tmp
add %glob_tmp, 0x1, %glob_tmp
andncc %glob_tmp, 0x1, %g0
bne trap_setup_user_stack_is_bolixed
add %sp, 0x38, %glob_tmp
lda [%glob_tmp] ASI_PTE, %glob_tmp
tsetup_sun4c_onepage:
srl %glob_tmp, 29, %glob_tmp
cmp %glob_tmp, 0x6 ! can user write to it?
bne trap_setup_user_stack_is_bolixed ! failure
nop
STORE_WINDOW(sp)
restore %g0, %g0, %g0
jmpl %t_retpc + 0x8, %g0
mov %t_kstack, %sp
.globl tsetup_srmmu_stackchk
tsetup_srmmu_stackchk:
/* Check results of callers andcc %sp, 0x7, %g0 */


@ -26,11 +26,9 @@
#include <asm/pgtsrmmu.h> /* SRMMU_PGDIR_SHIFT */
.data
/*
* The following are used with the prom_vector node-ops to figure out
* the cpu-type
/* The following are used with the prom_vector node-ops to figure out
* the cpu-type
*/
.align 4
cputyp:
.word 1
@ -38,384 +36,35 @@ cputyp:
.align 4
.globl cputypval
cputypval:
.asciz "sun4c"
.asciz "sun4m"
.ascii " "
cputypvalend:
cputypvallen = cputypvar - cputypval
/* Tested on SS-5, SS-10 */
.align 4
/*
* Sun people can't spell worth damn. "compatability" indeed.
* At least we *know* we can't spell, and use a spell-checker.
*/
/* Uh, actually Linus it is I who cannot spell. Too much murky
* Sparc assembly will do this to ya.
*/
cputypvar:
.asciz "compatability"
/* Tested on SS-5, SS-10. Probably someone at Sun applied a spell-checker. */
.align 4
cputypvar_sun4m:
.asciz "compatible"
.align 4
sun4_notsup:
.asciz "Sparc-Linux sun4 support does no longer exist.\n\n"
sun4c_notsup:
.asciz "Sparc-Linux sun4/sun4c support does no longer exist.\n\n"
.align 4
sun4e_notsup:
.asciz "Sparc-Linux sun4e support does not exist\n\n"
.align 4
/* The Sparc trap table, bootloader gives us control at _start. */
__HEAD
.globl _stext, _start, __stext
.globl trapbase
_start: /* danger danger */
__stext:
_stext:
trapbase:
#ifdef CONFIG_SMP
trapbase_cpu0:
#endif
/* We get control passed to us here at t_zero. */
t_zero: b gokernel; nop; nop; nop;
t_tflt: SPARC_TFAULT /* Inst. Access Exception */
t_bins: TRAP_ENTRY(0x2, bad_instruction) /* Illegal Instruction */
t_pins: TRAP_ENTRY(0x3, priv_instruction) /* Privileged Instruction */
t_fpd: TRAP_ENTRY(0x4, fpd_trap_handler) /* Floating Point Disabled */
t_wovf: WINDOW_SPILL /* Window Overflow */
t_wunf: WINDOW_FILL /* Window Underflow */
t_mna: TRAP_ENTRY(0x7, mna_handler) /* Memory Address Not Aligned */
t_fpe: TRAP_ENTRY(0x8, fpe_trap_handler) /* Floating Point Exception */
t_dflt: SPARC_DFAULT /* Data Miss Exception */
t_tio: TRAP_ENTRY(0xa, do_tag_overflow) /* Tagged Instruction Ovrflw */
t_wpt: TRAP_ENTRY(0xb, do_watchpoint) /* Watchpoint Detected */
t_badc: BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10)
t_irq1: TRAP_ENTRY_INTERRUPT(1) /* IRQ Software/SBUS Level 1 */
t_irq2: TRAP_ENTRY_INTERRUPT(2) /* IRQ SBUS Level 2 */
t_irq3: TRAP_ENTRY_INTERRUPT(3) /* IRQ SCSI/DMA/SBUS Level 3 */
t_irq4: TRAP_ENTRY_INTERRUPT(4) /* IRQ Software Level 4 */
t_irq5: TRAP_ENTRY_INTERRUPT(5) /* IRQ SBUS/Ethernet Level 5 */
t_irq6: TRAP_ENTRY_INTERRUPT(6) /* IRQ Software Level 6 */
t_irq7: TRAP_ENTRY_INTERRUPT(7) /* IRQ Video/SBUS Level 5 */
t_irq8: TRAP_ENTRY_INTERRUPT(8) /* IRQ SBUS Level 6 */
t_irq9: TRAP_ENTRY_INTERRUPT(9) /* IRQ SBUS Level 7 */
t_irq10:TRAP_ENTRY_INTERRUPT(10) /* IRQ Timer #1 (one we use) */
t_irq11:TRAP_ENTRY_INTERRUPT(11) /* IRQ Floppy Intr. */
t_irq12:TRAP_ENTRY_INTERRUPT(12) /* IRQ Zilog serial chip */
t_irq13:TRAP_ENTRY_INTERRUPT(13) /* IRQ Audio Intr. */
t_irq14:TRAP_ENTRY_INTERRUPT(14) /* IRQ Timer #2 */
.globl t_nmi
#ifndef CONFIG_SMP
t_nmi: NMI_TRAP /* Level 15 (NMI) */
#else
t_nmi: TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m)
#endif
t_racc: TRAP_ENTRY(0x20, do_reg_access) /* General Register Access Error */
t_iacce:BAD_TRAP(0x21) /* Instr Access Error */
t_bad22:BAD_TRAP(0x22) BAD_TRAP(0x23)
t_cpdis:TRAP_ENTRY(0x24, do_cp_disabled) /* Co-Processor Disabled */
t_uflsh:SKIP_TRAP(0x25, unimp_flush) /* Unimplemented FLUSH inst. */
t_bad26:BAD_TRAP(0x26) BAD_TRAP(0x27)
t_cpexc:TRAP_ENTRY(0x28, do_cp_exception) /* Co-Processor Exception */
t_dacce:SPARC_DFAULT /* Data Access Error */
t_hwdz: TRAP_ENTRY(0x2a, do_hw_divzero) /* Division by zero, you lose... */
t_dserr:BAD_TRAP(0x2b) /* Data Store Error */
t_daccm:BAD_TRAP(0x2c) /* Data Access MMU-Miss */
t_bad2d:BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31)
t_bad32:BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36)
t_bad37:BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b)
t_iaccm:BAD_TRAP(0x3c) /* Instr Access MMU-Miss */
t_bad3d:BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40) BAD_TRAP(0x41)
t_bad42:BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45) BAD_TRAP(0x46)
t_bad47:BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a) BAD_TRAP(0x4b)
t_bad4c:BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f) BAD_TRAP(0x50)
t_bad51:BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) BAD_TRAP(0x55)
t_bad56:BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a)
t_bad5b:BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) BAD_TRAP(0x5f)
t_bad60:BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64)
t_bad65:BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69)
t_bad6a:BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e)
t_bad6f:BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73)
t_bad74:BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78)
t_bad79:BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d)
t_bad7e:BAD_TRAP(0x7e) BAD_TRAP(0x7f)
t_bad80:BAD_TRAP(0x80) /* SunOS System Call */
t_sbkpt:BREAKPOINT_TRAP /* Software Breakpoint/KGDB */
t_divz: TRAP_ENTRY(0x82, do_hw_divzero) /* Divide by zero trap */
t_flwin:TRAP_ENTRY(0x83, do_flush_windows) /* Flush Windows Trap */
t_clwin:BAD_TRAP(0x84) /* Clean Windows Trap */
t_rchk: BAD_TRAP(0x85) /* Range Check */
t_funal:BAD_TRAP(0x86) /* Fix Unaligned Access Trap */
t_iovf: BAD_TRAP(0x87) /* Integer Overflow Trap */
t_bad88:BAD_TRAP(0x88) /* Slowaris System Call */
t_bad89:BAD_TRAP(0x89) /* Net-B.S. System Call */
t_bad8a:BAD_TRAP(0x8a) BAD_TRAP(0x8b) BAD_TRAP(0x8c) BAD_TRAP(0x8d) BAD_TRAP(0x8e)
t_bad8f:BAD_TRAP(0x8f)
t_linux:LINUX_SYSCALL_TRAP /* Linux System Call */
t_bad91:BAD_TRAP(0x91) BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94) BAD_TRAP(0x95)
t_bad96:BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99) BAD_TRAP(0x9a)
t_bad9b:BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e) BAD_TRAP(0x9f)
t_getcc:GETCC_TRAP /* Get Condition Codes */
t_setcc:SETCC_TRAP /* Set Condition Codes */
t_getpsr:GETPSR_TRAP /* Get PSR Register */
t_bada3:BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6)
t_bada7:BAD_TRAP(0xa7)
t_bada8:BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab)
t_badac:BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0)
t_badb1:BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5)
t_badb6:BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba)
t_badbb:BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf)
t_badc0:BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) BAD_TRAP(0xc4)
t_badc5:BAD_TRAP(0xc5) BAD_TRAP(0xc6) BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9)
t_badca:BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce)
t_badcf:BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3)
t_badd4:BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8)
t_badd9:BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd)
t_badde:BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2)
t_bade3:BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7)
t_bade8:BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec)
t_baded:BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1)
t_badf2:BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6)
t_badf7:BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb)
t_badfc:BAD_TRAP(0xfc)
t_kgdb: KGDB_TRAP(0xfd)
dbtrap: BAD_TRAP(0xfe) /* Debugger/PROM breakpoint #1 */
dbtrap2:BAD_TRAP(0xff) /* Debugger/PROM breakpoint #2 */
/* The trap-table - located in the __HEAD section */
#include "ttable_32.S"
.globl end_traptable
end_traptable:
#ifdef CONFIG_SMP
/* Trap tables for the other cpus. */
.globl trapbase_cpu1, trapbase_cpu2, trapbase_cpu3
trapbase_cpu1:
BAD_TRAP(0x0) SRMMU_TFAULT TRAP_ENTRY(0x2, bad_instruction)
TRAP_ENTRY(0x3, priv_instruction) TRAP_ENTRY(0x4, fpd_trap_handler)
WINDOW_SPILL WINDOW_FILL TRAP_ENTRY(0x7, mna_handler)
TRAP_ENTRY(0x8, fpe_trap_handler) SRMMU_DFAULT
TRAP_ENTRY(0xa, do_tag_overflow) TRAP_ENTRY(0xb, do_watchpoint)
BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10)
TRAP_ENTRY_INTERRUPT(1) TRAP_ENTRY_INTERRUPT(2)
TRAP_ENTRY_INTERRUPT(3) TRAP_ENTRY_INTERRUPT(4)
TRAP_ENTRY_INTERRUPT(5) TRAP_ENTRY_INTERRUPT(6)
TRAP_ENTRY_INTERRUPT(7) TRAP_ENTRY_INTERRUPT(8)
TRAP_ENTRY_INTERRUPT(9) TRAP_ENTRY_INTERRUPT(10)
TRAP_ENTRY_INTERRUPT(11) TRAP_ENTRY_INTERRUPT(12)
TRAP_ENTRY_INTERRUPT(13) TRAP_ENTRY_INTERRUPT(14)
TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m)
TRAP_ENTRY(0x20, do_reg_access) BAD_TRAP(0x21) BAD_TRAP(0x22)
BAD_TRAP(0x23) TRAP_ENTRY(0x24, do_cp_disabled) SKIP_TRAP(0x25, unimp_flush)
BAD_TRAP(0x26) BAD_TRAP(0x27) TRAP_ENTRY(0x28, do_cp_exception)
SRMMU_DFAULT TRAP_ENTRY(0x2a, do_hw_divzero) BAD_TRAP(0x2b) BAD_TRAP(0x2c)
BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31)
BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36)
BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b)
BAD_TRAP(0x3c) BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40)
BAD_TRAP(0x41) BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45)
BAD_TRAP(0x46) BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a)
BAD_TRAP(0x4b) BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f)
BAD_TRAP(0x50)
BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) BAD_TRAP(0x55)
BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a)
BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) BAD_TRAP(0x5f)
BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64)
BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69)
BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e)
BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73)
BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78)
BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d)
BAD_TRAP(0x7e) BAD_TRAP(0x7f)
BAD_TRAP(0x80)
BREAKPOINT_TRAP
TRAP_ENTRY(0x82, do_hw_divzero)
TRAP_ENTRY(0x83, do_flush_windows) BAD_TRAP(0x84) BAD_TRAP(0x85)
BAD_TRAP(0x86) BAD_TRAP(0x87) BAD_TRAP(0x88)
BAD_TRAP(0x89) BAD_TRAP(0x8a) BAD_TRAP(0x8b) BAD_TRAP(0x8c)
BAD_TRAP(0x8d) BAD_TRAP(0x8e) BAD_TRAP(0x8f)
LINUX_SYSCALL_TRAP BAD_TRAP(0x91) BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94)
BAD_TRAP(0x95) BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99)
BAD_TRAP(0x9a) BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e)
BAD_TRAP(0x9f) GETCC_TRAP SETCC_TRAP GETPSR_TRAP
BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6)
BAD_TRAP(0xa7) BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab)
BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0)
BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5)
BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba)
BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf)
BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) BAD_TRAP(0xc4)
BAD_TRAP(0xc5) BAD_TRAP(0xc6) BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9)
BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce)
BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3)
BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8)
BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd)
BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2)
BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7)
BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec)
BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1)
BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6)
BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb)
BAD_TRAP(0xfc) KGDB_TRAP(0xfd) BAD_TRAP(0xfe) BAD_TRAP(0xff)
trapbase_cpu2:
BAD_TRAP(0x0) SRMMU_TFAULT TRAP_ENTRY(0x2, bad_instruction)
TRAP_ENTRY(0x3, priv_instruction) TRAP_ENTRY(0x4, fpd_trap_handler)
WINDOW_SPILL WINDOW_FILL TRAP_ENTRY(0x7, mna_handler)
TRAP_ENTRY(0x8, fpe_trap_handler) SRMMU_DFAULT
TRAP_ENTRY(0xa, do_tag_overflow) TRAP_ENTRY(0xb, do_watchpoint)
BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10)
TRAP_ENTRY_INTERRUPT(1) TRAP_ENTRY_INTERRUPT(2)
TRAP_ENTRY_INTERRUPT(3) TRAP_ENTRY_INTERRUPT(4)
TRAP_ENTRY_INTERRUPT(5) TRAP_ENTRY_INTERRUPT(6)
TRAP_ENTRY_INTERRUPT(7) TRAP_ENTRY_INTERRUPT(8)
TRAP_ENTRY_INTERRUPT(9) TRAP_ENTRY_INTERRUPT(10)
TRAP_ENTRY_INTERRUPT(11) TRAP_ENTRY_INTERRUPT(12)
TRAP_ENTRY_INTERRUPT(13) TRAP_ENTRY_INTERRUPT(14)
TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m)
TRAP_ENTRY(0x20, do_reg_access) BAD_TRAP(0x21) BAD_TRAP(0x22)
BAD_TRAP(0x23) TRAP_ENTRY(0x24, do_cp_disabled) SKIP_TRAP(0x25, unimp_flush)
BAD_TRAP(0x26) BAD_TRAP(0x27) TRAP_ENTRY(0x28, do_cp_exception)
SRMMU_DFAULT TRAP_ENTRY(0x2a, do_hw_divzero) BAD_TRAP(0x2b) BAD_TRAP(0x2c)
BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31)
BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36)
BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b)
BAD_TRAP(0x3c) BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40)
BAD_TRAP(0x41) BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45)
BAD_TRAP(0x46) BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a)
BAD_TRAP(0x4b) BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f)
BAD_TRAP(0x50)
BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) BAD_TRAP(0x55)
BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a)
BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) BAD_TRAP(0x5f)
BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64)
BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69)
BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e)
BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73)
BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78)
BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d)
BAD_TRAP(0x7e) BAD_TRAP(0x7f)
BAD_TRAP(0x80)
BREAKPOINT_TRAP
TRAP_ENTRY(0x82, do_hw_divzero)
TRAP_ENTRY(0x83, do_flush_windows) BAD_TRAP(0x84) BAD_TRAP(0x85)
BAD_TRAP(0x86) BAD_TRAP(0x87) BAD_TRAP(0x88)
BAD_TRAP(0x89) BAD_TRAP(0x8a) BAD_TRAP(0x8b) BAD_TRAP(0x8c)
BAD_TRAP(0x8d) BAD_TRAP(0x8e) BAD_TRAP(0x8f)
LINUX_SYSCALL_TRAP BAD_TRAP(0x91) BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94)
BAD_TRAP(0x95) BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99)
BAD_TRAP(0x9a) BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e)
BAD_TRAP(0x9f) GETCC_TRAP SETCC_TRAP GETPSR_TRAP
BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6)
BAD_TRAP(0xa7) BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab)
BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0)
BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5)
BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba)
BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf)
BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) BAD_TRAP(0xc4)
BAD_TRAP(0xc5) BAD_TRAP(0xc6) BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9)
BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce)
BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3)
BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8)
BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd)
BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2)
BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7)
BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec)
BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1)
BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6)
BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb)
BAD_TRAP(0xfc) KGDB_TRAP(0xfd) BAD_TRAP(0xfe) BAD_TRAP(0xff)
trapbase_cpu3:
BAD_TRAP(0x0) SRMMU_TFAULT TRAP_ENTRY(0x2, bad_instruction)
TRAP_ENTRY(0x3, priv_instruction) TRAP_ENTRY(0x4, fpd_trap_handler)
WINDOW_SPILL WINDOW_FILL TRAP_ENTRY(0x7, mna_handler)
TRAP_ENTRY(0x8, fpe_trap_handler) SRMMU_DFAULT
TRAP_ENTRY(0xa, do_tag_overflow) TRAP_ENTRY(0xb, do_watchpoint)
BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10)
TRAP_ENTRY_INTERRUPT(1) TRAP_ENTRY_INTERRUPT(2)
TRAP_ENTRY_INTERRUPT(3) TRAP_ENTRY_INTERRUPT(4)
TRAP_ENTRY_INTERRUPT(5) TRAP_ENTRY_INTERRUPT(6)
TRAP_ENTRY_INTERRUPT(7) TRAP_ENTRY_INTERRUPT(8)
TRAP_ENTRY_INTERRUPT(9) TRAP_ENTRY_INTERRUPT(10)
TRAP_ENTRY_INTERRUPT(11) TRAP_ENTRY_INTERRUPT(12)
TRAP_ENTRY_INTERRUPT(13) TRAP_ENTRY_INTERRUPT(14)
TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m)
TRAP_ENTRY(0x20, do_reg_access) BAD_TRAP(0x21) BAD_TRAP(0x22)
BAD_TRAP(0x23) TRAP_ENTRY(0x24, do_cp_disabled) SKIP_TRAP(0x25, unimp_flush)
BAD_TRAP(0x26) BAD_TRAP(0x27) TRAP_ENTRY(0x28, do_cp_exception)
SRMMU_DFAULT TRAP_ENTRY(0x2a, do_hw_divzero) BAD_TRAP(0x2b) BAD_TRAP(0x2c)
BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31)
BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36)
BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b)
BAD_TRAP(0x3c) BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40)
BAD_TRAP(0x41) BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45)
BAD_TRAP(0x46) BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a)
BAD_TRAP(0x4b) BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f)
BAD_TRAP(0x50)
BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) BAD_TRAP(0x55)
BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a)
BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) BAD_TRAP(0x5f)
BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64)
BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69)
BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e)
BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73)
BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78)
BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d)
BAD_TRAP(0x7e) BAD_TRAP(0x7f)
BAD_TRAP(0x80)
BREAKPOINT_TRAP
TRAP_ENTRY(0x82, do_hw_divzero)
TRAP_ENTRY(0x83, do_flush_windows) BAD_TRAP(0x84) BAD_TRAP(0x85)
BAD_TRAP(0x86) BAD_TRAP(0x87) BAD_TRAP(0x88)
BAD_TRAP(0x89) BAD_TRAP(0x8a) BAD_TRAP(0x8b) BAD_TRAP(0x8c)
BAD_TRAP(0x8d) BAD_TRAP(0x8e) BAD_TRAP(0x8f)
LINUX_SYSCALL_TRAP BAD_TRAP(0x91) BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94)
BAD_TRAP(0x95) BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99)
BAD_TRAP(0x9a) BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e)
BAD_TRAP(0x9f) GETCC_TRAP SETCC_TRAP GETPSR_TRAP
BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6)
BAD_TRAP(0xa7) BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab)
BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0)
BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5)
BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba)
BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf)
BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) BAD_TRAP(0xc4)
BAD_TRAP(0xc5) BAD_TRAP(0xc6) BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9)
BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce)
BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3)
BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8)
BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd)
BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2)
BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7)
BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec)
BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1)
BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6)
BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb)
BAD_TRAP(0xfc) KGDB_TRAP(0xfd) BAD_TRAP(0xfe) BAD_TRAP(0xff)
#endif
.align PAGE_SIZE
/* This was the only reasonable way I could think of to properly align
* these page-table data structures.
*/
.globl pg0, pg1, pg2, pg3
.globl empty_bad_page
.globl empty_bad_page_table
.globl empty_zero_page
.globl swapper_pg_dir
swapper_pg_dir: .skip PAGE_SIZE
pg0: .skip PAGE_SIZE
pg1: .skip PAGE_SIZE
pg2: .skip PAGE_SIZE
pg3: .skip PAGE_SIZE
empty_bad_page: .skip PAGE_SIZE
empty_bad_page_table: .skip PAGE_SIZE
.globl empty_zero_page
empty_zero_page: .skip PAGE_SIZE
.global root_flags
@ -523,10 +172,10 @@ copy_prom_lvl14:
ldd [%g2 + 0x8], %g4
std %g4, [%g3 + 0x8] ! Copy proms handler
/* Must determine whether we are on a sun4c MMU, SRMMU, or SUN4/400 MUTANT
* MMU so we can remap ourselves properly. DON'T TOUCH %l0 thru %l5 in these
* remapping routines, we need their values afterwards!
/* DON'T TOUCH %l0 thru %l5 in these remapping routines,
* we need their values afterwards!
*/
/* Now check whether we are already mapped, if we
* are we can skip all this garbage coming up.
*/
@ -535,26 +184,29 @@ copy_prom_done:
be go_to_highmem ! this will be a nop then
nop
set LOAD_ADDR, %g6
/* Validate that we are in fact running on an
* SRMMU based cpu.
*/
set 0x4000, %g6
cmp %g7, %g6
bne remap_not_a_sun4 ! This is not a Sun4
bne not_a_sun4
nop
or %g0, 0x1, %g1
lduba [%g1] ASI_CONTROL, %g1 ! Only safe to try on Sun4.
subcc %g1, 0x24, %g0 ! Is this a mutant Sun4/400???
be sun4_mutant_remap ! Ugh, it is...
halt_sun4_or_sun4c:
ld [%g7 + 0x68], %o1
set sun4c_notsup, %o0
sub %o0, %l6, %o0
call %o1
nop
ba halt_me
nop
b sun4_normal_remap ! regular sun4, 2 level mmu
not_a_sun4:
lda [%g0] ASI_M_MMUREGS, %g1
andcc %g1, 1, %g0
be halt_sun4_or_sun4c
nop
remap_not_a_sun4:
lda [%g0] ASI_M_MMUREGS, %g1 ! same as ASI_PTE on sun4c
and %g1, 0x1, %g1 ! Test SRMMU Enable bit ;-)
cmp %g1, 0x0
be sun4c_remap ! A sun4c MMU or normal Sun4
nop
srmmu_remap:
/* First, check for a viking (TI) module. */
set 0x40000000, %g2
@ -660,72 +312,6 @@ srmmu_nviking:
b go_to_highmem
nop ! wheee....
/* This remaps the kernel on Sun4/4xx machines
* that have the Sun Mutant Three Level MMU.
* It's like a platypus, Sun didn't have the
* SRMMU in conception so they kludged the three
* level logic in the regular Sun4 MMU probably.
*
* Basically, you take each entry in the top level
* directory that maps the low 3MB starting at
* address zero and put the mapping in the KERNBASE
* slots. These top level pgd's are called regmaps.
*/
sun4_mutant_remap:
or %g0, %g0, %g3 ! source base
sethi %hi(KERNBASE), %g4 ! destination base
or %g4, %lo(KERNBASE), %g4
sethi %hi(0x300000), %g5
or %g5, %lo(0x300000), %g5 ! upper bound 3MB
or %g0, 0x1, %l6
sll %l6, 24, %l6 ! Regmap mapping size
add %g3, 0x2, %g3 ! Base magic
add %g4, 0x2, %g4 ! Base magic
/* Main remapping loop on Sun4-Mutant-MMU.
* "I am not an animal..." -Famous Mutant Person
*/
sun4_mutant_loop:
lduha [%g3] ASI_REGMAP, %g2 ! Get lower entry
stha %g2, [%g4] ASI_REGMAP ! Store in high entry
add %g4, %l6, %g4 ! Move up high memory ptr
subcc %g3, %g5, %g0 ! Reached our limit?
blu sun4_mutant_loop ! Nope, loop again
add %g3, %l6, %g3 ! delay, Move up low ptr
b go_to_highmem ! Jump to high memory.
nop
/* The following is for non-4/4xx sun4 MMU's. */
sun4_normal_remap:
mov 0, %g3 ! source base
set KERNBASE, %g4 ! destination base
set 0x300000, %g5 ! upper bound 3MB
mov 1, %l6
sll %l6, 18, %l6 ! sun4 mmu segmap size
sun4_normal_loop:
lduha [%g3] ASI_SEGMAP, %g6 ! load phys_seg
stha %g6, [%g4] ASI_SEGMAP ! stort new virt mapping
add %g3, %l6, %g3 ! increment source pointer
subcc %g3, %g5, %g0 ! reached limit?
blu sun4_normal_loop ! nope, loop again
add %g4, %l6, %g4 ! delay, increment dest ptr
b go_to_highmem
nop
/* The following works for Sun4c MMU's */
sun4c_remap:
mov 0, %g3 ! source base
set KERNBASE, %g4 ! destination base
set 0x300000, %g5 ! upper bound 3MB
mov 1, %l6
sll %l6, 18, %l6 ! sun4c mmu segmap size
sun4c_remap_loop:
lda [%g3] ASI_SEGMAP, %g6 ! load phys_seg
sta %g6, [%g4] ASI_SEGMAP ! store new virt mapping
add %g3, %l6, %g3 ! Increment source ptr
subcc %g3, %g5, %g0 ! Reached limit?
bl sun4c_remap_loop ! Nope, loop again
add %g4, %l6, %g4 ! delay, Increment dest ptr
/* Now do a non-relative jump so that PC is in high-memory */
go_to_highmem:
@ -750,35 +336,12 @@ execute_in_high_mem:
sethi %hi(linux_dbvec), %g1
st %o1, [%g1 + %lo(linux_dbvec)]
ld [%o0 + 0x4], %o3
and %o3, 0x3, %o5 ! get the version
cmp %o3, 0x2 ! a v2 prom?
be found_version
nop
/* paul@sfe.com.au */
cmp %o3, 0x3 ! a v3 prom?
be found_version
nop
/* Old sun4's pass our load address into %o0 instead of the prom
* pointer. On sun4's you have to hard code the romvec pointer into
* your code. Sun probably still does that because they don't even
* trust their own "OpenBoot" specifications.
*/
set LOAD_ADDR, %g6
cmp %o0, %g6 ! an old sun4?
be sun4_init
nop
found_version:
/* Get the machine type via the mysterious romvec node operations. */
add %g7, 0x1c, %l1
add %g7, 0x1c, %l1
ld [%l1], %l0
ld [%l0], %l0
call %l0
call %l0
or %g0, %g0, %o0 ! next_node(0) = first_node
or %o0, %g0, %g6
@ -786,28 +349,13 @@ found_version:
or %o1, %lo(cputypvar), %o1
sethi %hi(cputypval), %o2 ! information, the string
or %o2, %lo(cputypval), %o2
ld [%l1], %l0 ! 'compatibility' tells
ld [%l1], %l0 ! 'compatible' tells
ld [%l0 + 0xc], %l0 ! that we want 'sun4x' where
call %l0 ! x is one of '', 'c', 'm',
nop ! 'd' or 'e'. %o2 holds pointer
call %l0 ! x is one of 'm', 'd' or 'e'.
nop ! %o2 holds pointer
! to a buf where above string
! will get stored by the prom.
subcc %o0, %g0, %g0
bpos got_prop ! Got the property
nop
or %g6, %g0, %o0
sethi %hi(cputypvar_sun4m), %o1
or %o1, %lo(cputypvar_sun4m), %o1
sethi %hi(cputypval), %o2
or %o2, %lo(cputypval), %o2
ld [%l1], %l0
ld [%l0 + 0xc], %l0
call %l0
nop
got_prop:
#ifdef CONFIG_SPARC_LEON
/* no cpu-type check is needed, it is a SPARC-LEON */
@ -826,45 +374,29 @@ got_prop:
/* Update boot_cpu_id only on boot cpu */
stub %g1, [%g2 + %lo(boot_cpu_id)]
ba sun4c_continue_boot
ba continue_boot
nop
#endif
/* Check to cputype. We may be booted on a sun4u (64 bit box),
* and sun4d needs special treatment.
*/
set cputypval, %o2
ldub [%o2 + 0x4], %l1
cmp %l1, ' '
be 1f
cmp %l1, 'c'
be 1f
cmp %l1, 'm'
be 1f
cmp %l1, 'm'
be sun4m_init
cmp %l1, 's'
be 1f
be sun4m_init
cmp %l1, 'd'
be 1f
be sun4d_init
cmp %l1, 'e'
be no_sun4e_here ! Could be a sun4e.
nop
b no_sun4u_here ! AIEEE, a V9 sun4u... Get our BIG BROTHER kernel :))
nop
1: set cputypval, %l1
ldub [%l1 + 0x4], %l1
cmp %l1, 'm' ! Test for sun4d, sun4e ?
be sun4m_init
cmp %l1, 's' ! Treat sun4s as sun4m
be sun4m_init
cmp %l1, 'd' ! Let us see how the beast will die
be sun4d_init
nop
/* Jump into mmu context zero. */
set AC_CONTEXT, %g1
stba %g0, [%g1] ASI_CONTROL
b sun4c_continue_boot
nop
/* CPUID in bootbus can be found at PA 0xff0140000 */
#define SUN4D_BOOTBUS_CPUID 0xf0140000
@ -892,66 +424,6 @@ sun4d_init:
/* Fall through to sun4m_init */
sun4m_init:
/* XXX Fucking Cypress... */
lda [%g0] ASI_M_MMUREGS, %g5
srl %g5, 28, %g4
cmp %g4, 1
bne 1f
srl %g5, 24, %g4
and %g4, 0xf, %g4
cmp %g4, 7 /* This would be a HyperSparc. */
bne 2f
nop
1:
#define PATCH_IT(dst, src) \
set (dst), %g5; \
set (src), %g4; \
ld [%g4], %g3; \
st %g3, [%g5]; \
ld [%g4+0x4], %g3; \
st %g3, [%g5+0x4];
/* Signed multiply. */
PATCH_IT(.mul, .mul_patch)
PATCH_IT(.mul+0x08, .mul_patch+0x08)
/* Signed remainder. */
PATCH_IT(.rem, .rem_patch)
PATCH_IT(.rem+0x08, .rem_patch+0x08)
PATCH_IT(.rem+0x10, .rem_patch+0x10)
PATCH_IT(.rem+0x18, .rem_patch+0x18)
PATCH_IT(.rem+0x20, .rem_patch+0x20)
PATCH_IT(.rem+0x28, .rem_patch+0x28)
/* Signed division. */
PATCH_IT(.div, .div_patch)
PATCH_IT(.div+0x08, .div_patch+0x08)
PATCH_IT(.div+0x10, .div_patch+0x10)
PATCH_IT(.div+0x18, .div_patch+0x18)
PATCH_IT(.div+0x20, .div_patch+0x20)
/* Unsigned multiply. */
PATCH_IT(.umul, .umul_patch)
PATCH_IT(.umul+0x08, .umul_patch+0x08)
/* Unsigned remainder. */
PATCH_IT(.urem, .urem_patch)
PATCH_IT(.urem+0x08, .urem_patch+0x08)
PATCH_IT(.urem+0x10, .urem_patch+0x10)
PATCH_IT(.urem+0x18, .urem_patch+0x18)
/* Unsigned division. */
PATCH_IT(.udiv, .udiv_patch)
PATCH_IT(.udiv+0x08, .udiv_patch+0x08)
PATCH_IT(.udiv+0x10, .udiv_patch+0x10)
#undef PATCH_IT
/* Ok, the PROM could have done funny things and apple cider could still
* be sitting in the fault status/address registers. Read them all to
* clear them so we don't get magic faults later on.
@ -962,7 +434,7 @@ sun4m_init:
srl %o1, 28, %o1 ! Get a type of the CPU
subcc %o1, 4, %g0 ! TI: Viking or MicroSPARC
be sun4c_continue_boot
be continue_boot
nop
set AC_M_SFSR, %o0
@ -972,7 +444,7 @@ sun4m_init:
/* Fujitsu MicroSPARC-II has no asynchronous flavors of FARs */
subcc %o1, 0, %g0
be sun4c_continue_boot
be continue_boot
nop
set AC_M_AFSR, %o0
@ -982,8 +454,7 @@ sun4m_init:
nop
sun4c_continue_boot:
continue_boot:
/* Aieee, now set PC and nPC, enable traps, give ourselves a stack and it's
* show-time!
@ -1026,10 +497,7 @@ sun4c_continue_boot:
mov %g0, %g3
stub %g3, [%g2 + %lo(boot_cpu_id)]
1: /* boot_cpu_id set. calculate boot_cpu_id4 = boot_cpu_id*4 */
sll %g3, 2, %g3
sethi %hi(boot_cpu_id4), %g2
stub %g3, [%g2 + %lo(boot_cpu_id4)]
1: sll %g3, 2, %g3
/* Initialize the uwinmask value for init task just in case.
* But first make current_set[boot_cpu_id] point to something useful.
@ -1165,19 +633,6 @@ sun4c_continue_boot:
call halt_me
nop
sun4_init:
sethi %hi(SUN4_PROM_VECTOR+0x84), %o1
ld [%o1 + %lo(SUN4_PROM_VECTOR+0x84)], %o1
set sun4_notsup, %o0
call %o1 /* printf */
nop
sethi %hi(SUN4_PROM_VECTOR+0xc4), %o1
ld [%o1 + %lo(SUN4_PROM_VECTOR+0xc4)], %o1
call %o1 /* exittomon */
nop
1: ba 1b ! Cannot exit into KMON
nop
no_sun4e_here:
ld [%g7 + 0x68], %o1
set sun4e_notsup, %o0


@ -906,7 +906,7 @@ swapper_4m_tsb:
* error and will instead write junk into the relocation and
* you'll have an unbootable kernel.
*/
#include "ttable.S"
#include "ttable_64.S"
! 0x0000000000428000


@ -25,22 +25,9 @@ static struct idprom idprom_buffer;
* of the Sparc CPU and have a meaningful IDPROM machtype value that we
* know about. See asm-sparc/machines.h for empirical constants.
*/
static struct Sun_Machine_Models Sun_Machines[NUM_SUN_MACHINES] = {
/* First, Sun4's */
{ .name = "Sun 4/100 Series", .id_machtype = (SM_SUN4 | SM_4_110) },
{ .name = "Sun 4/200 Series", .id_machtype = (SM_SUN4 | SM_4_260) },
{ .name = "Sun 4/300 Series", .id_machtype = (SM_SUN4 | SM_4_330) },
{ .name = "Sun 4/400 Series", .id_machtype = (SM_SUN4 | SM_4_470) },
/* Now Leon */
static struct Sun_Machine_Models Sun_Machines[] = {
/* First, Leon */
{ .name = "Leon3 System-on-a-Chip", .id_machtype = (M_LEON | M_LEON3_SOC) },
/* Now, Sun4c's */
{ .name = "Sun4c SparcStation 1", .id_machtype = (SM_SUN4C | SM_4C_SS1) },
{ .name = "Sun4c SparcStation IPC", .id_machtype = (SM_SUN4C | SM_4C_IPC) },
{ .name = "Sun4c SparcStation 1+", .id_machtype = (SM_SUN4C | SM_4C_SS1PLUS) },
{ .name = "Sun4c SparcStation SLC", .id_machtype = (SM_SUN4C | SM_4C_SLC) },
{ .name = "Sun4c SparcStation 2", .id_machtype = (SM_SUN4C | SM_4C_SS2) },
{ .name = "Sun4c SparcStation ELC", .id_machtype = (SM_SUN4C | SM_4C_ELC) },
{ .name = "Sun4c SparcStation IPX", .id_machtype = (SM_SUN4C | SM_4C_IPX) },
/* Finally, early Sun4m's */
{ .name = "Sun4m SparcSystem600", .id_machtype = (SM_SUN4M | SM_4M_SS60) },
{ .name = "Sun4m SparcStation10/20", .id_machtype = (SM_SUN4M | SM_4M_SS50) },
@ -53,7 +40,7 @@ static void __init display_system_type(unsigned char machtype)
char sysname[128];
register int i;
for (i = 0; i < NUM_SUN_MACHINES; i++) {
for (i = 0; i < ARRAY_SIZE(Sun_Machines); i++) {
if (Sun_Machines[i].id_machtype == machtype) {
if (machtype != (SM_SUN4M_OBP | 0x00) ||
prom_getproperty(prom_root_node, "banner-name",


@ -50,6 +50,8 @@
#include <asm/io-unit.h>
#include <asm/leon.h>
const struct sparc32_dma_ops *sparc32_dma_ops;
/* This function must make sure that caches and memory are coherent after DMA
* On LEON systems without cache snooping it flushes the entire D-CACHE.
*/
@ -229,7 +231,7 @@ _sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz)
}
pa &= PAGE_MASK;
sparc_mapiorange(bus, pa, res->start, resource_size(res));
srmmu_mapiorange(bus, pa, res->start, resource_size(res));
return (void __iomem *)(unsigned long)(res->start + offset);
}
@ -243,7 +245,7 @@ static void _sparc_free_io(struct resource *res)
plen = resource_size(res);
BUG_ON((plen & (PAGE_SIZE-1)) != 0);
sparc_unmapiorange(res->start, plen);
srmmu_unmapiorange(res->start, plen);
release_resource(res);
}
@ -292,13 +294,13 @@ static void *sbus_alloc_coherent(struct device *dev, size_t len,
goto err_nova;
}
// XXX The mmu_map_dma_area does this for us below, see comments.
// sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
// XXX The sbus_map_dma_area does this for us below, see comments.
// srmmu_mapiorange(0, virt_to_phys(va), res->start, len_total);
/*
* XXX That's where sdev would be used. Currently we load
* all iommu tables with the same translations.
*/
if (mmu_map_dma_area(dev, dma_addrp, va, res->start, len_total) != 0)
if (sbus_map_dma_area(dev, dma_addrp, va, res->start, len_total) != 0)
goto err_noiommu;
res->name = op->dev.of_node->name;
@ -343,7 +345,7 @@ static void sbus_free_coherent(struct device *dev, size_t n, void *p,
kfree(res);
pgv = virt_to_page(p);
mmu_unmap_dma_area(dev, ba, n);
sbus_unmap_dma_area(dev, ba, n);
__free_pages(pgv, get_order(n));
}
@ -381,11 +383,6 @@ static int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n,
enum dma_data_direction dir, struct dma_attrs *attrs)
{
mmu_get_scsi_sgl(dev, sg, n);
/*
* XXX sparc64 can return a partial length here. sun4c should do this
* but it currently panics if it can't fulfill the request - Anton
*/
return n;
}
@ -469,7 +466,7 @@ static void *pci32_alloc_coherent(struct device *dev, size_t len,
printk("pci_alloc_consistent: cannot occupy 0x%lx", len_total);
goto err_nova;
}
sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
srmmu_mapiorange(0, virt_to_phys(va), res->start, len_total);
*pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
return (void *) res->start;
@ -514,7 +511,7 @@ static void pci32_free_coherent(struct device *dev, size_t n, void *p,
}
dma_make_coherent(ba, n);
sparc_unmapiorange((unsigned long)p, n);
srmmu_unmapiorange((unsigned long)p, n);
release_resource(res);
kfree(res);


@ -1,6 +1,5 @@
#include <linux/platform_device.h>
#include <asm/btfixup.h>
#include <asm/cpu_type.h>
struct irq_bucket {
@ -10,6 +9,9 @@ struct irq_bucket {
unsigned int pil;
};
#define SUN4M_HARD_INT(x) (0x000000001 << (x))
#define SUN4M_SOFT_INT(x) (0x000010000 << (x))
#define SUN4D_MAX_BOARD 10
#define SUN4D_MAX_IRQ ((SUN4D_MAX_BOARD + 2) << 5)
@ -41,52 +43,46 @@ struct sun4m_irq_global {
extern struct sun4m_irq_percpu __iomem *sun4m_irq_percpu[SUN4M_NCPUS];
extern struct sun4m_irq_global __iomem *sun4m_irq_global;
/* The following definitions describe the individual platform features: */
#define FEAT_L10_CLOCKSOURCE (1 << 0) /* L10 timer is used as a clocksource */
#define FEAT_L10_CLOCKEVENT (1 << 1) /* L10 timer is used as a clockevent */
#define FEAT_L14_ONESHOT (1 << 2) /* L14 timer clockevent can oneshot */
/*
* Platform specific irq configuration
* Platform specific configuration
* The individual platforms assign their platform
* specifics in their init functions.
*/
struct sparc_irq_config {
void (*init_timers)(irq_handler_t);
struct sparc_config {
void (*init_timers)(void);
unsigned int (*build_device_irq)(struct platform_device *op,
unsigned int real_irq);
/* generic clockevent features - see FEAT_* above */
int features;
/* clock rate used for clock event timer */
int clock_rate;
/* one period for clock source timer */
unsigned int cs_period;
/* function to obtain offsett for cs period */
unsigned int (*get_cycles_offset)(void);
void (*clear_clock_irq)(void);
void (*load_profile_irq)(int cpu, unsigned int limit);
};
extern struct sparc_irq_config sparc_irq_config;
extern struct sparc_config sparc_config;
unsigned int irq_alloc(unsigned int real_irq, unsigned int pil);
void irq_link(unsigned int irq);
void irq_unlink(unsigned int irq);
void handler_irq(unsigned int pil, struct pt_regs *regs);
/* Dave Redman (djhr@tadpole.co.uk)
* changed these to function pointers.. it saves cycles and will allow
* the irq dependencies to be split into different files at a later date
* sun4c_irq.c, sun4m_irq.c etc so we could reduce the kernel size.
* Jakub Jelinek (jj@sunsite.mff.cuni.cz)
* Changed these to btfixup entities... It saves cycles :)
*/
BTFIXUPDEF_CALL(void, clear_clock_irq, void)
BTFIXUPDEF_CALL(void, load_profile_irq, int, unsigned int)
static inline void clear_clock_irq(void)
{
BTFIXUP_CALL(clear_clock_irq)();
}
static inline void load_profile_irq(int cpu, int limit)
{
BTFIXUP_CALL(load_profile_irq)(cpu, limit);
}
unsigned long leon_get_irqmask(unsigned int irq);
#ifdef CONFIG_SMP
BTFIXUPDEF_CALL(void, set_cpu_int, int, int)
BTFIXUPDEF_CALL(void, clear_cpu_int, int, int)
BTFIXUPDEF_CALL(void, set_irq_udt, int)
#define set_cpu_int(cpu,level) BTFIXUP_CALL(set_cpu_int)(cpu,level)
#define clear_cpu_int(cpu,level) BTFIXUP_CALL(clear_cpu_int)(cpu,level)
#define set_irq_udt(cpu) BTFIXUP_CALL(set_irq_udt)(cpu)
/* All SUN4D IPIs are sent on this IRQ, may be shared with hard IRQs */
#define SUN4D_IPI_IRQ 13
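
[Editor's sketch] The new sparc_config structure replaces the btfixup call vectors removed above: each platform fills in its timer and IRQ hooks from its own init function, exactly as leon_init_IRQ() does later in this commit. A hedged example for a hypothetical platform; all foo_* names are invented, only the sparc_config fields come from this header:

#include <linux/init.h>
#include <linux/platform_device.h>
#include "irq.h"	/* the header shown above */

static void __init foo_init_timers(void)
{
	/* program the timer hardware, set sparc_config.features/cs_period */
}

static unsigned int foo_build_device_irq(struct platform_device *op,
					 unsigned int real_irq)
{
	return real_irq;	/* trivial 1:1 mapping for the example */
}

static void foo_clear_clock_irq(void) { }
static void foo_load_profile_irq(int cpu, unsigned int limit) { }

void __init foo_init_IRQ(void)
{
	sparc_config.init_timers      = foo_init_timers;
	sparc_config.build_device_irq = foo_build_device_irq;
	sparc_config.clock_rate       = 1000000;	/* timer tick rate in Hz */
	sparc_config.clear_clock_irq  = foo_clear_clock_irq;
	sparc_config.load_profile_irq = foo_load_profile_irq;
}
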


@ -23,16 +23,8 @@
#include "kernel.h"
#include "irq.h"
#ifdef CONFIG_SMP
#define SMP_NOP2 "nop; nop;\n\t"
#define SMP_NOP3 "nop; nop; nop;\n\t"
#else
#define SMP_NOP2
#define SMP_NOP3
#endif /* SMP */
/* platform specific irq setup */
struct sparc_irq_config sparc_irq_config;
struct sparc_config sparc_config;
unsigned long arch_local_irq_save(void)
{
@ -41,7 +33,6 @@ unsigned long arch_local_irq_save(void)
__asm__ __volatile__(
"rd %%psr, %0\n\t"
SMP_NOP3 /* Sun4m + Cypress + SMP bug */
"or %0, %2, %1\n\t"
"wr %1, 0, %%psr\n\t"
"nop; nop; nop\n"
@ -59,7 +50,6 @@ void arch_local_irq_enable(void)
__asm__ __volatile__(
"rd %%psr, %0\n\t"
SMP_NOP3 /* Sun4m + Cypress + SMP bug */
"andn %0, %1, %0\n\t"
"wr %0, 0, %%psr\n\t"
"nop; nop; nop\n"
@ -76,7 +66,6 @@ void arch_local_irq_restore(unsigned long old_psr)
__asm__ __volatile__(
"rd %%psr, %0\n\t"
"and %2, %1, %2\n\t"
SMP_NOP2 /* Sun4m + Cypress + SMP bug */
"andn %0, %1, %0\n\t"
"wr %0, %2, %%psr\n\t"
"nop; nop; nop\n"
@ -346,11 +335,6 @@ void sparc_floppy_irq(int irq, void *dev_id, struct pt_regs *regs)
void __init init_IRQ(void)
{
switch (sparc_cpu_model) {
case sun4c:
case sun4:
sun4c_init_IRQ();
break;
case sun4m:
pcic_probe();
if (pcic_present())
@ -371,6 +355,5 @@ void __init init_IRQ(void)
prom_printf("Cannot initialize IRQs on this Sun machine...");
break;
}
btfixup();
}


@ -799,7 +799,7 @@ static void kill_prom_timer(void)
prom_limit0 = prom_timers->limit0;
prom_limit1 = prom_timers->limit1;
/* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14.
/* Just as in sun4c PROM uses timer which ticks at IRQ 14.
* We turn both off here just to be paranoid.
*/
prom_timers->limit0 = 0;


@ -32,9 +32,6 @@ extern void cpu_probe(void);
/* traps_32.c */
extern void handle_hw_divzero(struct pt_regs *regs, unsigned long pc,
unsigned long npc, unsigned long psr);
/* muldiv.c */
extern int do_user_muldiv (struct pt_regs *, unsigned long);
/* irq_32.c */
extern struct irqaction static_irqaction[];
extern int static_irq_count;
@ -43,12 +40,7 @@ extern spinlock_t irq_action_lock;
extern void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs);
extern void init_IRQ(void);
/* sun4c_irq.c */
extern void sun4c_init_IRQ(void);
/* sun4m_irq.c */
extern unsigned int lvl14_resolution;
extern void sun4m_init_IRQ(void);
extern void sun4m_unmask_profile_irq(void);
extern void sun4m_clear_profile_irq(int cpu);
@ -85,8 +77,6 @@ extern unsigned int patchme_maybe_smp_msg[];
extern void floppy_hardint(void);
/* trampoline_32.S */
extern int __smp4m_processor_id(void);
extern int __smp4d_processor_id(void);
extern unsigned long sun4m_cpu_startup;
extern unsigned long sun4d_cpu_startup;


@ -10,6 +10,8 @@
#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/of_device.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <asm/oplib.h>
#include <asm/timer.h>
@ -84,7 +86,7 @@ void leon_eirq_setup(unsigned int eirq)
sparc_leon_eirq = eirq;
}
static inline unsigned long get_irqmask(unsigned int irq)
unsigned long leon_get_irqmask(unsigned int irq)
{
unsigned long mask;
@ -210,7 +212,7 @@ unsigned int leon_build_device_irq(unsigned int real_irq,
unsigned long mask;
irq = 0;
mask = get_irqmask(real_irq);
mask = leon_get_irqmask(real_irq);
if (mask == 0)
goto out;
@ -250,7 +252,38 @@ void leon_update_virq_handling(unsigned int virq,
irq_set_chip_data(virq, (void *)mask);
}
void __init leon_init_timers(irq_handler_t counter_fn)
static u32 leon_cycles_offset(void)
{
u32 rld, val, off;
rld = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].rld);
val = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].val);
off = rld - val;
return rld - val;
}
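
[Editor's sketch] leon_cycles_offset() relies on the GPTIMER being a down-counter: it reloads to rld at the start of each period and counts toward zero, so rld - val is the number of ticks already spent in the current period. Presumably the generic sparc32 clocksource (enabled via FEAT_L10_CLOCKSOURCE and cs_period, set below) adds this offset to the ticks of completed periods; a minimal sketch under that assumption:

#include <stdio.h>

#define HZ 100

static unsigned long long gptimer_cycles(unsigned long periods,
					 unsigned int rld, unsigned int val)
{
	unsigned int cs_period = 1000000 / HZ;	/* matches sparc_config.cs_period */
	unsigned int offset = rld - val;	/* leon_cycles_offset(): ticks into the period */

	return (unsigned long long)periods * cs_period + offset;
}

int main(void)
{
	/* 3 completed 10000-tick periods plus 1234 ticks into the current one. */
	printf("%llu\n", gptimer_cycles(3, 10000 - 1, 10000 - 1 - 1234));
	return 0;
}
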
#ifdef CONFIG_SMP
/* smp clockevent irq */
irqreturn_t leon_percpu_timer_ce_interrupt(int irq, void *unused)
{
struct clock_event_device *ce;
int cpu = smp_processor_id();
leon_clear_profile_irq(cpu);
ce = &per_cpu(sparc32_clockevent, cpu);
irq_enter();
if (ce->event_handler)
ce->event_handler(ce);
irq_exit();
return IRQ_HANDLED;
}
#endif /* CONFIG_SMP */
void __init leon_init_timers(void)
{
int irq, eirq;
struct device_node *rootnp, *np, *nnp;
@ -260,6 +293,14 @@ void __init leon_init_timers(irq_handler_t counter_fn)
int ampopts;
int err;
sparc_config.get_cycles_offset = leon_cycles_offset;
sparc_config.cs_period = 1000000 / HZ;
sparc_config.features |= FEAT_L10_CLOCKSOURCE;
#ifndef CONFIG_SMP
sparc_config.features |= FEAT_L10_CLOCKEVENT;
#endif
leondebug_irq_disable = 0;
leon_debug_irqout = 0;
master_l10_counter = (unsigned int *)&dummy_master_l10_counter;
@ -369,7 +410,7 @@ void __init leon_init_timers(irq_handler_t counter_fn)
leon_eirq_setup(eirq);
irq = _leon_build_device_irq(NULL, leon3_gptimer_irq+leon3_gptimer_idx);
err = request_irq(irq, counter_fn, IRQF_TIMER, "timer", NULL);
err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL);
if (err) {
printk(KERN_ERR "unable to attach timer IRQ%d\n", irq);
prom_halt();
@ -386,7 +427,7 @@ void __init leon_init_timers(irq_handler_t counter_fn)
*/
local_irq_save(flags);
patchme_maybe_smp_msg[0] = 0x01000000; /* NOP out the branch */
local_flush_cache_all();
local_ops->cache_all();
local_irq_restore(flags);
}
#endif
@ -401,7 +442,7 @@ void __init leon_init_timers(irq_handler_t counter_fn)
/* Install per-cpu IRQ handler for broadcasted ticker */
irq = leon_build_device_irq(leon3_ticker_irq, handle_percpu_irq,
"per-cpu", 0);
err = request_irq(irq, leon_percpu_timer_interrupt,
err = request_irq(irq, leon_percpu_timer_ce_interrupt,
IRQF_PERCPU | IRQF_TIMER, "ticker",
NULL);
if (err) {
@ -422,13 +463,12 @@ bad:
return;
}
void leon_clear_clock_irq(void)
static void leon_clear_clock_irq(void)
{
}
void leon_load_profile_irq(int cpu, unsigned int limit)
static void leon_load_profile_irq(int cpu, unsigned int limit)
{
BUG();
}
void __init leon_trans_init(struct device_node *dp)
@ -457,25 +497,6 @@ void __init leon_node_init(struct device_node *dp, struct device_node ***nextp)
}
#ifdef CONFIG_SMP
void leon_set_cpu_int(int cpu, int level)
{
unsigned long mask;
mask = get_irqmask(level);
LEON3_BYPASS_STORE_PA(&leon3_irqctrl_regs->force[cpu], mask);
}
static void leon_clear_ipi(int cpu, int level)
{
unsigned long mask;
mask = get_irqmask(level);
LEON3_BYPASS_STORE_PA(&leon3_irqctrl_regs->force[cpu], mask<<16);
}
static void leon_set_udt(int cpu)
{
}
void leon_clear_profile_irq(int cpu)
{
}
@ -483,7 +504,7 @@ void leon_clear_profile_irq(int cpu)
void leon_enable_irq_cpu(unsigned int irq_nr, unsigned int cpu)
{
unsigned long mask, flags, *addr;
mask = get_irqmask(irq_nr);
mask = leon_get_irqmask(irq_nr);
spin_lock_irqsave(&leon_irq_lock, flags);
addr = (unsigned long *)LEON_IMASK(cpu);
LEON3_BYPASS_STORE_PA(addr, (LEON3_BYPASS_LOAD_PA(addr) | mask));
@ -494,20 +515,11 @@ void leon_enable_irq_cpu(unsigned int irq_nr, unsigned int cpu)
void __init leon_init_IRQ(void)
{
sparc_irq_config.init_timers = leon_init_timers;
sparc_irq_config.build_device_irq = _leon_build_device_irq;
BTFIXUPSET_CALL(clear_clock_irq, leon_clear_clock_irq,
BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(load_profile_irq, leon_load_profile_irq,
BTFIXUPCALL_NOP);
#ifdef CONFIG_SMP
BTFIXUPSET_CALL(set_cpu_int, leon_set_cpu_int, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(clear_cpu_int, leon_clear_ipi, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(set_irq_udt, leon_set_udt, BTFIXUPCALL_NORM);
#endif
sparc_config.init_timers = leon_init_timers;
sparc_config.build_device_irq = _leon_build_device_irq;
sparc_config.clock_rate = 1000000;
sparc_config.clear_clock_irq = leon_clear_clock_irq;
sparc_config.load_profile_irq = leon_load_profile_irq;
}
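Taken together, the assignments above sketch the per-platform configuration object that replaces the old BTFIXUP hooks. Roughly (field types inferred only from how they are used in this diff; the authoritative definition lives in the sparc32 headers):

struct sparc_config_sketch {
	void		(*init_timers)(void);
	unsigned int	(*build_device_irq)(struct platform_device *op,
					    unsigned int real_irq);
	u32		clock_rate;	/* master clock rate in Hz */
	u32		cs_period;	/* clocksource period, in clock_rate cycles */
	unsigned int	features;	/* FEAT_L10_CLOCKSOURCE, FEAT_L10_CLOCKEVENT, ... */
	void		(*clear_clock_irq)(void);
	void		(*load_profile_irq)(int cpu, unsigned int limit);
	u32		(*get_cycles_offset)(void);
};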
void __init leon_init(void)

View File

@ -24,6 +24,7 @@
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <linux/clockchips.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
@ -43,6 +44,7 @@
#include <asm/asi.h>
#include <asm/leon.h>
#include <asm/leon_amba.h>
#include <asm/timer.h>
#include "kernel.h"
@ -69,26 +71,24 @@ static inline unsigned long do_swap(volatile unsigned long *ptr,
return val;
}
static void smp_setup_percpu_timer(void);
void __cpuinit leon_callin(void)
{
int cpuid = hard_smpleon_processor_id();
int cpuid = hard_smp_processor_id();
local_flush_cache_all();
local_flush_tlb_all();
local_ops->cache_all();
local_ops->tlb_all();
leon_configure_cache_smp();
notify_cpu_starting(cpuid);
/* Get our local ticker going. */
smp_setup_percpu_timer();
register_percpu_ce(cpuid);
calibrate_delay();
smp_store_cpu_info(cpuid);
local_flush_cache_all();
local_flush_tlb_all();
local_ops->cache_all();
local_ops->tlb_all();
/*
* Unblock the master CPU _only_ when the scheduler state
@ -99,8 +99,8 @@ void __cpuinit leon_callin(void)
*/
do_swap(&cpu_callin_map[cpuid], 1);
local_flush_cache_all();
local_flush_tlb_all();
local_ops->cache_all();
local_ops->tlb_all();
/* Fix idle thread fields. */
__asm__ __volatile__("ld [%0], %%g6\n\t" : : "r"(&current_set[cpuid])
@ -143,8 +143,8 @@ void __init leon_configure_cache_smp(void)
}
}
local_flush_cache_all();
local_flush_tlb_all();
local_ops->cache_all();
local_ops->tlb_all();
}
void leon_smp_setbroadcast(unsigned int mask)
@ -199,8 +199,7 @@ void __init leon_boot_cpus(void)
leon_smp_setbroadcast(1 << LEON3_IRQ_TICKER);
leon_configure_cache_smp();
smp_setup_percpu_timer();
local_flush_cache_all();
local_ops->cache_all();
}
@ -227,7 +226,7 @@ int __cpuinit leon_boot_one_cpu(int i)
/* whirrr, whirrr, whirrrrrrrrr... */
printk(KERN_INFO "Starting CPU %d : (irqmp: 0x%x)\n", (unsigned int)i,
(unsigned int)&leon3_irqctrl_regs->mpstatus);
local_flush_cache_all();
local_ops->cache_all();
/* Make sure all IRQs are off from the start for this new CPU */
LEON_BYPASS_STORE_PA(&leon3_irqctrl_regs->mask[i], 0);
@ -252,7 +251,7 @@ int __cpuinit leon_boot_one_cpu(int i)
leon_enable_irq_cpu(leon_ipi_irq, i);
}
local_flush_cache_all();
local_ops->cache_all();
return 0;
}
@ -272,7 +271,7 @@ void __init leon_smp_done(void)
}
}
*prev = first;
local_flush_cache_all();
local_ops->cache_all();
/* Free unneeded trap tables */
if (!cpu_present(1)) {
@ -338,7 +337,7 @@ static void __init leon_ipi_init(void)
local_irq_save(flags);
trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (leon_ipi_irq - 1)];
trap_table->inst_three += smpleon_ipi - real_irq_entry;
local_flush_cache_all();
local_ops->cache_all();
local_irq_restore(flags);
for_each_possible_cpu(cpu) {
@ -347,6 +346,13 @@ static void __init leon_ipi_init(void)
}
}
static void leon_send_ipi(int cpu, int level)
{
unsigned long mask;
mask = leon_get_irqmask(level);
LEON3_BYPASS_STORE_PA(&leon3_irqctrl_regs->force[cpu], mask);
}
static void leon_ipi_single(int cpu)
{
struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu);
@ -355,7 +361,7 @@ static void leon_ipi_single(int cpu)
work->single = 1;
/* Generate IRQ on the CPU */
set_cpu_int(cpu, leon_ipi_irq);
leon_send_ipi(cpu, leon_ipi_irq);
}
static void leon_ipi_mask_one(int cpu)
@ -366,7 +372,7 @@ static void leon_ipi_mask_one(int cpu)
work->msk = 1;
/* Generate IRQ on the CPU */
set_cpu_int(cpu, leon_ipi_irq);
leon_send_ipi(cpu, leon_ipi_irq);
}
static void leon_ipi_resched(int cpu)
@ -377,7 +383,7 @@ static void leon_ipi_resched(int cpu)
work->resched = 1;
/* Generate IRQ on the CPU (any IRQ will cause resched) */
set_cpu_int(cpu, leon_ipi_irq);
leon_send_ipi(cpu, leon_ipi_irq);
}
void leonsmp_ipi_interrupt(void)
@ -449,7 +455,7 @@ static void leon_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
if (cpumask_test_cpu(i, &mask)) {
ccall_info.processors_in[i] = 0;
ccall_info.processors_out[i] = 0;
set_cpu_int(i, LEON3_IRQ_CROSS_CALL);
leon_send_ipi(i, LEON3_IRQ_CROSS_CALL);
}
}
@ -492,68 +498,19 @@ void leon_cross_call_irq(void)
ccall_info.processors_out[i] = 1;
}
irqreturn_t leon_percpu_timer_interrupt(int irq, void *unused)
{
int cpu = smp_processor_id();
leon_clear_profile_irq(cpu);
profile_tick(CPU_PROFILING);
if (!--prof_counter(cpu)) {
int user = user_mode(get_irq_regs());
update_process_times(user);
prof_counter(cpu) = prof_multiplier(cpu);
}
return IRQ_HANDLED;
}
static void __init smp_setup_percpu_timer(void)
{
int cpu = smp_processor_id();
prof_counter(cpu) = prof_multiplier(cpu) = 1;
}
void __init leon_blackbox_id(unsigned *addr)
{
int rd = *addr & 0x3e000000;
int rs1 = rd >> 11;
/* patch places where ___b_hard_smp_processor_id appears */
addr[0] = 0x81444000 | rd; /* rd %asr17, reg */
addr[1] = 0x8130201c | rd | rs1; /* srl reg, 0x1c, reg */
addr[2] = 0x01000000; /* nop */
}
void __init leon_blackbox_current(unsigned *addr)
{
int rd = *addr & 0x3e000000;
int rs1 = rd >> 11;
/* patch LOAD_CURRENT macro where ___b_load_current appears */
addr[0] = 0x81444000 | rd; /* rd %asr17, reg */
addr[2] = 0x8130201c | rd | rs1; /* srl reg, 0x1c, reg */
addr[4] = 0x81282002 | rd | rs1; /* sll reg, 0x2, reg */
}
static const struct sparc32_ipi_ops leon_ipi_ops = {
.cross_call = leon_cross_call,
.resched = leon_ipi_resched,
.single = leon_ipi_single,
.mask_one = leon_ipi_mask_one,
};
void __init leon_init_smp(void)
{
/* Patch ipi15 trap table */
t_nmi[1] = t_nmi[1] + (linux_trap_ipi15_leon - linux_trap_ipi15_sun4m);
BTFIXUPSET_BLACKBOX(hard_smp_processor_id, leon_blackbox_id);
BTFIXUPSET_BLACKBOX(load_current, leon_blackbox_current);
BTFIXUPSET_CALL(smp_cross_call, leon_cross_call, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(__hard_smp_processor_id, __leon_processor_id,
BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(smp_ipi_resched, leon_ipi_resched, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(smp_ipi_single, leon_ipi_single, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(smp_ipi_mask_one, leon_ipi_mask_one, BTFIXUPCALL_NORM);
sparc32_ipi_ops = &leon_ipi_ops;
}
#endif /* CONFIG_SPARC_LEON */

View File

@ -32,26 +32,11 @@ static void *module_map(unsigned long size)
GFP_KERNEL, PAGE_KERNEL, -1,
__builtin_return_address(0));
}
static char *dot2underscore(char *name)
{
return name;
}
#else
static void *module_map(unsigned long size)
{
return vmalloc(size);
}
/* Replace references to .func with _Func */
static char *dot2underscore(char *name)
{
if (name[0] == '.') {
name[0] = '_';
name[1] = toupper(name[1]);
}
return name;
}
#endif /* CONFIG_SPARC64 */
void *module_alloc(unsigned long size)
@ -93,12 +78,8 @@ int module_frob_arch_sections(Elf_Ehdr *hdr,
for (i = 1; i < sechdrs[symidx].sh_size / sizeof(Elf_Sym); i++) {
if (sym[i].st_shndx == SHN_UNDEF) {
if (ELF_ST_TYPE(sym[i].st_info) == STT_REGISTER) {
if (ELF_ST_TYPE(sym[i].st_info) == STT_REGISTER)
sym[i].st_shndx = SHN_ABS;
} else {
char *name = strtab + sym[i].st_name;
dot2underscore(name);
}
}
}
return 0;

View File

@ -1,238 +0,0 @@
/*
* muldiv.c: Hardware multiply/division illegal instruction trap
* for sun4c/sun4 (which do not have those instructions)
*
* Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*
* 2004-12-25 Krzysztof Helt (krzysztof.h1@wp.pl)
* - fixed registers constrains in inline assembly declarations
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include "kernel.h"
/* #define DEBUG_MULDIV */
static inline int has_imm13(int insn)
{
return (insn & 0x2000);
}
static inline int is_foocc(int insn)
{
return (insn & 0x800000);
}
static inline int sign_extend_imm13(int imm)
{
return imm << 19 >> 19;
}
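A quick worked check of the shift trick above: a 13-bit immediate occupies bits 12..0 of a 32-bit int, so shifting left by 19 moves its sign bit into bit 31 and the arithmetic shift back replicates it (the kernel relies on signed right shifts behaving arithmetically here).

/* sign_extend_imm13(0x1fff) == -1		all 13 bits set
 * sign_extend_imm13(0x1000) == -4096		only the sign bit set
 * sign_extend_imm13(0x0fff) ==  4095		largest positive value
 */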
static inline void advance(struct pt_regs *regs)
{
regs->pc = regs->npc;
regs->npc += 4;
}
static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
unsigned int rd)
{
if(rs2 >= 16 || rs1 >= 16 || rd >= 16) {
/* Wheee... */
__asm__ __volatile__("save %sp, -0x40, %sp\n\t"
"save %sp, -0x40, %sp\n\t"
"save %sp, -0x40, %sp\n\t"
"save %sp, -0x40, %sp\n\t"
"save %sp, -0x40, %sp\n\t"
"save %sp, -0x40, %sp\n\t"
"save %sp, -0x40, %sp\n\t"
"restore; restore; restore; restore;\n\t"
"restore; restore; restore;\n\t");
}
}
#define fetch_reg(reg, regs) ({ \
struct reg_window32 __user *win; \
register unsigned long ret; \
\
if (!(reg)) ret = 0; \
else if ((reg) < 16) { \
ret = regs->u_regs[(reg)]; \
} else { \
/* Ho hum, the slightly complicated case. */ \
win = (struct reg_window32 __user *)regs->u_regs[UREG_FP];\
if (get_user (ret, &win->locals[(reg) - 16])) return -1;\
} \
ret; \
})
static inline int
store_reg(unsigned int result, unsigned int reg, struct pt_regs *regs)
{
struct reg_window32 __user *win;
if (!reg)
return 0;
if (reg < 16) {
regs->u_regs[reg] = result;
return 0;
} else {
/* need to use put_user() in this case: */
win = (struct reg_window32 __user *) regs->u_regs[UREG_FP];
return (put_user(result, &win->locals[reg - 16]));
}
}
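/* Note on the split above (descriptive note, not part of the original file):
 * instruction register numbers 0-15 (%g0-%g7 and %o0-%o7 of the trapped
 * context) are held directly in pt_regs->u_regs[]; numbers 16-31
 * (%l0-%l7, %i0-%i7) live in the register window spilled at the saved
 * stack pointer, hence the get_user()/put_user() path through UREG_FP.
 * maybe_flush_windows() forces any still-live windows out to the stack
 * first, so those user-space accesses see current values.
 */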
/* Should return 0 if mul/div emulation succeeded and SIGILL should
* not be issued.
*/
int do_user_muldiv(struct pt_regs *regs, unsigned long pc)
{
unsigned int insn;
int inst;
unsigned int rs1, rs2, rdv;
if (!pc)
return -1; /* This happens too often, I think */
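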
if (get_user (insn, (unsigned int __user *)pc))
return -1;
if ((insn & 0xc1400000) != 0x80400000)
return -1;
inst = ((insn >> 19) & 0xf);
if ((inst & 0xe) != 10 && (inst & 0xe) != 14)
return -1;
/* Now we know we have to do something with umul, smul, udiv or sdiv */
rs1 = (insn >> 14) & 0x1f;
rs2 = insn & 0x1f;
rdv = (insn >> 25) & 0x1f;
if (has_imm13(insn)) {
maybe_flush_windows(rs1, 0, rdv);
rs2 = sign_extend_imm13(insn);
} else {
maybe_flush_windows(rs1, rs2, rdv);
rs2 = fetch_reg(rs2, regs);
}
rs1 = fetch_reg(rs1, regs);
switch (inst) {
case 10: /* umul */
#ifdef DEBUG_MULDIV
printk ("unsigned muldiv: 0x%x * 0x%x = ", rs1, rs2);
#endif
__asm__ __volatile__ ("\n\t"
"mov %0, %%o0\n\t"
"call .umul\n\t"
" mov %1, %%o1\n\t"
"mov %%o0, %0\n\t"
"mov %%o1, %1\n\t"
: "=r" (rs1), "=r" (rs2)
: "0" (rs1), "1" (rs2)
: "o0", "o1", "o2", "o3", "o4", "o5", "o7", "cc");
#ifdef DEBUG_MULDIV
printk ("0x%x%08x\n", rs2, rs1);
#endif
if (store_reg(rs1, rdv, regs))
return -1;
regs->y = rs2;
break;
case 11: /* smul */
#ifdef DEBUG_MULDIV
printk ("signed muldiv: 0x%x * 0x%x = ", rs1, rs2);
#endif
__asm__ __volatile__ ("\n\t"
"mov %0, %%o0\n\t"
"call .mul\n\t"
" mov %1, %%o1\n\t"
"mov %%o0, %0\n\t"
"mov %%o1, %1\n\t"
: "=r" (rs1), "=r" (rs2)
: "0" (rs1), "1" (rs2)
: "o0", "o1", "o2", "o3", "o4", "o5", "o7", "cc");
#ifdef DEBUG_MULDIV
printk ("0x%x%08x\n", rs2, rs1);
#endif
if (store_reg(rs1, rdv, regs))
return -1;
regs->y = rs2;
break;
case 14: /* udiv */
#ifdef DEBUG_MULDIV
printk ("unsigned muldiv: 0x%x%08x / 0x%x = ", regs->y, rs1, rs2);
#endif
if (!rs2) {
#ifdef DEBUG_MULDIV
printk ("DIVISION BY ZERO\n");
#endif
handle_hw_divzero (regs, pc, regs->npc, regs->psr);
return 0;
}
__asm__ __volatile__ ("\n\t"
"mov %2, %%o0\n\t"
"mov %0, %%o1\n\t"
"mov %%g0, %%o2\n\t"
"call __udivdi3\n\t"
" mov %1, %%o3\n\t"
"mov %%o1, %0\n\t"
"mov %%o0, %1\n\t"
: "=r" (rs1), "=r" (rs2)
: "r" (regs->y), "0" (rs1), "1" (rs2)
: "o0", "o1", "o2", "o3", "o4", "o5", "o7",
"g1", "g2", "g3", "cc");
#ifdef DEBUG_MULDIV
printk ("0x%x\n", rs1);
#endif
if (store_reg(rs1, rdv, regs))
return -1;
break;
case 15: /* sdiv */
#ifdef DEBUG_MULDIV
printk ("signed muldiv: 0x%x%08x / 0x%x = ", regs->y, rs1, rs2);
#endif
if (!rs2) {
#ifdef DEBUG_MULDIV
printk ("DIVISION BY ZERO\n");
#endif
handle_hw_divzero (regs, pc, regs->npc, regs->psr);
return 0;
}
__asm__ __volatile__ ("\n\t"
"mov %2, %%o0\n\t"
"mov %0, %%o1\n\t"
"mov %%g0, %%o2\n\t"
"call __divdi3\n\t"
" mov %1, %%o3\n\t"
"mov %%o1, %0\n\t"
"mov %%o0, %1\n\t"
: "=r" (rs1), "=r" (rs2)
: "r" (regs->y), "0" (rs1), "1" (rs2)
: "o0", "o1", "o2", "o3", "o4", "o5", "o7",
"g1", "g2", "g3", "cc");
#ifdef DEBUG_MULDIV
printk ("0x%x\n", rs1);
#endif
if (store_reg(rs1, rdv, regs))
return -1;
break;
}
if (is_foocc (insn)) {
regs->psr &= ~PSR_ICC;
if ((inst & 0xe) == 14) {
/* ?div */
if (rs2) regs->psr |= PSR_V;
}
if (!rs1) regs->psr |= PSR_Z;
if (((int)rs1) < 0) regs->psr |= PSR_N;
#ifdef DEBUG_MULDIV
printk ("psr muldiv: %08x\n", regs->psr);
#endif
}
advance(regs);
return 0;
}
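As a concrete illustration of the decode path above (worked out by hand from the masks in do_user_muldiv(), with an arbitrarily chosen instruction): the word for `umul %o1, %o2, %o3` is 0x9652400a. It satisfies (insn & 0xc1400000) == 0x80400000, inst = (insn >> 19) & 0xf gives 10 (umul), and the field extractions yield rd = 11 (%o3), rs1 = 9 (%o1), rs2 = 10 (%o2); bit 13 is clear, so has_imm13() is false and the second operand is read from a register rather than sign-extended from the immediate.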

View File

@ -356,7 +356,7 @@ static struct platform_device * __init scan_one_device(struct device_node *dp,
op->archdata.num_irqs = len / sizeof(struct linux_prom_irqs);
for (i = 0; i < op->archdata.num_irqs; i++)
op->archdata.irqs[i] =
sparc_irq_config.build_device_irq(op, intr[i].pri);
sparc_config.build_device_irq(op, intr[i].pri);
} else {
const unsigned int *irq =
of_get_property(dp, "interrupts", &len);
@ -365,7 +365,7 @@ static struct platform_device * __init scan_one_device(struct device_node *dp,
op->archdata.num_irqs = len / sizeof(unsigned int);
for (i = 0; i < op->archdata.num_irqs; i++)
op->archdata.irqs[i] =
sparc_irq_config.build_device_irq(op, irq[i]);
sparc_config.build_device_irq(op, irq[i]);
} else {
op->archdata.num_irqs = 0;
}

View File

@ -703,31 +703,28 @@ static void pcic_clear_clock_irq(void)
pcic_timer_dummy = readl(pcic0.pcic_regs+PCI_SYS_LIMIT);
}
static irqreturn_t pcic_timer_handler (int irq, void *h)
{
pcic_clear_clock_irq();
xtime_update(1);
#ifndef CONFIG_SMP
update_process_times(user_mode(get_irq_regs()));
#endif
return IRQ_HANDLED;
}
/* CPU frequency is 100 MHz, timer increments every 4 CPU clocks */
#define USECS_PER_JIFFY (1000000 / HZ)
#define TICK_TIMER_LIMIT ((100 * 1000000 / 4) / HZ)
#define USECS_PER_JIFFY 10000 /* We have 100HZ "standard" timer for sparc */
#define TICK_TIMER_LIMIT ((100*1000000/4)/100)
u32 pci_gettimeoffset(void)
static unsigned int pcic_cycles_offset(void)
{
u32 value, count;
value = readl(pcic0.pcic_regs + PCI_SYS_COUNTER);
count = value & ~PCI_SYS_COUNTER_OVERFLOW;
if (value & PCI_SYS_COUNTER_OVERFLOW)
count += TICK_TIMER_LIMIT;
/*
* We divide all by 100
* We divide all by HZ
* to have microsecond resolution and to avoid overflow
*/
unsigned long count =
readl(pcic0.pcic_regs+PCI_SYS_COUNTER) & ~PCI_SYS_COUNTER_OVERFLOW;
count = ((count/100)*USECS_PER_JIFFY) / (TICK_TIMER_LIMIT/100);
return count * 1000;
}
count = ((count / HZ) * USECS_PER_JIFFY) / (TICK_TIMER_LIMIT / HZ);
/* Coordinate with the sparc_config.clock_rate setting */
return count * 2;
}
void __init pci_time_init(void)
{
@ -736,9 +733,16 @@ void __init pci_time_init(void)
int timer_irq, irq;
int err;
do_arch_gettimeoffset = pci_gettimeoffset;
btfixup();
#ifndef CONFIG_SMP
/*
* The clock_rate is in SBUS dimension.
* We take into account this in pcic_cycles_offset()
*/
sparc_config.clock_rate = SBUS_CLOCK_RATE / HZ;
sparc_config.features |= FEAT_L10_CLOCKEVENT;
#endif
sparc_config.features |= FEAT_L10_CLOCKSOURCE;
sparc_config.get_cycles_offset = pcic_cycles_offset;
writel (TICK_TIMER_LIMIT, pcic->pcic_regs+PCI_SYS_LIMIT);
/* PROM should set appropriate irq */
@ -747,7 +751,7 @@ void __init pci_time_init(void)
writel (PCI_COUNTER_IRQ_SET(timer_irq, 0),
pcic->pcic_regs+PCI_COUNTER_IRQ);
irq = pcic_build_device_irq(NULL, timer_irq);
err = request_irq(irq, pcic_timer_handler,
err = request_irq(irq, timer_interrupt,
IRQF_TIMER, "timer", NULL);
if (err) {
prom_printf("time_init: unable to attach IRQ%d\n", timer_irq);
@ -875,10 +879,9 @@ static void pcic_load_profile_irq(int cpu, unsigned int limit)
void __init sun4m_pci_init_IRQ(void)
{
sparc_irq_config.build_device_irq = pcic_build_device_irq;
BTFIXUPSET_CALL(clear_clock_irq, pcic_clear_clock_irq, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(load_profile_irq, pcic_load_profile_irq, BTFIXUPCALL_NORM);
sparc_config.build_device_irq = pcic_build_device_irq;
sparc_config.clear_clock_irq = pcic_clear_clock_irq;
sparc_config.load_profile_irq = pcic_load_profile_irq;
}
int pcibios_assign_resource(struct pci_dev *pdev, int resource)

View File

@ -67,8 +67,6 @@ struct thread_info *current_set[NR_CPUS];
#ifndef CONFIG_SMP
#define SUN4C_FAULT_HIGH 100
/*
* the idle loop on a Sparc... ;)
*/
@ -76,36 +74,6 @@ void cpu_idle(void)
{
/* endless idle loop with no priority at all */
for (;;) {
if (ARCH_SUN4C) {
static int count = HZ;
static unsigned long last_jiffies;
static unsigned long last_faults;
static unsigned long fps;
unsigned long now;
unsigned long faults;
extern unsigned long sun4c_kernel_faults;
extern void sun4c_grow_kernel_ring(void);
local_irq_disable();
now = jiffies;
count -= (now - last_jiffies);
last_jiffies = now;
if (count < 0) {
count += HZ;
faults = sun4c_kernel_faults;
fps = (fps + (faults - last_faults)) >> 1;
last_faults = faults;
#if 0
printk("kernel faults / second = %ld\n", fps);
#endif
if (fps >= SUN4C_FAULT_HIGH) {
sun4c_grow_kernel_ring();
}
}
local_irq_enable();
}
if (pm_idle) {
while (!need_resched())
(*pm_idle)();
@ -114,7 +82,6 @@ void cpu_idle(void)
cpu_relax();
}
schedule_preempt_disabled();
check_pgt_cache();
}
}
@ -137,7 +104,6 @@ void cpu_idle(void)
cpu_relax();
}
schedule_preempt_disabled();
check_pgt_cache();
}
}
@ -179,88 +145,6 @@ void machine_power_off(void)
machine_halt();
}
#if 0
static DEFINE_SPINLOCK(sparc_backtrace_lock);
void __show_backtrace(unsigned long fp)
{
struct reg_window32 *rw;
unsigned long flags;
int cpu = smp_processor_id();
spin_lock_irqsave(&sparc_backtrace_lock, flags);
rw = (struct reg_window32 *)fp;
while(rw && (((unsigned long) rw) >= PAGE_OFFSET) &&
!(((unsigned long) rw) & 0x7)) {
printk("CPU[%d]: ARGS[%08lx,%08lx,%08lx,%08lx,%08lx,%08lx] "
"FP[%08lx] CALLER[%08lx]: ", cpu,
rw->ins[0], rw->ins[1], rw->ins[2], rw->ins[3],
rw->ins[4], rw->ins[5],
rw->ins[6],
rw->ins[7]);
printk("%pS\n", (void *) rw->ins[7]);
rw = (struct reg_window32 *) rw->ins[6];
}
spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
}
#define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
#define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
#define __GET_FP(fp) __asm__ __volatile__("mov %%i6, %0" : "=r" (fp))
void show_backtrace(void)
{
unsigned long fp;
__SAVE; __SAVE; __SAVE; __SAVE;
__SAVE; __SAVE; __SAVE; __SAVE;
__RESTORE; __RESTORE; __RESTORE; __RESTORE;
__RESTORE; __RESTORE; __RESTORE; __RESTORE;
__GET_FP(fp);
__show_backtrace(fp);
}
#ifdef CONFIG_SMP
void smp_show_backtrace_all_cpus(void)
{
xc0((smpfunc_t) show_backtrace);
show_backtrace();
}
#endif
void show_stackframe(struct sparc_stackf *sf)
{
unsigned long size;
unsigned long *stk;
int i;
printk("l0: %08lx l1: %08lx l2: %08lx l3: %08lx "
"l4: %08lx l5: %08lx l6: %08lx l7: %08lx\n",
sf->locals[0], sf->locals[1], sf->locals[2], sf->locals[3],
sf->locals[4], sf->locals[5], sf->locals[6], sf->locals[7]);
printk("i0: %08lx i1: %08lx i2: %08lx i3: %08lx "
"i4: %08lx i5: %08lx fp: %08lx i7: %08lx\n",
sf->ins[0], sf->ins[1], sf->ins[2], sf->ins[3],
sf->ins[4], sf->ins[5], (unsigned long)sf->fp, sf->callers_pc);
printk("sp: %08lx x0: %08lx x1: %08lx x2: %08lx "
"x3: %08lx x4: %08lx x5: %08lx xx: %08lx\n",
(unsigned long)sf->structptr, sf->xargs[0], sf->xargs[1],
sf->xargs[2], sf->xargs[3], sf->xargs[4], sf->xargs[5],
sf->xxargs[0]);
size = ((unsigned long)sf->fp) - ((unsigned long)sf);
size -= STACKFRAME_SZ;
stk = (unsigned long *)((unsigned long)sf + STACKFRAME_SZ);
i = 0;
do {
printk("s%d: %08lx\n", i++, *stk++);
} while ((size -= sizeof(unsigned long)));
}
#endif
void show_regs(struct pt_regs *r)
{
struct reg_window32 *rw = (struct reg_window32 *) r->u_regs[14];

View File

@ -128,13 +128,12 @@ rtrap_patch2: and %glob_tmp, 0xff, %glob_tmp
wr %glob_tmp, 0x0, %wim
/* Here comes the architecture specific
* branch to the user stack checking routine
* for return from traps.
*/
.globl rtrap_mmu_patchme
rtrap_mmu_patchme: b sun4c_rett_stackchk
andcc %fp, 0x7, %g0
/* Here comes the architecture specific
* branch to the user stack checking routine
* for return from traps.
*/
b srmmu_rett_stackchk
andcc %fp, 0x7, %g0
ret_trap_userwins_ok:
LOAD_PT_PRIV(sp, t_psr, t_pc, t_npc)
@ -225,69 +224,6 @@ ret_trap_user_stack_is_bolixed:
b signal_p
ld [%curptr + TI_FLAGS], %g2
sun4c_rett_stackchk:
be 1f
and %fp, 0xfff, %g1 ! delay slot
b ret_trap_user_stack_is_bolixed + 0x4
wr %t_wim, 0x0, %wim
/* See if we have to check the sanity of one page or two */
1:
add %g1, 0x38, %g1
sra %fp, 29, %g2
add %g2, 0x1, %g2
andncc %g2, 0x1, %g0
be 1f
andncc %g1, 0xff8, %g0
/* %sp is in vma hole, yuck */
b ret_trap_user_stack_is_bolixed + 0x4
wr %t_wim, 0x0, %wim
1:
be sun4c_rett_onepage /* Only one page to check */
lda [%fp] ASI_PTE, %g2
sun4c_rett_twopages:
add %fp, 0x38, %g1
sra %g1, 29, %g2
add %g2, 0x1, %g2
andncc %g2, 0x1, %g0
be 1f
lda [%g1] ASI_PTE, %g2
/* Second page is in vma hole */
b ret_trap_user_stack_is_bolixed + 0x4
wr %t_wim, 0x0, %wim
1:
srl %g2, 29, %g2
andcc %g2, 0x4, %g0
bne sun4c_rett_onepage
lda [%fp] ASI_PTE, %g2
/* Second page has bad perms */
b ret_trap_user_stack_is_bolixed + 0x4
wr %t_wim, 0x0, %wim
sun4c_rett_onepage:
srl %g2, 29, %g2
andcc %g2, 0x4, %g0
bne,a 1f
restore %g0, %g0, %g0
/* A page had bad page permissions, losing... */
b ret_trap_user_stack_is_bolixed + 0x4
wr %t_wim, 0x0, %wim
/* Whee, things are ok, load the window and continue. */
1:
LOAD_WINDOW(sp)
b ret_trap_userwins_ok
save %g0, %g0, %g0
.globl srmmu_rett_stackchk
srmmu_rett_stackchk:
bne ret_trap_user_stack_is_bolixed

View File

@ -73,18 +73,8 @@ rtrap_nmi: ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
.globl rtrap_irq, rtrap, irqsz_patchme, rtrap_xcall
rtrap_irq:
rtrap:
#ifndef CONFIG_SMP
sethi %hi(__cpu_data), %l0
lduw [%l0 + %lo(__cpu_data)], %l1
#else
sethi %hi(__cpu_data), %l0
or %l0, %lo(__cpu_data), %l0
lduw [%l0 + %g5], %l1
#endif
cmp %l1, 0
/* mm/ultra.S:xcall_report_regs KNOWS about this load. */
ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
rtrap_xcall:
sethi %hi(0xf << 20), %l4
and %l1, %l4, %l4

View File

@ -42,7 +42,6 @@
#include <asm/vaddrs.h>
#include <asm/mbus.h>
#include <asm/idprom.h>
#include <asm/machines.h>
#include <asm/cpudata.h>
#include <asm/setup.h>
#include <asm/cacheflush.h>
@ -106,7 +105,6 @@ unsigned long cmdline_memory_size __initdata = 0;
/* which CPU booted us (0xff = not set) */
unsigned char boot_cpu_id = 0xff; /* 0xff will make it into DATA section... */
unsigned char boot_cpu_id4; /* boot_cpu_id << 2 */
static void
prom_console_write(struct console *con, const char *s, unsigned n)
@ -182,13 +180,6 @@ static void __init boot_flags_init(char *commands)
}
}
/* This routine will in the future do all the nasty prom stuff
* to probe for the mmu type and its parameters, etc. This will
* also be where SMP things happen.
*/
extern void sun4c_probe_vac(void);
extern unsigned short root_flags;
extern unsigned short root_dev;
extern unsigned short ram_flags;
@ -200,6 +191,52 @@ extern int root_mountflags;
char reboot_command[COMMAND_LINE_SIZE];
struct cpuid_patch_entry {
unsigned int addr;
unsigned int sun4d[3];
unsigned int leon[3];
};
extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end;
static void __init per_cpu_patch(void)
{
struct cpuid_patch_entry *p;
if (sparc_cpu_model == sun4m) {
/* Nothing to do, this is what the unpatched code
* targets.
*/
return;
}
p = &__cpuid_patch;
while (p < &__cpuid_patch_end) {
unsigned long addr = p->addr;
unsigned int *insns;
switch (sparc_cpu_model) {
case sun4d:
insns = &p->sun4d[0];
break;
case sparc_leon:
insns = &p->leon[0];
break;
default:
prom_printf("Unknown cpu type, halting.\n");
prom_halt();
}
*(unsigned int *) (addr + 0) = insns[0];
flushi(addr + 0);
*(unsigned int *) (addr + 4) = insns[1];
flushi(addr + 4);
*(unsigned int *) (addr + 8) = insns[2];
flushi(addr + 8);
p++;
}
}
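To make the patching concrete, here is a hand-built example of what one entry amounts to, for a hypothetical site that loads the CPU id into %g3. The instruction words are derived from the per-model blackbox helpers this same diff removes (leon_blackbox_id, smp4d_blackbox_id, smp4m_blackbox_id); the address, the choice of %g3 and the open-coded initializer are purely illustrative, since real patch sites are emitted by an assembler macro:

/* The unpatched code at .addr is the sun4m sequence:
 *   rd %tbr, %g3 ; srl %g3, 0xc, %g3 ; and %g3, 3, %g3
 */
static struct cpuid_patch_entry example_entry __initdata = {
	.addr  = 0xf0012344,	/* hypothetical address of the inline code */
	.sun4d = {
		0xc6800800,	/* lda [%g0] ASI_M_VIKING_TMP1, %g3 */
		0x01000000,	/* nop */
		0x01000000,	/* nop */
	},
	.leon  = {
		0x87444000,	/* rd  %asr17, %g3 (cpu index in bits 31:28) */
		0x8730e01c,	/* srl %g3, 0x1c, %g3 */
		0x01000000,	/* nop */
	},
};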
enum sparc_cpu sparc_cpu_model;
EXPORT_SYMBOL(sparc_cpu_model);
@ -225,10 +262,6 @@ void __init setup_arch(char **cmdline_p)
/* Set sparc_cpu_model */
sparc_cpu_model = sun_unknown;
if (!strcmp(&cputypval[0], "sun4 "))
sparc_cpu_model = sun4;
if (!strcmp(&cputypval[0], "sun4c"))
sparc_cpu_model = sun4c;
if (!strcmp(&cputypval[0], "sun4m"))
sparc_cpu_model = sun4m;
if (!strcmp(&cputypval[0], "sun4s"))
@ -244,12 +277,6 @@ void __init setup_arch(char **cmdline_p)
printk("ARCH: ");
switch(sparc_cpu_model) {
case sun4:
printk("SUN4\n");
break;
case sun4c:
printk("SUN4C\n");
break;
case sun4m:
printk("SUN4M\n");
break;
@ -275,8 +302,6 @@ void __init setup_arch(char **cmdline_p)
#endif
idprom_init();
if (ARCH_SUN4C)
sun4c_probe_vac();
load_mmu();
phys_base = 0xffffffffUL;
@ -313,6 +338,9 @@ void __init setup_arch(char **cmdline_p)
init_mm.context = (unsigned long) NO_CONTEXT;
init_task.thread.kregs = &fake_swapper_regs;
/* Run-time patch instructions to match the cpu model */
per_cpu_patch();
paging_init();
smp_setup_cpu_possible_map();

View File

@ -217,12 +217,9 @@ segv:
/* Checks if the fp is valid */
static inline int invalid_frame_pointer(void __user *fp, int fplen)
{
if ((((unsigned long) fp) & 7) ||
!__access_ok((unsigned long)fp, fplen) ||
((sparc_cpu_model == sun4 || sparc_cpu_model == sun4c) &&
((unsigned long) fp < 0xe0000000 && (unsigned long) fp >= 0x20000000)))
if ((((unsigned long) fp) & 7) || !__access_ok((unsigned long)fp, fplen))
return 1;
return 0;
}

View File

@ -40,6 +40,8 @@ volatile unsigned long cpu_callin_map[NR_CPUS] __cpuinitdata = {0,};
cpumask_t smp_commenced_mask = CPU_MASK_NONE;
const struct sparc32_ipi_ops *sparc32_ipi_ops;
/* The only guaranteed locking primitive available on all Sparc
* processors is 'ldstub [%reg + immediate], %dest_reg' which atomically
* places the current byte at the effective address into dest_reg and
@ -85,14 +87,6 @@ void __init smp_cpus_done(unsigned int max_cpus)
(bogosum/(5000/HZ))%100);
switch(sparc_cpu_model) {
case sun4:
printk("SUN4\n");
BUG();
break;
case sun4c:
printk("SUN4C\n");
BUG();
break;
case sun4m:
smp4m_smp_done();
break;
@ -132,7 +126,7 @@ void smp_send_reschedule(int cpu)
* a single CPU. The trap handler needs only to do trap entry/return
* to call schedule.
*/
BTFIXUP_CALL(smp_ipi_resched)(cpu);
sparc32_ipi_ops->resched(cpu);
}
void smp_send_stop(void)
@ -142,7 +136,7 @@ void smp_send_stop(void)
void arch_send_call_function_single_ipi(int cpu)
{
/* trigger one IPI single call on one CPU */
BTFIXUP_CALL(smp_ipi_single)(cpu);
sparc32_ipi_ops->single(cpu);
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
@ -151,7 +145,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
/* trigger IPI mask call on each CPU */
for_each_cpu(cpu, mask)
BTFIXUP_CALL(smp_ipi_mask_one)(cpu);
sparc32_ipi_ops->mask_one(cpu);
}
void smp_resched_interrupt(void)
@ -179,150 +173,9 @@ void smp_call_function_interrupt(void)
irq_exit();
}
void smp_flush_cache_all(void)
{
xc0((smpfunc_t) BTFIXUP_CALL(local_flush_cache_all));
local_flush_cache_all();
}
void smp_flush_tlb_all(void)
{
xc0((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_all));
local_flush_tlb_all();
}
void smp_flush_cache_mm(struct mm_struct *mm)
{
if(mm->context != NO_CONTEXT) {
cpumask_t cpu_mask;
cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
if (!cpumask_empty(&cpu_mask))
xc1((smpfunc_t) BTFIXUP_CALL(local_flush_cache_mm), (unsigned long) mm);
local_flush_cache_mm(mm);
}
}
void smp_flush_tlb_mm(struct mm_struct *mm)
{
if(mm->context != NO_CONTEXT) {
cpumask_t cpu_mask;
cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
if (!cpumask_empty(&cpu_mask)) {
xc1((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_mm), (unsigned long) mm);
if(atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
cpumask_copy(mm_cpumask(mm),
cpumask_of(smp_processor_id()));
}
local_flush_tlb_mm(mm);
}
}
void smp_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
struct mm_struct *mm = vma->vm_mm;
if (mm->context != NO_CONTEXT) {
cpumask_t cpu_mask;
cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
if (!cpumask_empty(&cpu_mask))
xc3((smpfunc_t) BTFIXUP_CALL(local_flush_cache_range), (unsigned long) vma, start, end);
local_flush_cache_range(vma, start, end);
}
}
void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
struct mm_struct *mm = vma->vm_mm;
if (mm->context != NO_CONTEXT) {
cpumask_t cpu_mask;
cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
if (!cpumask_empty(&cpu_mask))
xc3((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_range), (unsigned long) vma, start, end);
local_flush_tlb_range(vma, start, end);
}
}
void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
struct mm_struct *mm = vma->vm_mm;
if(mm->context != NO_CONTEXT) {
cpumask_t cpu_mask;
cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
if (!cpumask_empty(&cpu_mask))
xc2((smpfunc_t) BTFIXUP_CALL(local_flush_cache_page), (unsigned long) vma, page);
local_flush_cache_page(vma, page);
}
}
void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
struct mm_struct *mm = vma->vm_mm;
if(mm->context != NO_CONTEXT) {
cpumask_t cpu_mask;
cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
if (!cpumask_empty(&cpu_mask))
xc2((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_page), (unsigned long) vma, page);
local_flush_tlb_page(vma, page);
}
}
void smp_flush_page_to_ram(unsigned long page)
{
/* Current theory is that those who call this are the ones
* who have just dirtied their cache with the page's contents
* in kernel space, therefore we only run this on local cpu.
*
* XXX This experiment failed, research further... -DaveM
*/
#if 1
xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_to_ram), page);
#endif
local_flush_page_to_ram(page);
}
void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
cpumask_t cpu_mask;
cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
if (!cpumask_empty(&cpu_mask))
xc2((smpfunc_t) BTFIXUP_CALL(local_flush_sig_insns), (unsigned long) mm, insn_addr);
local_flush_sig_insns(mm, insn_addr);
}
extern unsigned int lvl14_resolution;
/* /proc/profile writes can call this, don't __init it please. */
static DEFINE_SPINLOCK(prof_setup_lock);
int setup_profiling_timer(unsigned int multiplier)
{
int i;
unsigned long flags;
/* Prevent level14 ticker IRQ flooding. */
if((!multiplier) || (lvl14_resolution / multiplier) < 500)
return -EINVAL;
spin_lock_irqsave(&prof_setup_lock, flags);
for_each_possible_cpu(i) {
load_profile_irq(i, lvl14_resolution / multiplier);
prof_multiplier(i) = multiplier;
}
spin_unlock_irqrestore(&prof_setup_lock, flags);
return 0;
return -EINVAL;
}
void __init smp_prepare_cpus(unsigned int max_cpus)
@ -345,14 +198,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
smp_store_cpu_info(boot_cpu_id);
switch(sparc_cpu_model) {
case sun4:
printk("SUN4\n");
BUG();
break;
case sun4c:
printk("SUN4C\n");
BUG();
break;
case sun4m:
smp4m_boot_cpus();
break;
@ -418,14 +263,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
int ret=0;
switch(sparc_cpu_model) {
case sun4:
printk("SUN4\n");
BUG();
break;
case sun4c:
printk("SUN4C\n");
BUG();
break;
case sun4m:
ret = smp4m_boot_one_cpu(cpu);
break;

View File

@ -28,19 +28,5 @@ EXPORT_SYMBOL(__ndelay);
EXPORT_SYMBOL(__ret_efault);
EXPORT_SYMBOL(empty_zero_page);
/* Defined using magic */
#ifndef CONFIG_SMP
EXPORT_SYMBOL(BTFIXUP_CALL(___xchg32));
#else
EXPORT_SYMBOL(BTFIXUP_CALL(__hard_smp_processor_id));
#endif
EXPORT_SYMBOL(BTFIXUP_CALL(mmu_unlockarea));
EXPORT_SYMBOL(BTFIXUP_CALL(mmu_lockarea));
EXPORT_SYMBOL(BTFIXUP_CALL(mmu_get_scsi_sgl));
EXPORT_SYMBOL(BTFIXUP_CALL(mmu_get_scsi_one));
EXPORT_SYMBOL(BTFIXUP_CALL(mmu_release_scsi_sgl));
EXPORT_SYMBOL(BTFIXUP_CALL(mmu_release_scsi_one));
EXPORT_SYMBOL(BTFIXUP_CALL(pgprot_noncached));
/* Exporting a symbol from /init/main.c */
EXPORT_SYMBOL(saved_command_line);

View File

@ -1,264 +0,0 @@
/*
* sun4c irq support
*
* djhr: Hacked out of irq.c into a CPU dependent version.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
* Copyright (C) 1995 Pete A. Zaitcev (zaitcev@yahoo.com)
* Copyright (C) 1996 Dave Redman (djhr@tadpole.co.uk)
*/
#include <linux/init.h>
#include <asm/oplib.h>
#include <asm/timer.h>
#include <asm/irq.h>
#include <asm/io.h>
#include "irq.h"
/* Sun4c interrupts are typically laid out as follows:
*
* 1 - Software interrupt, SBUS level 1
* 2 - SBUS level 2
* 3 - ESP SCSI, SBUS level 3
* 4 - Software interrupt
* 5 - Lance ethernet, SBUS level 4
* 6 - Software interrupt
* 7 - Graphics card, SBUS level 5
* 8 - SBUS level 6
* 9 - SBUS level 7
* 10 - Counter timer
* 11 - Floppy
* 12 - Zilog uart
* 13 - CS4231 audio
* 14 - Profiling timer
* 15 - NMI
*
* The interrupt enable bits in the interrupt mask register are
* really only used to enable/disable the timer interrupts, and
* for signalling software interrupts. There is also a master
* interrupt enable bit in this register.
*
* Interrupts are enabled by setting the SUN4C_INT_* bits, they
* are disabled by clearing those bits.
*/
/*
* Bit field defines for the interrupt registers on various
* Sparc machines.
*/
/* The sun4c interrupt register. */
#define SUN4C_INT_ENABLE 0x01 /* Allow interrupts. */
#define SUN4C_INT_E14 0x80 /* Enable level 14 IRQ. */
#define SUN4C_INT_E10 0x20 /* Enable level 10 IRQ. */
#define SUN4C_INT_E8 0x10 /* Enable level 8 IRQ. */
#define SUN4C_INT_E6 0x08 /* Enable level 6 IRQ. */
#define SUN4C_INT_E4 0x04 /* Enable level 4 IRQ. */
#define SUN4C_INT_E1 0x02 /* Enable level 1 IRQ. */
/*
* Pointer to the interrupt enable byte
* Used by entry.S
*/
unsigned char __iomem *interrupt_enable;
static void sun4c_mask_irq(struct irq_data *data)
{
unsigned long mask = (unsigned long)data->chip_data;
if (mask) {
unsigned long flags;
local_irq_save(flags);
mask = sbus_readb(interrupt_enable) & ~mask;
sbus_writeb(mask, interrupt_enable);
local_irq_restore(flags);
}
}
static void sun4c_unmask_irq(struct irq_data *data)
{
unsigned long mask = (unsigned long)data->chip_data;
if (mask) {
unsigned long flags;
local_irq_save(flags);
mask = sbus_readb(interrupt_enable) | mask;
sbus_writeb(mask, interrupt_enable);
local_irq_restore(flags);
}
}
static unsigned int sun4c_startup_irq(struct irq_data *data)
{
irq_link(data->irq);
sun4c_unmask_irq(data);
return 0;
}
static void sun4c_shutdown_irq(struct irq_data *data)
{
sun4c_mask_irq(data);
irq_unlink(data->irq);
}
static struct irq_chip sun4c_irq = {
.name = "sun4c",
.irq_startup = sun4c_startup_irq,
.irq_shutdown = sun4c_shutdown_irq,
.irq_mask = sun4c_mask_irq,
.irq_unmask = sun4c_unmask_irq,
};
static unsigned int sun4c_build_device_irq(struct platform_device *op,
unsigned int real_irq)
{
unsigned int irq;
if (real_irq >= 16) {
prom_printf("Bogus sun4c IRQ %u\n", real_irq);
prom_halt();
}
irq = irq_alloc(real_irq, real_irq);
if (irq) {
unsigned long mask = 0UL;
switch (real_irq) {
case 1:
mask = SUN4C_INT_E1;
break;
case 8:
mask = SUN4C_INT_E8;
break;
case 10:
mask = SUN4C_INT_E10;
break;
case 14:
mask = SUN4C_INT_E14;
break;
default:
/* All the rest are either always enabled,
* or are for signalling software interrupts.
*/
break;
}
irq_set_chip_and_handler_name(irq, &sun4c_irq,
handle_level_irq, "level");
irq_set_chip_data(irq, (void *)mask);
}
return irq;
}
struct sun4c_timer_info {
u32 l10_count;
u32 l10_limit;
u32 l14_count;
u32 l14_limit;
};
static struct sun4c_timer_info __iomem *sun4c_timers;
static void sun4c_clear_clock_irq(void)
{
sbus_readl(&sun4c_timers->l10_limit);
}
static void sun4c_load_profile_irq(int cpu, unsigned int limit)
{
/* Errm.. not sure how to do this.. */
}
static void __init sun4c_init_timers(irq_handler_t counter_fn)
{
const struct linux_prom_irqs *prom_irqs;
struct device_node *dp;
unsigned int irq;
const u32 *addr;
int err;
dp = of_find_node_by_name(NULL, "counter-timer");
if (!dp) {
prom_printf("sun4c_init_timers: Unable to find counter-timer\n");
prom_halt();
}
addr = of_get_property(dp, "address", NULL);
if (!addr) {
prom_printf("sun4c_init_timers: No address property\n");
prom_halt();
}
sun4c_timers = (void __iomem *) (unsigned long) addr[0];
prom_irqs = of_get_property(dp, "intr", NULL);
of_node_put(dp);
if (!prom_irqs) {
prom_printf("sun4c_init_timers: No intr property\n");
prom_halt();
}
/* Have the level 10 timer tick at 100HZ. We don't touch the
* level 14 timer limit since we are letting the prom handle
* them until we have a real console driver so L1-A works.
*/
sbus_writel((((1000000/HZ) + 1) << 10), &sun4c_timers->l10_limit);
master_l10_counter = &sun4c_timers->l10_count;
irq = sun4c_build_device_irq(NULL, prom_irqs[0].pri);
err = request_irq(irq, counter_fn, IRQF_TIMER, "timer", NULL);
if (err) {
prom_printf("sun4c_init_timers: request_irq() fails with %d\n", err);
prom_halt();
}
/* disable timer interrupt */
sun4c_mask_irq(irq_get_irq_data(irq));
}
#ifdef CONFIG_SMP
static void sun4c_nop(void)
{
}
#endif
void __init sun4c_init_IRQ(void)
{
struct device_node *dp;
const u32 *addr;
dp = of_find_node_by_name(NULL, "interrupt-enable");
if (!dp) {
prom_printf("sun4c_init_IRQ: Unable to find interrupt-enable\n");
prom_halt();
}
addr = of_get_property(dp, "address", NULL);
of_node_put(dp);
if (!addr) {
prom_printf("sun4c_init_IRQ: No address property\n");
prom_halt();
}
interrupt_enable = (void __iomem *) (unsigned long) addr[0];
BTFIXUPSET_CALL(clear_clock_irq, sun4c_clear_clock_irq, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(load_profile_irq, sun4c_load_profile_irq, BTFIXUPCALL_NOP);
sparc_irq_config.init_timers = sun4c_init_timers;
sparc_irq_config.build_device_irq = sun4c_build_device_irq;
#ifdef CONFIG_SMP
BTFIXUPSET_CALL(set_cpu_int, sun4c_nop, BTFIXUPCALL_NOP);
BTFIXUPSET_CALL(clear_cpu_int, sun4c_nop, BTFIXUPCALL_NOP);
BTFIXUPSET_CALL(set_irq_udt, sun4c_nop, BTFIXUPCALL_NOP);
#endif
sbus_writeb(SUN4C_INT_ENABLE, interrupt_enable);
/* Cannot enable interrupts until OBP ticker is disabled. */
}

View File

@ -15,6 +15,7 @@
#include <asm/sbi.h>
#include <asm/cacheflush.h>
#include <asm/setup.h>
#include <asm/oplib.h>
#include "kernel.h"
#include "irq.h"
@ -243,19 +244,6 @@ struct irq_chip sun4d_irq = {
};
#ifdef CONFIG_SMP
static void sun4d_set_cpu_int(int cpu, int level)
{
sun4d_send_ipi(cpu, level);
}
static void sun4d_clear_ipi(int cpu, int level)
{
}
static void sun4d_set_udt(int cpu)
{
}
/* Setup IRQ distribution scheme. */
void __init sun4d_distribute_irqs(void)
{
@ -282,7 +270,8 @@ static void sun4d_clear_clock_irq(void)
static void sun4d_load_profile_irq(int cpu, unsigned int limit)
{
bw_set_prof_limit(cpu, limit);
unsigned int value = limit ? timer_value(limit) : 0;
bw_set_prof_limit(cpu, value);
}
static void __init sun4d_load_profile_irqs(void)
@ -418,12 +407,12 @@ static void __init sun4d_fixup_trap_table(void)
trap_table->inst_two = lvl14_save[1];
trap_table->inst_three = lvl14_save[2];
trap_table->inst_four = lvl14_save[3];
local_flush_cache_all();
local_ops->cache_all();
local_irq_restore(flags);
#endif
}
static void __init sun4d_init_timers(irq_handler_t counter_fn)
static void __init sun4d_init_timers(void)
{
struct device_node *dp;
struct resource res;
@ -466,12 +455,20 @@ static void __init sun4d_init_timers(irq_handler_t counter_fn)
prom_halt();
}
sbus_writel((((1000000/HZ) + 1) << 10), &sun4d_timers->l10_timer_limit);
#ifdef CONFIG_SMP
sparc_config.cs_period = SBUS_CLOCK_RATE * 2; /* 2 seconds */
#else
sparc_config.cs_period = SBUS_CLOCK_RATE / HZ; /* 1/HZ sec */
sparc_config.features |= FEAT_L10_CLOCKEVENT;
#endif
sparc_config.features |= FEAT_L10_CLOCKSOURCE;
sbus_writel(timer_value(sparc_config.cs_period),
&sun4d_timers->l10_timer_limit);
master_l10_counter = &sun4d_timers->l10_cur_count;
irq = sun4d_build_timer_irq(board, SUN4D_TIMER_IRQ);
err = request_irq(irq, counter_fn, IRQF_TIMER, "timer", NULL);
err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL);
if (err) {
prom_printf("sun4d_init_timers: request_irq() failed with %d\n",
err);
@ -509,16 +506,11 @@ void __init sun4d_init_IRQ(void)
{
local_irq_disable();
BTFIXUPSET_CALL(clear_clock_irq, sun4d_clear_clock_irq, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(load_profile_irq, sun4d_load_profile_irq, BTFIXUPCALL_NORM);
sparc_config.init_timers = sun4d_init_timers;
sparc_config.build_device_irq = sun4d_build_device_irq;
sparc_config.clock_rate = SBUS_CLOCK_RATE;
sparc_config.clear_clock_irq = sun4d_clear_clock_irq;
sparc_config.load_profile_irq = sun4d_load_profile_irq;
sparc_irq_config.init_timers = sun4d_init_timers;
sparc_irq_config.build_device_irq = sun4d_build_device_irq;
#ifdef CONFIG_SMP
BTFIXUPSET_CALL(set_cpu_int, sun4d_set_cpu_int, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(clear_cpu_int, sun4d_clear_ipi, BTFIXUPCALL_NOP);
BTFIXUPSET_CALL(set_irq_udt, sun4d_set_udt, BTFIXUPCALL_NOP);
#endif
/* Cannot enable interrupts until OBP ticker is disabled. */
}

View File

@ -6,16 +6,20 @@
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/profile.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include <asm/cacheflush.h>
#include <asm/switch_to.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/oplib.h>
#include <asm/sbi.h>
#include <asm/mmu.h>
#include <asm/tlbflush.h>
#include <asm/switch_to.h>
#include <asm/cacheflush.h>
#include "kernel.h"
#include "irq.h"
@ -34,7 +38,6 @@ static inline unsigned long sun4d_swap(volatile unsigned long *ptr, unsigned lon
}
static void smp4d_ipi_init(void);
static void smp_setup_percpu_timer(void);
static unsigned char cpu_leds[32];
@ -49,7 +52,7 @@ static inline void show_leds(int cpuid)
void __cpuinit smp4d_callin(void)
{
int cpuid = hard_smp4d_processor_id();
int cpuid = hard_smp_processor_id();
unsigned long flags;
/* Show we are alive */
@ -59,8 +62,8 @@ void __cpuinit smp4d_callin(void)
/* Enable level15 interrupt, disable level14 interrupt for now */
cc_set_imsk((cc_get_imsk() & ~0x8000) | 0x4000);
local_flush_cache_all();
local_flush_tlb_all();
local_ops->cache_all();
local_ops->tlb_all();
notify_cpu_starting(cpuid);
/*
@ -70,17 +73,17 @@ void __cpuinit smp4d_callin(void)
* to call the scheduler code.
*/
/* Get our local ticker going. */
smp_setup_percpu_timer();
register_percpu_ce(cpuid);
calibrate_delay();
smp_store_cpu_info(cpuid);
local_flush_cache_all();
local_flush_tlb_all();
local_ops->cache_all();
local_ops->tlb_all();
/* Allow master to continue. */
sun4d_swap((unsigned long *)&cpu_callin_map[cpuid], 1);
local_flush_cache_all();
local_flush_tlb_all();
local_ops->cache_all();
local_ops->tlb_all();
while ((unsigned long)current_set[cpuid] < PAGE_OFFSET)
barrier();
@ -100,8 +103,8 @@ void __cpuinit smp4d_callin(void)
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
local_flush_cache_all();
local_flush_tlb_all();
local_ops->cache_all();
local_ops->tlb_all();
local_irq_enable(); /* We don't allow PIL 14 yet */
@ -123,8 +126,7 @@ void __init smp4d_boot_cpus(void)
smp4d_ipi_init();
if (boot_cpu_id)
current_set[0] = NULL;
smp_setup_percpu_timer();
local_flush_cache_all();
local_ops->cache_all();
}
int __cpuinit smp4d_boot_one_cpu(int i)
@ -150,7 +152,7 @@ int __cpuinit smp4d_boot_one_cpu(int i)
/* whirrr, whirrr, whirrrrrrrrr... */
printk(KERN_INFO "Starting CPU %d at %p\n", i, entry);
local_flush_cache_all();
local_ops->cache_all();
prom_startcpu(cpu_node,
&smp_penguin_ctable, 0, (char *)entry);
@ -168,7 +170,7 @@ int __cpuinit smp4d_boot_one_cpu(int i)
return -ENODEV;
}
local_flush_cache_all();
local_ops->cache_all();
return 0;
}
@ -185,7 +187,7 @@ void __init smp4d_smp_done(void)
prev = &cpu_data(i).next;
}
*prev = first;
local_flush_cache_all();
local_ops->cache_all();
/* Ok, they are spinning and ready to go. */
smp_processors_ready = 1;
@ -233,7 +235,20 @@ void sun4d_ipi_interrupt(void)
}
}
static void smp4d_ipi_single(int cpu)
/* +-------+-------------+-----------+------------------------------------+
*  | bcast |    devid    |    sid    |            levels mask             |
*  +-------+-------------+-----------+------------------------------------+
*   31      30         23 22       15 14                                 0
*/
#define IGEN_MESSAGE(bcast, devid, sid, levels) \
(((bcast) << 31) | ((devid) << 23) | ((sid) << 15) | (levels))
static void sun4d_send_ipi(int cpu, int level)
{
cc_set_igen(IGEN_MESSAGE(0, cpu << 3, 6 + ((level >> 1) & 7), 1 << (level - 1)));
}
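For reference, a worked instance of the encoding above (the cpu and level values are arbitrary):

/* cpu = 2, level = 7:
 *   devid  = 2 << 3             = 16
 *   sid    = 6 + ((7 >> 1) & 7) = 9
 *   levels = 1 << (7 - 1)       = 0x40
 *   IGEN_MESSAGE(0, 16, 9, 0x40)
 *          = (16 << 23) | (9 << 15) | 0x40 = 0x08048040
 */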
static void sun4d_ipi_single(int cpu)
{
struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu);
@ -244,7 +259,7 @@ static void smp4d_ipi_single(int cpu)
sun4d_send_ipi(cpu, SUN4D_IPI_IRQ);
}
static void smp4d_ipi_mask_one(int cpu)
static void sun4d_ipi_mask_one(int cpu)
{
struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu);
@ -255,7 +270,7 @@ static void smp4d_ipi_mask_one(int cpu)
sun4d_send_ipi(cpu, SUN4D_IPI_IRQ);
}
static void smp4d_ipi_resched(int cpu)
static void sun4d_ipi_resched(int cpu)
{
struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu);
@ -280,7 +295,7 @@ static struct smp_funcall {
static DEFINE_SPINLOCK(cross_call_lock);
/* Cross calls must be serialized, at least currently. */
static void smp4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
static void sun4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
unsigned long arg2, unsigned long arg3,
unsigned long arg4)
{
@ -352,7 +367,7 @@ static void smp4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
/* Running cross calls. */
void smp4d_cross_call_irq(void)
{
int i = hard_smp4d_processor_id();
int i = hard_smp_processor_id();
ccall_info.processors_in[i] = 1;
ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3,
@ -363,7 +378,8 @@ void smp4d_cross_call_irq(void)
void smp4d_percpu_timer_interrupt(struct pt_regs *regs)
{
struct pt_regs *old_regs;
int cpu = hard_smp4d_processor_id();
int cpu = hard_smp_processor_id();
struct clock_event_device *ce;
static int cpu_tick[NR_CPUS];
static char led_mask[] = { 0xe, 0xd, 0xb, 0x7, 0xb, 0xd };
@ -379,45 +395,21 @@ void smp4d_percpu_timer_interrupt(struct pt_regs *regs)
show_leds(cpu);
}
profile_tick(CPU_PROFILING);
ce = &per_cpu(sparc32_clockevent, cpu);
if (!--prof_counter(cpu)) {
int user = user_mode(regs);
irq_enter();
ce->event_handler(ce);
irq_exit();
irq_enter();
update_process_times(user);
irq_exit();
prof_counter(cpu) = prof_multiplier(cpu);
}
set_irq_regs(old_regs);
}
static void __cpuinit smp_setup_percpu_timer(void)
{
int cpu = hard_smp4d_processor_id();
prof_counter(cpu) = prof_multiplier(cpu) = 1;
load_profile_irq(cpu, lvl14_resolution);
}
void __init smp4d_blackbox_id(unsigned *addr)
{
int rd = *addr & 0x3e000000;
addr[0] = 0xc0800800 | rd; /* lda [%g0] ASI_M_VIKING_TMP1, reg */
addr[1] = 0x01000000; /* nop */
addr[2] = 0x01000000; /* nop */
}
void __init smp4d_blackbox_current(unsigned *addr)
{
int rd = *addr & 0x3e000000;
addr[0] = 0xc0800800 | rd; /* lda [%g0] ASI_M_VIKING_TMP1, reg */
addr[2] = 0x81282002 | rd | (rd >> 11); /* sll reg, 2, reg */
addr[4] = 0x01000000; /* nop */
}
static const struct sparc32_ipi_ops sun4d_ipi_ops = {
.cross_call = sun4d_cross_call,
.resched = sun4d_ipi_resched,
.single = sun4d_ipi_single,
.mask_one = sun4d_ipi_mask_one,
};
void __init sun4d_init_smp(void)
{
@ -426,14 +418,7 @@ void __init sun4d_init_smp(void)
/* Patch ipi15 trap table */
t_nmi[1] = t_nmi[1] + (linux_trap_ipi15_sun4d - linux_trap_ipi15_sun4m);
/* And set btfixup... */
BTFIXUPSET_BLACKBOX(hard_smp_processor_id, smp4d_blackbox_id);
BTFIXUPSET_BLACKBOX(load_current, smp4d_blackbox_current);
BTFIXUPSET_CALL(smp_cross_call, smp4d_cross_call, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(__hard_smp_processor_id, __smp4d_processor_id, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(smp_ipi_resched, smp4d_ipi_resched, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(smp_ipi_single, smp4d_ipi_single, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(smp_ipi_mask_one, smp4d_ipi_mask_one, BTFIXUPCALL_NORM);
sparc32_ipi_ops = &sun4d_ipi_ops;
for (i = 0; i < NR_CPUS; i++) {
ccall_info.processors_in[i] = 1;

View File

@ -112,9 +112,6 @@ struct sun4m_handler_data {
#define SUN4M_INT_E14 0x00000080
#define SUN4M_INT_E10 0x00080000
#define SUN4M_HARD_INT(x) (0x000000001 << (x))
#define SUN4M_SOFT_INT(x) (0x000010000 << (x))
#define SUN4M_INT_MASKALL 0x80000000 /* mask all interrupts */
#define SUN4M_INT_MODULE_ERR 0x40000000 /* module error */
#define SUN4M_INT_M2S_WRITE_ERR 0x20000000 /* write buffer error */
@ -282,23 +279,6 @@ out:
return irq;
}
#ifdef CONFIG_SMP
static void sun4m_send_ipi(int cpu, int level)
{
sbus_writel(SUN4M_SOFT_INT(level), &sun4m_irq_percpu[cpu]->set);
}
static void sun4m_clear_ipi(int cpu, int level)
{
sbus_writel(SUN4M_SOFT_INT(level), &sun4m_irq_percpu[cpu]->clear);
}
static void sun4m_set_udt(int cpu)
{
sbus_writel(cpu, &sun4m_irq_global->interrupt_target);
}
#endif
struct sun4m_timer_percpu {
u32 l14_limit;
u32 l14_count;
@ -318,9 +298,6 @@ struct sun4m_timer_global {
static struct sun4m_timer_global __iomem *timers_global;
unsigned int lvl14_resolution = (((1000000/HZ) + 1) << 10);
static void sun4m_clear_clock_irq(void)
{
sbus_readl(&timers_global->l10_limit);
@ -369,10 +346,11 @@ void sun4m_clear_profile_irq(int cpu)
static void sun4m_load_profile_irq(int cpu, unsigned int limit)
{
sbus_writel(limit, &timers_percpu[cpu]->l14_limit);
unsigned int value = limit ? timer_value(limit) : 0;
sbus_writel(value, &timers_percpu[cpu]->l14_limit);
}
static void __init sun4m_init_timers(irq_handler_t counter_fn)
static void __init sun4m_init_timers(void)
{
struct device_node *dp = of_find_node_by_name(NULL, "counter");
int i, err, len, num_cpu_timers;
@ -402,13 +380,22 @@ static void __init sun4m_init_timers(irq_handler_t counter_fn)
/* Every per-cpu timer works in timer mode */
sbus_writel(0x00000000, &timers_global->timer_config);
sbus_writel((((1000000/HZ) + 1) << 10), &timers_global->l10_limit);
#ifdef CONFIG_SMP
sparc_config.cs_period = SBUS_CLOCK_RATE * 2; /* 2 seconds */
sparc_config.features |= FEAT_L14_ONESHOT;
#else
sparc_config.cs_period = SBUS_CLOCK_RATE / HZ; /* 1/HZ sec */
sparc_config.features |= FEAT_L10_CLOCKEVENT;
#endif
sparc_config.features |= FEAT_L10_CLOCKSOURCE;
sbus_writel(timer_value(sparc_config.cs_period),
&timers_global->l10_limit);
master_l10_counter = &timers_global->l10_count;
irq = sun4m_build_device_irq(NULL, SUN4M_TIMER_IRQ);
err = request_irq(irq, counter_fn, IRQF_TIMER, "timer", NULL);
err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL);
if (err) {
printk(KERN_ERR "sun4m_init_timers: Register IRQ error %d.\n",
err);
@ -434,7 +421,7 @@ static void __init sun4m_init_timers(irq_handler_t counter_fn)
trap_table->inst_two = lvl14_save[1];
trap_table->inst_three = lvl14_save[2];
trap_table->inst_four = lvl14_save[3];
local_flush_cache_all();
local_ops->cache_all();
local_irq_restore(flags);
}
#endif
@ -475,17 +462,12 @@ void __init sun4m_init_IRQ(void)
if (num_cpu_iregs == 4)
sbus_writel(0, &sun4m_irq_global->interrupt_target);
BTFIXUPSET_CALL(clear_clock_irq, sun4m_clear_clock_irq, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(load_profile_irq, sun4m_load_profile_irq, BTFIXUPCALL_NORM);
sparc_config.init_timers = sun4m_init_timers;
sparc_config.build_device_irq = sun4m_build_device_irq;
sparc_config.clock_rate = SBUS_CLOCK_RATE;
sparc_config.clear_clock_irq = sun4m_clear_clock_irq;
sparc_config.load_profile_irq = sun4m_load_profile_irq;
sparc_irq_config.init_timers = sun4m_init_timers;
sparc_irq_config.build_device_irq = sun4m_build_device_irq;
#ifdef CONFIG_SMP
BTFIXUPSET_CALL(set_cpu_int, sun4m_send_ipi, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(clear_cpu_int, sun4m_clear_ipi, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(set_irq_udt, sun4m_set_udt, BTFIXUPCALL_NORM);
#endif
/* Cannot enable interrupts until OBP ticker is disabled. */
}

View File

@ -4,14 +4,18 @@
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/profile.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include <asm/cacheflush.h>
#include <asm/switch_to.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/oplib.h>
#include "irq.h"
#include "kernel.h"
@ -30,26 +34,22 @@ swap_ulong(volatile unsigned long *ptr, unsigned long val)
return val;
}
static void smp4m_ipi_init(void);
static void smp_setup_percpu_timer(void);
void __cpuinit smp4m_callin(void)
{
int cpuid = hard_smp_processor_id();
local_flush_cache_all();
local_flush_tlb_all();
local_ops->cache_all();
local_ops->tlb_all();
notify_cpu_starting(cpuid);
/* Get our local ticker going. */
smp_setup_percpu_timer();
register_percpu_ce(cpuid);
calibrate_delay();
smp_store_cpu_info(cpuid);
local_flush_cache_all();
local_flush_tlb_all();
local_ops->cache_all();
local_ops->tlb_all();
/*
* Unblock the master CPU _only_ when the scheduler state
@ -61,8 +61,8 @@ void __cpuinit smp4m_callin(void)
swap_ulong(&cpu_callin_map[cpuid], 1);
/* XXX: What's up with all the flushes? */
local_flush_cache_all();
local_flush_tlb_all();
local_ops->cache_all();
local_ops->tlb_all();
/* Fix idle thread fields. */
__asm__ __volatile__("ld [%0], %%g6\n\t"
@ -86,9 +86,8 @@ void __cpuinit smp4m_callin(void)
*/
void __init smp4m_boot_cpus(void)
{
smp4m_ipi_init();
smp_setup_percpu_timer();
local_flush_cache_all();
sun4m_unmask_profile_irq();
local_ops->cache_all();
}
int __cpuinit smp4m_boot_one_cpu(int i)
@ -117,7 +116,7 @@ int __cpuinit smp4m_boot_one_cpu(int i)
/* whirrr, whirrr, whirrrrrrrrr... */
printk(KERN_INFO "Starting CPU %d at %p\n", i, entry);
local_flush_cache_all();
local_ops->cache_all();
prom_startcpu(cpu_node, &smp_penguin_ctable, 0, (char *)entry);
/* wheee... it's going... */
@ -132,7 +131,7 @@ int __cpuinit smp4m_boot_one_cpu(int i)
return -ENODEV;
}
local_flush_cache_all();
local_ops->cache_all();
return 0;
}
@ -149,30 +148,29 @@ void __init smp4m_smp_done(void)
prev = &cpu_data(i).next;
}
*prev = first;
local_flush_cache_all();
local_ops->cache_all();
/* Ok, they are spinning and ready to go. */
}
/* Initialize IPIs on the SUN4M SMP machine */
static void __init smp4m_ipi_init(void)
static void sun4m_send_ipi(int cpu, int level)
{
sbus_writel(SUN4M_SOFT_INT(level), &sun4m_irq_percpu[cpu]->set);
}
static void smp4m_ipi_resched(int cpu)
static void sun4m_ipi_resched(int cpu)
{
set_cpu_int(cpu, IRQ_IPI_RESCHED);
sun4m_send_ipi(cpu, IRQ_IPI_RESCHED);
}
static void smp4m_ipi_single(int cpu)
static void sun4m_ipi_single(int cpu)
{
set_cpu_int(cpu, IRQ_IPI_SINGLE);
sun4m_send_ipi(cpu, IRQ_IPI_SINGLE);
}
static void smp4m_ipi_mask_one(int cpu)
static void sun4m_ipi_mask_one(int cpu)
{
set_cpu_int(cpu, IRQ_IPI_MASK);
sun4m_send_ipi(cpu, IRQ_IPI_MASK);
}
static struct smp_funcall {
@ -189,7 +187,7 @@ static struct smp_funcall {
static DEFINE_SPINLOCK(cross_call_lock);
/* Cross calls must be serialized, at least currently. */
static void smp4m_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
static void sun4m_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
unsigned long arg2, unsigned long arg3,
unsigned long arg4)
{
@ -216,7 +214,7 @@ static void smp4m_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
if (cpumask_test_cpu(i, &mask)) {
ccall_info.processors_in[i] = 0;
ccall_info.processors_out[i] = 0;
set_cpu_int(i, IRQ_CROSS_CALL);
sun4m_send_ipi(i, IRQ_CROSS_CALL);
} else {
ccall_info.processors_in[i] = 1;
ccall_info.processors_out[i] = 1;
@ -260,64 +258,33 @@ void smp4m_cross_call_irq(void)
void smp4m_percpu_timer_interrupt(struct pt_regs *regs)
{
struct pt_regs *old_regs;
struct clock_event_device *ce;
int cpu = smp_processor_id();
old_regs = set_irq_regs(regs);
sun4m_clear_profile_irq(cpu);
ce = &per_cpu(sparc32_clockevent, cpu);
profile_tick(CPU_PROFILING);
if (ce->mode & CLOCK_EVT_MODE_PERIODIC)
sun4m_clear_profile_irq(cpu);
else
sparc_config.load_profile_irq(cpu, 0); /* Is this needless? */
if (!--prof_counter(cpu)) {
int user = user_mode(regs);
irq_enter();
ce->event_handler(ce);
irq_exit();
irq_enter();
update_process_times(user);
irq_exit();
prof_counter(cpu) = prof_multiplier(cpu);
}
set_irq_regs(old_regs);
}
static void __cpuinit smp_setup_percpu_timer(void)
{
int cpu = smp_processor_id();
prof_counter(cpu) = prof_multiplier(cpu) = 1;
load_profile_irq(cpu, lvl14_resolution);
if (cpu == boot_cpu_id)
sun4m_unmask_profile_irq();
}
static void __init smp4m_blackbox_id(unsigned *addr)
{
int rd = *addr & 0x3e000000;
int rs1 = rd >> 11;
addr[0] = 0x81580000 | rd; /* rd %tbr, reg */
addr[1] = 0x8130200c | rd | rs1; /* srl reg, 0xc, reg */
addr[2] = 0x80082003 | rd | rs1; /* and reg, 3, reg */
}
static void __init smp4m_blackbox_current(unsigned *addr)
{
int rd = *addr & 0x3e000000;
int rs1 = rd >> 11;
addr[0] = 0x81580000 | rd; /* rd %tbr, reg */
addr[2] = 0x8130200a | rd | rs1; /* srl reg, 0xa, reg */
addr[4] = 0x8008200c | rd | rs1; /* and reg, 0xc, reg */
}
static const struct sparc32_ipi_ops sun4m_ipi_ops = {
.cross_call = sun4m_cross_call,
.resched = sun4m_ipi_resched,
.single = sun4m_ipi_single,
.mask_one = sun4m_ipi_mask_one,
};
void __init sun4m_init_smp(void)
{
BTFIXUPSET_BLACKBOX(hard_smp_processor_id, smp4m_blackbox_id);
BTFIXUPSET_BLACKBOX(load_current, smp4m_blackbox_current);
BTFIXUPSET_CALL(smp_cross_call, smp4m_cross_call, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(__hard_smp_processor_id, __smp4m_processor_id, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(smp_ipi_resched, smp4m_ipi_resched, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(smp_ipi_single, smp4m_ipi_single, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(smp_ipi_mask_one, smp4m_ipi_mask_one, BTFIXUPCALL_NORM);
sparc32_ipi_ops = &sun4m_ipi_ops;
}


@@ -53,8 +53,6 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
/* See asm-sparc/uaccess.h */
if (len > TASK_SIZE - PAGE_SIZE)
return -ENOMEM;
if (ARCH_SUN4C && len > 0x20000000)
return -ENOMEM;
if (!addr)
addr = TASK_UNMAPPED_BASE;
@@ -65,10 +63,6 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
/* At this point: (!vmm || addr < vmm->vm_end). */
if (ARCH_SUN4C && addr < 0xe0000000 && 0x20000000 - len < addr) {
addr = PAGE_OFFSET;
vmm = find_vma(current->mm, PAGE_OFFSET);
}
if (TASK_SIZE - PAGE_SIZE - len < addr)
return -ENOMEM;
if (!vmm || addr + len <= vmm->vm_start)
@@ -99,11 +93,6 @@ out:
int sparc_mmap_check(unsigned long addr, unsigned long len)
{
if (ARCH_SUN4C &&
(len > 0x20000000 ||
(addr < 0xe0000000 && addr + len > 0x20000000)))
return -EINVAL;
/* See asm-sparc/uaccess.h */
if (len > TASK_SIZE - PAGE_SIZE || addr + len > TASK_SIZE - PAGE_SIZE)
return -EINVAL;


@@ -26,6 +26,8 @@
#include <linux/rtc.h>
#include <linux/rtc/m48t59.h>
#include <linux/timex.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/ioport.h>
@@ -40,13 +42,24 @@
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/idprom.h>
#include <asm/machines.h>
#include <asm/page.h>
#include <asm/pcic.h>
#include <asm/irq_regs.h>
#include <asm/setup.h>
#include "irq.h"
static __cacheline_aligned_in_smp DEFINE_SEQLOCK(timer_cs_lock);
static __volatile__ u64 timer_cs_internal_counter = 0;
static char timer_cs_enabled = 0;
static struct clock_event_device timer_ce;
static char timer_ce_enabled = 0;
#ifdef CONFIG_SMP
DEFINE_PER_CPU(struct clock_event_device, sparc32_clockevent);
#endif
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL(rtc_lock);
@@ -55,7 +68,6 @@ static int set_rtc_mmss(unsigned long);
unsigned long profile_pc(struct pt_regs *regs)
{
extern char __copy_user_begin[], __copy_user_end[];
extern char __atomic_begin[], __atomic_end[];
extern char __bzero_begin[], __bzero_end[];
unsigned long pc = regs->pc;
@@ -63,8 +75,6 @@ unsigned long profile_pc(struct pt_regs *regs)
if (in_lock_functions(pc) ||
(pc >= (unsigned long) __copy_user_begin &&
pc < (unsigned long) __copy_user_end) ||
(pc >= (unsigned long) __atomic_begin &&
pc < (unsigned long) __atomic_end) ||
(pc >= (unsigned long) __bzero_begin &&
pc < (unsigned long) __bzero_end))
pc = regs->u_regs[UREG_RETPC];
@@ -75,36 +85,168 @@ EXPORT_SYMBOL(profile_pc);
__volatile__ unsigned int *master_l10_counter;
u32 (*do_arch_gettimeoffset)(void);
int update_persistent_clock(struct timespec now)
{
return set_rtc_mmss(now.tv_sec);
}
/*
* timer_interrupt() needs to keep up the real-time clock,
* as well as call the "xtime_update()" routine every clocktick
*/
#define TICK_SIZE (tick_nsec / 1000)
static irqreturn_t timer_interrupt(int dummy, void *dev_id)
irqreturn_t notrace timer_interrupt(int dummy, void *dev_id)
{
#ifndef CONFIG_SMP
profile_tick(CPU_PROFILING);
#endif
if (timer_cs_enabled) {
write_seqlock(&timer_cs_lock);
timer_cs_internal_counter++;
sparc_config.clear_clock_irq();
write_sequnlock(&timer_cs_lock);
} else {
sparc_config.clear_clock_irq();
}
clear_clock_irq();
if (timer_ce_enabled)
timer_ce.event_handler(&timer_ce);
xtime_update(1);
#ifndef CONFIG_SMP
update_process_times(user_mode(get_irq_regs()));
#endif
return IRQ_HANDLED;
}
static void timer_ce_set_mode(enum clock_event_mode mode,
struct clock_event_device *evt)
{
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
case CLOCK_EVT_MODE_RESUME:
timer_ce_enabled = 1;
break;
case CLOCK_EVT_MODE_SHUTDOWN:
timer_ce_enabled = 0;
break;
default:
break;
}
smp_mb();
}
static __init void setup_timer_ce(void)
{
struct clock_event_device *ce = &timer_ce;
BUG_ON(smp_processor_id() != boot_cpu_id);
ce->name = "timer_ce";
ce->rating = 100;
ce->features = CLOCK_EVT_FEAT_PERIODIC;
ce->set_mode = timer_ce_set_mode;
ce->cpumask = cpu_possible_mask;
ce->shift = 32;
ce->mult = div_sc(sparc_config.clock_rate, NSEC_PER_SEC,
ce->shift);
clockevents_register_device(ce);
}
static unsigned int sbus_cycles_offset(void)
{
unsigned int val, offset;
val = *master_l10_counter;
offset = (val >> TIMER_VALUE_SHIFT) & TIMER_VALUE_MASK;
/* Limit hit? */
if (val & TIMER_LIMIT_BIT)
offset += sparc_config.cs_period;
return offset;
}
static cycle_t timer_cs_read(struct clocksource *cs)
{
unsigned int seq, offset;
u64 cycles;
do {
seq = read_seqbegin(&timer_cs_lock);
cycles = timer_cs_internal_counter;
offset = sparc_config.get_cycles_offset();
} while (read_seqretry(&timer_cs_lock, seq));
/* Count absolute cycles */
cycles *= sparc_config.cs_period;
cycles += offset;
return cycles;
}
static struct clocksource timer_cs = {
.name = "timer_cs",
.rating = 100,
.read = timer_cs_read,
.mask = CLOCKSOURCE_MASK(64),
.shift = 2,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static __init int setup_timer_cs(void)
{
timer_cs_enabled = 1;
timer_cs.mult = clocksource_hz2mult(sparc_config.clock_rate,
timer_cs.shift);
return clocksource_register(&timer_cs);
}
#ifdef CONFIG_SMP
static void percpu_ce_setup(enum clock_event_mode mode,
struct clock_event_device *evt)
{
int cpu = __first_cpu(evt->cpumask);
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
sparc_config.load_profile_irq(cpu,
SBUS_CLOCK_RATE / HZ);
break;
case CLOCK_EVT_MODE_ONESHOT:
case CLOCK_EVT_MODE_SHUTDOWN:
case CLOCK_EVT_MODE_UNUSED:
sparc_config.load_profile_irq(cpu, 0);
break;
default:
break;
}
}
static int percpu_ce_set_next_event(unsigned long delta,
struct clock_event_device *evt)
{
int cpu = __first_cpu(evt->cpumask);
unsigned int next = (unsigned int)delta;
sparc_config.load_profile_irq(cpu, next);
return 0;
}
void register_percpu_ce(int cpu)
{
struct clock_event_device *ce = &per_cpu(sparc32_clockevent, cpu);
unsigned int features = CLOCK_EVT_FEAT_PERIODIC;
if (sparc_config.features & FEAT_L14_ONESHOT)
features |= CLOCK_EVT_FEAT_ONESHOT;
ce->name = "percpu_ce";
ce->rating = 200;
ce->features = features;
ce->set_mode = percpu_ce_setup;
ce->set_next_event = percpu_ce_set_next_event;
ce->cpumask = cpumask_of(cpu);
ce->shift = 32;
ce->mult = div_sc(sparc_config.clock_rate, NSEC_PER_SEC,
ce->shift);
ce->max_delta_ns = clockevent_delta2ns(sparc_config.clock_rate, ce);
ce->min_delta_ns = clockevent_delta2ns(100, ce);
clockevents_register_device(ce);
}
#endif
static unsigned char mostek_read_byte(struct device *dev, u32 ofs)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -195,38 +337,28 @@ static int __init clock_init(void)
*/
fs_initcall(clock_init);
u32 sbus_do_gettimeoffset(void)
static void __init sparc32_late_time_init(void)
{
unsigned long val = *master_l10_counter;
unsigned long usec = (val >> 10) & 0x1fffff;
/* Limit hit? */
if (val & 0x80000000)
usec += 1000000 / HZ;
return usec * 1000;
}
u32 arch_gettimeoffset(void)
{
if (unlikely(!do_arch_gettimeoffset))
return 0;
return do_arch_gettimeoffset();
if (sparc_config.features & FEAT_L10_CLOCKEVENT)
setup_timer_ce();
if (sparc_config.features & FEAT_L10_CLOCKSOURCE)
setup_timer_cs();
#ifdef CONFIG_SMP
register_percpu_ce(smp_processor_id());
#endif
}
static void __init sbus_time_init(void)
{
do_arch_gettimeoffset = sbus_do_gettimeoffset;
btfixup();
sparc_irq_config.init_timers(timer_interrupt);
sparc_config.get_cycles_offset = sbus_cycles_offset;
sparc_config.init_timers();
}
void __init time_init(void)
{
sparc_config.features = 0;
late_time_init = sparc32_late_time_init;
if (pcic_present())
pci_time_init();
else


@@ -15,8 +15,8 @@
#include <asm/contregs.h>
#include <asm/thread_info.h>
.globl sun4m_cpu_startup, __smp4m_processor_id, __leon_processor_id
.globl sun4d_cpu_startup, __smp4d_processor_id
.globl sun4m_cpu_startup
.globl sun4d_cpu_startup
__CPUINIT
.align 4
@@ -94,24 +94,6 @@ smp_do_cpu_idle:
call cpu_panic
nop
__smp4m_processor_id:
rd %tbr, %g2
srl %g2, 12, %g2
and %g2, 3, %g2
retl
mov %g1, %o7
__smp4d_processor_id:
lda [%g0] ASI_M_VIKING_TMP1, %g2
retl
mov %g1, %o7
__leon_processor_id:
rd %asr17,%g2
srl %g2,28,%g2
retl
mov %g1, %o7
/* CPUID in bootbus can be found at PA 0xff0140000 */
#define SUN4D_BOOTBUS_CPUID 0xf0140000


@@ -120,8 +120,6 @@ void do_illegal_instruction(struct pt_regs *regs, unsigned long pc, unsigned lon
printk("Ill instr. at pc=%08lx instruction is %08lx\n",
regs->pc, *(unsigned long *)regs->pc);
#endif
if (!do_user_muldiv (regs, pc))
return;
info.si_signo = SIGILL;
info.si_errno = 0;


@@ -0,0 +1,417 @@
/* The Sparc trap table, bootloader gives us control at _start. */
__HEAD
.globl _start
_start:
.globl _stext
_stext:
.globl trapbase
trapbase:
#ifdef CONFIG_SMP
trapbase_cpu0:
#endif
/* We get control passed to us here at t_zero. */
t_zero: b gokernel; nop; nop; nop;
t_tflt: SRMMU_TFAULT /* Inst. Access Exception */
t_bins: TRAP_ENTRY(0x2, bad_instruction) /* Illegal Instruction */
t_pins: TRAP_ENTRY(0x3, priv_instruction) /* Privileged Instruction */
t_fpd: TRAP_ENTRY(0x4, fpd_trap_handler) /* Floating Point Disabled */
t_wovf: WINDOW_SPILL /* Window Overflow */
t_wunf: WINDOW_FILL /* Window Underflow */
t_mna: TRAP_ENTRY(0x7, mna_handler) /* Memory Address Not Aligned */
t_fpe: TRAP_ENTRY(0x8, fpe_trap_handler) /* Floating Point Exception */
t_dflt: SRMMU_DFAULT /* Data Miss Exception */
t_tio: TRAP_ENTRY(0xa, do_tag_overflow) /* Tagged Instruction Ovrflw */
t_wpt: TRAP_ENTRY(0xb, do_watchpoint) /* Watchpoint Detected */
t_badc: BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10)
t_irq1: TRAP_ENTRY_INTERRUPT(1) /* IRQ Software/SBUS Level 1 */
t_irq2: TRAP_ENTRY_INTERRUPT(2) /* IRQ SBUS Level 2 */
t_irq3: TRAP_ENTRY_INTERRUPT(3) /* IRQ SCSI/DMA/SBUS Level 3 */
t_irq4: TRAP_ENTRY_INTERRUPT(4) /* IRQ Software Level 4 */
t_irq5: TRAP_ENTRY_INTERRUPT(5) /* IRQ SBUS/Ethernet Level 5 */
t_irq6: TRAP_ENTRY_INTERRUPT(6) /* IRQ Software Level 6 */
t_irq7: TRAP_ENTRY_INTERRUPT(7) /* IRQ Video/SBUS Level 5 */
t_irq8: TRAP_ENTRY_INTERRUPT(8) /* IRQ SBUS Level 6 */
t_irq9: TRAP_ENTRY_INTERRUPT(9) /* IRQ SBUS Level 7 */
t_irq10:TRAP_ENTRY_INTERRUPT(10) /* IRQ Timer #1 (one we use) */
t_irq11:TRAP_ENTRY_INTERRUPT(11) /* IRQ Floppy Intr. */
t_irq12:TRAP_ENTRY_INTERRUPT(12) /* IRQ Zilog serial chip */
t_irq13:TRAP_ENTRY_INTERRUPT(13) /* IRQ Audio Intr. */
t_irq14:TRAP_ENTRY_INTERRUPT(14) /* IRQ Timer #2 */
.globl t_nmi
t_nmi: TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m)
t_racc: TRAP_ENTRY(0x20, do_reg_access) /* General Register Access Error */
t_iacce:BAD_TRAP(0x21) /* Instr Access Error */
t_bad22:BAD_TRAP(0x22)
BAD_TRAP(0x23)
t_cpdis:TRAP_ENTRY(0x24, do_cp_disabled) /* Co-Processor Disabled */
t_uflsh:SKIP_TRAP(0x25, unimp_flush) /* Unimplemented FLUSH inst. */
t_bad26:BAD_TRAP(0x26) BAD_TRAP(0x27)
t_cpexc:TRAP_ENTRY(0x28, do_cp_exception) /* Co-Processor Exception */
t_dacce:SRMMU_DFAULT /* Data Access Error */
t_hwdz: TRAP_ENTRY(0x2a, do_hw_divzero) /* Division by zero, you lose... */
t_dserr:BAD_TRAP(0x2b) /* Data Store Error */
t_daccm:BAD_TRAP(0x2c) /* Data Access MMU-Miss */
t_bad2d:BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31)
t_bad32:BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36)
t_bad37:BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b)
t_iaccm:BAD_TRAP(0x3c) /* Instr Access MMU-Miss */
t_bad3d:BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40) BAD_TRAP(0x41)
t_bad42:BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45) BAD_TRAP(0x46)
t_bad47:BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a) BAD_TRAP(0x4b)
t_bad4c:BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f) BAD_TRAP(0x50)
t_bad51:BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) BAD_TRAP(0x55)
t_bad56:BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a)
t_bad5b:BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) BAD_TRAP(0x5f)
t_bad60:BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64)
t_bad65:BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69)
t_bad6a:BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e)
t_bad6f:BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73)
t_bad74:BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78)
t_bad79:BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d)
t_bad7e:BAD_TRAP(0x7e) BAD_TRAP(0x7f)
t_bad80:BAD_TRAP(0x80) /* SunOS System Call */
t_sbkpt:BREAKPOINT_TRAP /* Software Breakpoint/KGDB */
t_divz: TRAP_ENTRY(0x82, do_hw_divzero) /* Divide by zero trap */
t_flwin:TRAP_ENTRY(0x83, do_flush_windows) /* Flush Windows Trap */
t_clwin:BAD_TRAP(0x84) /* Clean Windows Trap */
t_rchk: BAD_TRAP(0x85) /* Range Check */
t_funal:BAD_TRAP(0x86) /* Fix Unaligned Access Trap */
t_iovf: BAD_TRAP(0x87) /* Integer Overflow Trap */
t_bad88:BAD_TRAP(0x88) /* Slowaris System Call */
t_bad89:BAD_TRAP(0x89) /* Net-B.S. System Call */
t_bad8a:BAD_TRAP(0x8a) BAD_TRAP(0x8b) BAD_TRAP(0x8c) BAD_TRAP(0x8d) BAD_TRAP(0x8e)
t_bad8f:BAD_TRAP(0x8f)
t_linux:LINUX_SYSCALL_TRAP /* Linux System Call */
t_bad91:BAD_TRAP(0x91) BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94) BAD_TRAP(0x95)
t_bad96:BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99) BAD_TRAP(0x9a)
t_bad9b:BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e) BAD_TRAP(0x9f)
t_getcc:GETCC_TRAP /* Get Condition Codes */
t_setcc:SETCC_TRAP /* Set Condition Codes */
t_getpsr:GETPSR_TRAP /* Get PSR Register */
t_bada3:BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6)
t_bada7:BAD_TRAP(0xa7)
t_bada8:BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab)
t_badac:BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0)
t_badb1:BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5)
t_badb6:BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba)
t_badbb:BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf)
t_badc0:BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) BAD_TRAP(0xc4)
t_badc5:BAD_TRAP(0xc5) BAD_TRAP(0xc6) BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9)
t_badca:BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce)
t_badcf:BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3)
t_badd4:BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8)
t_badd9:BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd)
t_badde:BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2)
t_bade3:BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7)
t_bade8:BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec)
t_baded:BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1)
t_badf2:BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6)
t_badf7:BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb)
t_badfc:BAD_TRAP(0xfc)
t_kgdb: KGDB_TRAP(0xfd)
dbtrap: BAD_TRAP(0xfe) /* Debugger/PROM breakpoint #1 */
dbtrap2:BAD_TRAP(0xff) /* Debugger/PROM breakpoint #2 */
.globl end_traptable
end_traptable:
#ifdef CONFIG_SMP
/* Trap tables for the other cpus. */
.globl trapbase_cpu1, trapbase_cpu2, trapbase_cpu3
trapbase_cpu1:
BAD_TRAP(0x0)
SRMMU_TFAULT
TRAP_ENTRY(0x2, bad_instruction)
TRAP_ENTRY(0x3, priv_instruction)
TRAP_ENTRY(0x4, fpd_trap_handler)
WINDOW_SPILL
WINDOW_FILL
TRAP_ENTRY(0x7, mna_handler)
TRAP_ENTRY(0x8, fpe_trap_handler)
SRMMU_DFAULT
TRAP_ENTRY(0xa, do_tag_overflow)
TRAP_ENTRY(0xb, do_watchpoint)
BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10)
TRAP_ENTRY_INTERRUPT(1) TRAP_ENTRY_INTERRUPT(2)
TRAP_ENTRY_INTERRUPT(3) TRAP_ENTRY_INTERRUPT(4)
TRAP_ENTRY_INTERRUPT(5) TRAP_ENTRY_INTERRUPT(6)
TRAP_ENTRY_INTERRUPT(7) TRAP_ENTRY_INTERRUPT(8)
TRAP_ENTRY_INTERRUPT(9) TRAP_ENTRY_INTERRUPT(10)
TRAP_ENTRY_INTERRUPT(11) TRAP_ENTRY_INTERRUPT(12)
TRAP_ENTRY_INTERRUPT(13) TRAP_ENTRY_INTERRUPT(14)
TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m)
TRAP_ENTRY(0x20, do_reg_access)
BAD_TRAP(0x21)
BAD_TRAP(0x22)
BAD_TRAP(0x23)
TRAP_ENTRY(0x24, do_cp_disabled)
SKIP_TRAP(0x25, unimp_flush)
BAD_TRAP(0x26)
BAD_TRAP(0x27)
TRAP_ENTRY(0x28, do_cp_exception)
SRMMU_DFAULT
TRAP_ENTRY(0x2a, do_hw_divzero)
BAD_TRAP(0x2b)
BAD_TRAP(0x2c)
BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31)
BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36)
BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b)
BAD_TRAP(0x3c) BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40)
BAD_TRAP(0x41) BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45)
BAD_TRAP(0x46) BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a)
BAD_TRAP(0x4b) BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f)
BAD_TRAP(0x50)
BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) BAD_TRAP(0x55)
BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a)
BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) BAD_TRAP(0x5f)
BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64)
BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69)
BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e)
BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73)
BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78)
BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d)
BAD_TRAP(0x7e) BAD_TRAP(0x7f)
BAD_TRAP(0x80)
BREAKPOINT_TRAP
TRAP_ENTRY(0x82, do_hw_divzero)
TRAP_ENTRY(0x83, do_flush_windows)
BAD_TRAP(0x84) BAD_TRAP(0x85) BAD_TRAP(0x86)
BAD_TRAP(0x87) BAD_TRAP(0x88) BAD_TRAP(0x89)
BAD_TRAP(0x8a) BAD_TRAP(0x8b) BAD_TRAP(0x8c)
BAD_TRAP(0x8d) BAD_TRAP(0x8e) BAD_TRAP(0x8f)
LINUX_SYSCALL_TRAP BAD_TRAP(0x91)
BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94)
BAD_TRAP(0x95) BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99)
BAD_TRAP(0x9a) BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e)
BAD_TRAP(0x9f)
GETCC_TRAP
SETCC_TRAP
GETPSR_TRAP
BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6)
BAD_TRAP(0xa7) BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab)
BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0)
BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5)
BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba)
BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf)
BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) BAD_TRAP(0xc4)
BAD_TRAP(0xc5) BAD_TRAP(0xc6) BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9)
BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce)
BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3)
BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8)
BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd)
BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2)
BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7)
BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec)
BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1)
BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6)
BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb)
BAD_TRAP(0xfc)
KGDB_TRAP(0xfd)
BAD_TRAP(0xfe)
BAD_TRAP(0xff)
trapbase_cpu2:
BAD_TRAP(0x0)
SRMMU_TFAULT
TRAP_ENTRY(0x2, bad_instruction)
TRAP_ENTRY(0x3, priv_instruction)
TRAP_ENTRY(0x4, fpd_trap_handler)
WINDOW_SPILL
WINDOW_FILL
TRAP_ENTRY(0x7, mna_handler)
TRAP_ENTRY(0x8, fpe_trap_handler)
SRMMU_DFAULT
TRAP_ENTRY(0xa, do_tag_overflow)
TRAP_ENTRY(0xb, do_watchpoint)
BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10)
TRAP_ENTRY_INTERRUPT(1)
TRAP_ENTRY_INTERRUPT(2)
TRAP_ENTRY_INTERRUPT(3)
TRAP_ENTRY_INTERRUPT(4)
TRAP_ENTRY_INTERRUPT(5)
TRAP_ENTRY_INTERRUPT(6)
TRAP_ENTRY_INTERRUPT(7)
TRAP_ENTRY_INTERRUPT(8)
TRAP_ENTRY_INTERRUPT(9)
TRAP_ENTRY_INTERRUPT(10)
TRAP_ENTRY_INTERRUPT(11)
TRAP_ENTRY_INTERRUPT(12)
TRAP_ENTRY_INTERRUPT(13)
TRAP_ENTRY_INTERRUPT(14)
TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m)
TRAP_ENTRY(0x20, do_reg_access)
BAD_TRAP(0x21)
BAD_TRAP(0x22)
BAD_TRAP(0x23)
TRAP_ENTRY(0x24, do_cp_disabled)
SKIP_TRAP(0x25, unimp_flush)
BAD_TRAP(0x26)
BAD_TRAP(0x27)
TRAP_ENTRY(0x28, do_cp_exception)
SRMMU_DFAULT
TRAP_ENTRY(0x2a, do_hw_divzero)
BAD_TRAP(0x2b)
BAD_TRAP(0x2c)
BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31)
BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36)
BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b)
BAD_TRAP(0x3c) BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40)
BAD_TRAP(0x41) BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45)
BAD_TRAP(0x46) BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a)
BAD_TRAP(0x4b) BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f)
BAD_TRAP(0x50)
BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) BAD_TRAP(0x55)
BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a)
BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) BAD_TRAP(0x5f)
BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64)
BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69)
BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e)
BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73)
BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78)
BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d)
BAD_TRAP(0x7e) BAD_TRAP(0x7f)
BAD_TRAP(0x80)
BREAKPOINT_TRAP
TRAP_ENTRY(0x82, do_hw_divzero)
TRAP_ENTRY(0x83, do_flush_windows)
BAD_TRAP(0x84)
BAD_TRAP(0x85)
BAD_TRAP(0x86) BAD_TRAP(0x87) BAD_TRAP(0x88)
BAD_TRAP(0x89) BAD_TRAP(0x8a) BAD_TRAP(0x8b) BAD_TRAP(0x8c)
BAD_TRAP(0x8d) BAD_TRAP(0x8e) BAD_TRAP(0x8f)
LINUX_SYSCALL_TRAP BAD_TRAP(0x91)
BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94)
BAD_TRAP(0x95) BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99)
BAD_TRAP(0x9a) BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e)
BAD_TRAP(0x9f)
GETCC_TRAP
SETCC_TRAP
GETPSR_TRAP
BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6)
BAD_TRAP(0xa7) BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab)
BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0)
BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5)
BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba)
BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf)
BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) BAD_TRAP(0xc4)
BAD_TRAP(0xc5) BAD_TRAP(0xc6) BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9)
BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce)
BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3)
BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8)
BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd)
BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2)
BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7)
BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec)
BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1)
BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6)
BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb)
BAD_TRAP(0xfc)
KGDB_TRAP(0xfd)
BAD_TRAP(0xfe)
BAD_TRAP(0xff)
trapbase_cpu3:
BAD_TRAP(0x0)
SRMMU_TFAULT
TRAP_ENTRY(0x2, bad_instruction)
TRAP_ENTRY(0x3, priv_instruction)
TRAP_ENTRY(0x4, fpd_trap_handler)
WINDOW_SPILL
WINDOW_FILL
TRAP_ENTRY(0x7, mna_handler)
TRAP_ENTRY(0x8, fpe_trap_handler)
SRMMU_DFAULT
TRAP_ENTRY(0xa, do_tag_overflow)
TRAP_ENTRY(0xb, do_watchpoint)
BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10)
TRAP_ENTRY_INTERRUPT(1)
TRAP_ENTRY_INTERRUPT(2)
TRAP_ENTRY_INTERRUPT(3)
TRAP_ENTRY_INTERRUPT(4)
TRAP_ENTRY_INTERRUPT(5)
TRAP_ENTRY_INTERRUPT(6)
TRAP_ENTRY_INTERRUPT(7)
TRAP_ENTRY_INTERRUPT(8)
TRAP_ENTRY_INTERRUPT(9)
TRAP_ENTRY_INTERRUPT(10)
TRAP_ENTRY_INTERRUPT(11)
TRAP_ENTRY_INTERRUPT(12)
TRAP_ENTRY_INTERRUPT(13)
TRAP_ENTRY_INTERRUPT(14)
TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m)
TRAP_ENTRY(0x20, do_reg_access)
BAD_TRAP(0x21)
BAD_TRAP(0x22)
BAD_TRAP(0x23)
TRAP_ENTRY(0x24, do_cp_disabled)
SKIP_TRAP(0x25, unimp_flush)
BAD_TRAP(0x26)
BAD_TRAP(0x27)
TRAP_ENTRY(0x28, do_cp_exception)
SRMMU_DFAULT
TRAP_ENTRY(0x2a, do_hw_divzero)
BAD_TRAP(0x2b) BAD_TRAP(0x2c)
BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31)
BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36)
BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b)
BAD_TRAP(0x3c) BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40)
BAD_TRAP(0x41) BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45)
BAD_TRAP(0x46) BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a)
BAD_TRAP(0x4b) BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f)
BAD_TRAP(0x50)
BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) BAD_TRAP(0x55)
BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a)
BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) BAD_TRAP(0x5f)
BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64)
BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69)
BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e)
BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73)
BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78)
BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d)
BAD_TRAP(0x7e) BAD_TRAP(0x7f)
BAD_TRAP(0x80)
BREAKPOINT_TRAP
TRAP_ENTRY(0x82, do_hw_divzero)
TRAP_ENTRY(0x83, do_flush_windows)
BAD_TRAP(0x84) BAD_TRAP(0x85)
BAD_TRAP(0x86) BAD_TRAP(0x87) BAD_TRAP(0x88)
BAD_TRAP(0x89) BAD_TRAP(0x8a) BAD_TRAP(0x8b) BAD_TRAP(0x8c)
BAD_TRAP(0x8d) BAD_TRAP(0x8e) BAD_TRAP(0x8f)
LINUX_SYSCALL_TRAP
BAD_TRAP(0x91) BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94)
BAD_TRAP(0x95) BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99)
BAD_TRAP(0x9a) BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e)
BAD_TRAP(0x9f)
GETCC_TRAP
SETCC_TRAP
GETPSR_TRAP
BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6)
BAD_TRAP(0xa7) BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab)
BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0)
BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5)
BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba)
BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf)
BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) BAD_TRAP(0xc4)
BAD_TRAP(0xc5) BAD_TRAP(0xc6) BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9)
BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce)
BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3)
BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8)
BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd)
BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2)
BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7)
BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec)
BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1)
BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6)
BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb)
BAD_TRAP(0xfc)
KGDB_TRAP(0xfd)
BAD_TRAP(0xfe)
BAD_TRAP(0xff)
#endif


@@ -21,7 +21,6 @@
#include <linux/bitops.h>
#include <linux/perf_event.h>
#include <linux/ratelimit.h>
#include <linux/bitops.h>
#include <asm/fpumacro.h>
#include <asm/cacheflush.h>


@@ -163,9 +163,8 @@ spwin_fromuser:
* the label 'spwin_user_stack_is_bolixed' which will take
* care of things at that point.
*/
.globl spwin_mmu_patchme
spwin_mmu_patchme: b spwin_sun4c_stackchk
andcc %sp, 0x7, %g0
b spwin_srmmu_stackchk
andcc %sp, 0x7, %g0
spwin_good_ustack:
/* LOCATION: Window to be saved */
@@ -306,73 +305,6 @@ spwin_bad_ustack_from_kernel:
* As noted above %curptr cannot be touched by this routine at all.
*/
spwin_sun4c_stackchk:
/* LOCATION: Window to be saved on the stack */
/* See if the stack is in the address space hole but first,
* check results of callers andcc %sp, 0x7, %g0
*/
be 1f
sra %sp, 29, %glob_tmp
rd %psr, %glob_tmp
b spwin_user_stack_is_bolixed + 0x4
nop
1:
add %glob_tmp, 0x1, %glob_tmp
andncc %glob_tmp, 0x1, %g0
be 1f
and %sp, 0xfff, %glob_tmp ! delay slot
rd %psr, %glob_tmp
b spwin_user_stack_is_bolixed + 0x4
nop
/* See if our dump area will be on more than one
* page.
*/
1:
add %glob_tmp, 0x38, %glob_tmp
andncc %glob_tmp, 0xff8, %g0
be spwin_sun4c_onepage ! only one page to check
lda [%sp] ASI_PTE, %glob_tmp ! have to check first page anyways
spwin_sun4c_twopages:
/* Is first page ok permission wise? */
srl %glob_tmp, 29, %glob_tmp
cmp %glob_tmp, 0x6
be 1f
add %sp, 0x38, %glob_tmp /* Is second page in vma hole? */
rd %psr, %glob_tmp
b spwin_user_stack_is_bolixed + 0x4
nop
1:
sra %glob_tmp, 29, %glob_tmp
add %glob_tmp, 0x1, %glob_tmp
andncc %glob_tmp, 0x1, %g0
be 1f
add %sp, 0x38, %glob_tmp
rd %psr, %glob_tmp
b spwin_user_stack_is_bolixed + 0x4
nop
1:
lda [%glob_tmp] ASI_PTE, %glob_tmp
spwin_sun4c_onepage:
srl %glob_tmp, 29, %glob_tmp
cmp %glob_tmp, 0x6 ! can user write to it?
be spwin_good_ustack ! success
nop
rd %psr, %glob_tmp
b spwin_user_stack_is_bolixed + 0x4
nop
/* This is a generic SRMMU routine. As far as I know this
* works for all current v8/srmmu implementations, we'll
* see...


@@ -131,12 +131,9 @@ fwin_from_user:
/* LOCATION: Window 'W' */
/* Branch to the architecture specific stack validation
* routine. They can be found below...
*/
.globl fwin_mmu_patchme
fwin_mmu_patchme: b sun4c_fwin_stackchk
andcc %sp, 0x7, %g0
/* Branch to the stack validation routine */
b srmmu_fwin_stackchk
andcc %sp, 0x7, %g0
#define STACK_OFFSET (THREAD_SIZE - TRACEREG_SZ - STACKFRAME_SZ)
@@ -242,57 +239,6 @@ fwin_user_finish_up:
* 'someone elses' window possibly.
*/
.align 4
sun4c_fwin_stackchk:
/* LOCATION: Window 'W' */
/* Caller did 'andcc %sp, 0x7, %g0' */
be 1f
and %sp, 0xfff, %l0 ! delay slot
b,a fwin_user_stack_is_bolixed
/* See if we have to check the sanity of one page or two */
1:
add %l0, 0x38, %l0
sra %sp, 29, %l5
add %l5, 0x1, %l5
andncc %l5, 0x1, %g0
be 1f
andncc %l0, 0xff8, %g0
b,a fwin_user_stack_is_bolixed /* %sp is in vma hole, yuck */
1:
be sun4c_fwin_onepage /* Only one page to check */
lda [%sp] ASI_PTE, %l1
sun4c_fwin_twopages:
add %sp, 0x38, %l0
sra %l0, 29, %l5
add %l5, 0x1, %l5
andncc %l5, 0x1, %g0
be 1f
lda [%l0] ASI_PTE, %l1
b,a fwin_user_stack_is_bolixed /* Second page in vma hole */
1:
srl %l1, 29, %l1
andcc %l1, 0x4, %g0
bne sun4c_fwin_onepage
lda [%sp] ASI_PTE, %l1
b,a fwin_user_stack_is_bolixed /* Second page has bad perms */
sun4c_fwin_onepage:
srl %l1, 29, %l1
andcc %l1, 0x4, %g0
bne fwin_user_stack_is_ok
nop
/* A page had bad page permissions, losing... */
b,a fwin_user_stack_is_bolixed
.globl srmmu_fwin_stackchk
srmmu_fwin_stackchk:
/* LOCATION: Window 'W' */


@@ -4,7 +4,7 @@
asflags-y := -ansi -DST_DIV0=0x02
ccflags-y := -Werror
lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
lib-$(CONFIG_SPARC32) += ashrdi3.o
lib-$(CONFIG_SPARC32) += memcpy.o memset.o
lib-y += strlen.o
lib-y += checksum_$(BITS).o
@@ -13,7 +13,7 @@ lib-y += memscan_$(BITS).o memcmp.o strncmp_$(BITS).o
lib-y += strncpy_from_user_$(BITS).o strlen_user_$(BITS).o
lib-$(CONFIG_SPARC32) += divdi3.o udivdi3.o
lib-$(CONFIG_SPARC32) += copy_user.o locks.o
lib-y += atomic_$(BITS).o
lib-$(CONFIG_SPARC64) += atomic_64.o
lib-$(CONFIG_SPARC32) += lshrdi3.o ashldi3.o
lib-$(CONFIG_SPARC32) += muldi3.o bitext.o cmpdi2.o
@@ -40,7 +40,7 @@ lib-$(CONFIG_SPARC64) += copy_in_user.o user_fixup.o memmove.o
lib-$(CONFIG_SPARC64) += mcount.o ipcsum.o xor.o hweight.o ffs.o
obj-y += iomap.o
obj-$(CONFIG_SPARC32) += atomic32.o
obj-$(CONFIG_SPARC32) += atomic32.o ucmpdi2.o
obj-y += ksyms.o
obj-$(CONFIG_SPARC64) += PeeCeeI.o
obj-y += usercopy.o


@@ -5,10 +5,10 @@
* Copyright (C) 1999 David S. Miller (davem@redhat.com)
*/
#include <linux/linkage.h>
.text
.align 4
.globl __ashldi3
__ashldi3:
ENTRY(__ashldi3)
cmp %o2, 0
be 9f
mov 0x20, %g2
@@ -32,3 +32,4 @@ __ashldi3:
9:
retl
nop
ENDPROC(__ashldi3)


@@ -5,10 +5,10 @@
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
#include <linux/linkage.h>
.text
.align 4
.globl __ashrdi3
__ashrdi3:
ENTRY(__ashrdi3)
tst %o2
be 3f
or %g0, 32, %g2
@@ -34,3 +34,4 @@ __ashrdi3:
3:
jmpl %o7 + 8, %g0
nop
ENDPROC(__ashrdi3)


@@ -1,44 +0,0 @@
/* atomic.S: Move this stuff here for better ICACHE hit rates.
*
* Copyright (C) 1996 David S. Miller (davem@caipfs.rutgers.edu)
*/
#include <asm/ptrace.h>
#include <asm/psr.h>
.text
.align 4
.globl __atomic_begin
__atomic_begin:
#ifndef CONFIG_SMP
.globl ___xchg32_sun4c
___xchg32_sun4c:
rd %psr, %g3
andcc %g3, PSR_PIL, %g0
bne 1f
nop
wr %g3, PSR_PIL, %psr
nop; nop; nop
1:
andcc %g3, PSR_PIL, %g0
ld [%g1], %g7
bne 1f
st %g2, [%g1]
wr %g3, 0x0, %psr
nop; nop; nop
1:
mov %g7, %g2
jmpl %o7 + 8, %g0
mov %g4, %o7
.globl ___xchg32_sun4md
___xchg32_sun4md:
swap [%g1], %g2
jmpl %o7 + 8, %g0
mov %g4, %o7
#endif
.globl __atomic_end
__atomic_end:


@@ -3,6 +3,7 @@
* Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net)
*/
#include <linux/linkage.h>
#include <asm/asi.h>
#include <asm/backoff.h>
@@ -13,9 +14,7 @@
* memory barriers, and a second which returns
* a value and does the barriers.
*/
.globl atomic_add
.type atomic_add,#function
atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
BACKOFF_SETUP(%o2)
1: lduw [%o1], %g1
add %g1, %o0, %g7
@@ -26,11 +25,9 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
retl
nop
2: BACKOFF_SPIN(%o2, %o3, 1b)
.size atomic_add, .-atomic_add
ENDPROC(atomic_add)
.globl atomic_sub
.type atomic_sub,#function
atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
BACKOFF_SETUP(%o2)
1: lduw [%o1], %g1
sub %g1, %o0, %g7
@@ -41,11 +38,9 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
retl
nop
2: BACKOFF_SPIN(%o2, %o3, 1b)
.size atomic_sub, .-atomic_sub
ENDPROC(atomic_sub)
.globl atomic_add_ret
.type atomic_add_ret,#function
atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
BACKOFF_SETUP(%o2)
1: lduw [%o1], %g1
add %g1, %o0, %g7
@@ -56,11 +51,9 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
retl
sra %g1, 0, %o0
2: BACKOFF_SPIN(%o2, %o3, 1b)
.size atomic_add_ret, .-atomic_add_ret
ENDPROC(atomic_add_ret)
.globl atomic_sub_ret
.type atomic_sub_ret,#function
atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
BACKOFF_SETUP(%o2)
1: lduw [%o1], %g1
sub %g1, %o0, %g7
@@ -71,11 +64,9 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
retl
sra %g1, 0, %o0
2: BACKOFF_SPIN(%o2, %o3, 1b)
.size atomic_sub_ret, .-atomic_sub_ret
ENDPROC(atomic_sub_ret)
.globl atomic64_add
.type atomic64_add,#function
atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
BACKOFF_SETUP(%o2)
1: ldx [%o1], %g1
add %g1, %o0, %g7
@@ -86,11 +77,9 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
retl
nop
2: BACKOFF_SPIN(%o2, %o3, 1b)
.size atomic64_add, .-atomic64_add
ENDPROC(atomic64_add)
.globl atomic64_sub
.type atomic64_sub,#function
atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
BACKOFF_SETUP(%o2)
1: ldx [%o1], %g1
sub %g1, %o0, %g7
@@ -101,11 +90,9 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
retl
nop
2: BACKOFF_SPIN(%o2, %o3, 1b)
.size atomic64_sub, .-atomic64_sub
ENDPROC(atomic64_sub)
.globl atomic64_add_ret
.type atomic64_add_ret,#function
atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
BACKOFF_SETUP(%o2)
1: ldx [%o1], %g1
add %g1, %o0, %g7
@@ -116,11 +103,9 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
retl
add %g1, %o0, %o0
2: BACKOFF_SPIN(%o2, %o3, 1b)
.size atomic64_add_ret, .-atomic64_add_ret
ENDPROC(atomic64_add_ret)
.globl atomic64_sub_ret
.type atomic64_sub_ret,#function
atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
BACKOFF_SETUP(%o2)
1: ldx [%o1], %g1
sub %g1, %o0, %g7
@@ -131,4 +116,4 @@ atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
retl
sub %g1, %o0, %o0
2: BACKOFF_SPIN(%o2, %o3, 1b)
.size atomic64_sub_ret, .-atomic64_sub_ret
ENDPROC(atomic64_sub_ret)


@@ -3,14 +3,13 @@
* Copyright (C) 2000, 2007 David S. Miller (davem@davemloft.net)
*/
#include <linux/linkage.h>
#include <asm/asi.h>
#include <asm/backoff.h>
.text
.globl test_and_set_bit
.type test_and_set_bit,#function
test_and_set_bit: /* %o0=nr, %o1=addr */
ENTRY(test_and_set_bit) /* %o0=nr, %o1=addr */
BACKOFF_SETUP(%o3)
srlx %o0, 6, %g1
mov 1, %o2
@@ -29,11 +28,9 @@ test_and_set_bit: /* %o0=nr, %o1=addr */
retl
nop
2: BACKOFF_SPIN(%o3, %o4, 1b)
.size test_and_set_bit, .-test_and_set_bit
ENDPROC(test_and_set_bit)
.globl test_and_clear_bit
.type test_and_clear_bit,#function
test_and_clear_bit: /* %o0=nr, %o1=addr */
ENTRY(test_and_clear_bit) /* %o0=nr, %o1=addr */
BACKOFF_SETUP(%o3)
srlx %o0, 6, %g1
mov 1, %o2
@@ -52,11 +49,9 @@ test_and_clear_bit: /* %o0=nr, %o1=addr */
retl
nop
2: BACKOFF_SPIN(%o3, %o4, 1b)
.size test_and_clear_bit, .-test_and_clear_bit
ENDPROC(test_and_clear_bit)
.globl test_and_change_bit
.type test_and_change_bit,#function
test_and_change_bit: /* %o0=nr, %o1=addr */
ENTRY(test_and_change_bit) /* %o0=nr, %o1=addr */
BACKOFF_SETUP(%o3)
srlx %o0, 6, %g1
mov 1, %o2
@@ -75,11 +70,9 @@ test_and_change_bit: /* %o0=nr, %o1=addr */
retl
nop
2: BACKOFF_SPIN(%o3, %o4, 1b)
.size test_and_change_bit, .-test_and_change_bit
ENDPROC(test_and_change_bit)
.globl set_bit
.type set_bit,#function
set_bit: /* %o0=nr, %o1=addr */
ENTRY(set_bit) /* %o0=nr, %o1=addr */
BACKOFF_SETUP(%o3)
srlx %o0, 6, %g1
mov 1, %o2
@@ -96,11 +89,9 @@ set_bit: /* %o0=nr, %o1=addr */
retl
nop
2: BACKOFF_SPIN(%o3, %o4, 1b)
.size set_bit, .-set_bit
ENDPROC(set_bit)
.globl clear_bit
.type clear_bit,#function
clear_bit: /* %o0=nr, %o1=addr */
ENTRY(clear_bit) /* %o0=nr, %o1=addr */
BACKOFF_SETUP(%o3)
srlx %o0, 6, %g1
mov 1, %o2
@@ -117,11 +108,9 @@ clear_bit: /* %o0=nr, %o1=addr */
retl
nop
2: BACKOFF_SPIN(%o3, %o4, 1b)
.size clear_bit, .-clear_bit
ENDPROC(clear_bit)
.globl change_bit
.type change_bit,#function
change_bit: /* %o0=nr, %o1=addr */
ENTRY(change_bit) /* %o0=nr, %o1=addr */
BACKOFF_SETUP(%o3)
srlx %o0, 6, %g1
mov 1, %o2
@@ -138,4 +127,4 @@ change_bit: /* %o0=nr, %o1=addr */
retl
nop
2: BACKOFF_SPIN(%o3, %o4, 1b)
.size change_bit, .-change_bit
ENDPROC(change_bit)


@@ -4,6 +4,7 @@
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
#include <linux/linkage.h>
#include <asm/page.h>
/* Zero out 64 bytes of memory at (buf + offset).
@@ -44,10 +45,7 @@
*/
.text
.align 4
.globl bzero_1page, __copy_1page
bzero_1page:
ENTRY(bzero_1page)
/* NOTE: If you change the number of insns of this routine, please check
* arch/sparc/mm/hypersparc.S */
/* %o0 = buf */
@@ -65,8 +63,9 @@ bzero_1page:
retl
nop
ENDPROC(bzero_1page)
__copy_1page:
ENTRY(__copy_1page)
/* NOTE: If you change the number of insns of this routine, please check
* arch/sparc/mm/hypersparc.S */
/* %o0 = dst, %o1 = src */
@@ -87,3 +86,4 @@ __copy_1page:
retl
nop
ENDPROC(__copy_1page)

Some files were not shown because too many files have changed in this diff.