
Merge branches 'x86/acpi', 'x86/asm', 'x86/cpudetect', 'x86/crashdump', 'x86/debug', 'x86/defconfig', 'x86/doc', 'x86/header-fixes', 'x86/headers' and 'x86/minor-fixes' into x86/core

Ingo Molnar 2009-02-13 09:46:36 +01:00
38 changed files with 1289 additions and 794 deletions


@@ -199,6 +199,10 @@ char *__init __acpi_map_table(unsigned long phys_addr, unsigned long size)
return __va(phys_addr);
}
void __init __acpi_unmap_table(char *map, unsigned long size)
{
}
/* --------------------------------------------------------------------------
Boot-time Table Parsing
-------------------------------------------------------------------------- */

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -102,9 +102,6 @@ static inline void disable_acpi(void)
acpi_noirq = 1;
}
/* Fixmap pages to reserve for ACPI boot-time tables (see fixmap.h) */
#define FIX_ACPI_PAGES 4
extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq);
static inline void acpi_noirq_set(void) { acpi_noirq = 1; }


@@ -1,5 +1,55 @@
/*
* Some macros to handle stack frames in assembly.
x86 function call convention, 64-bit:
-------------------------------------
 arguments           | callee-saved       | extra caller-saved | return
[callee-clobbered]   |                    | [callee-clobbered] |
---------------------------------------------------------------------------
rdi rsi rdx rcx r8-9 | rbx rbp [*] r12-15 | r10-11             | rax, rdx [**]
( rsp is obviously invariant across normal function calls. (gcc can 'merge'
functions when it sees tail-call optimization possibilities) rflags is
clobbered. Leftover arguments are passed over the stack frame.)
[*] In the frame-pointers case rbp is fixed to the stack frame.
[**] for struct return values wider than 64 bits the return convention is a
bit more complex: up to 128 bits width we return small structures
straight in rax, rdx. For structures larger than that (3 words or
larger) the caller puts a pointer to an on-stack return struct
[allocated in the caller's stack frame] into the first argument - i.e.
into rdi. All other arguments shift up by one in this case.
Fortunately this case is rare in the kernel.
For 32-bit we have the following conventions - kernel is built with
-mregparm=3 and -freg-struct-return:
x86 function calling convention, 32-bit:
----------------------------------------
 arguments         | callee-saved        | extra caller-saved | return
[callee-clobbered] |                     | [callee-clobbered] |
-------------------------------------------------------------------------
eax edx ecx        | ebx edi esi ebp [*] | <none>             | eax, edx [**]
( here too esp is obviously invariant across normal function calls. eflags
is clobbered. Leftover arguments are passed over the stack frame. )
[*] In the frame-pointers case ebp is fixed to the stack frame.
[**] We build with -freg-struct-return, which on 32-bit means similar
semantics as on 64-bit: edx can be used for a second return value
(i.e. covering integer and structure sizes up to 64 bits) - after that
it gets more complex and more expensive: 3-word or larger struct returns
get done in the caller's frame and the pointer to the return struct goes
into regparm0, i.e. eax - the other arguments shift up and the
function's register parameters degenerate to regparm=2 in essence.
*/
/*
* 64-bit system call stack frame layout defines and helpers,
* for assembly code:
*/
#define R15 0
@@ -9,7 +59,7 @@
#define RBP 32
#define RBX 40
/* arguments: interrupts/non tracing syscalls only save upto here*/
/* arguments: interrupts/non tracing syscalls only save up to here: */
#define R11 48
#define R10 56
#define R9 64
@@ -22,7 +72,7 @@
#define ORIG_RAX 120 /* + error_code */
/* end of arguments */
/* cpu exception frame or undefined in case of fast syscall. */
/* cpu exception frame or undefined in case of fast syscall: */
#define RIP 128
#define CS 136
#define EFLAGS 144


@@ -95,10 +95,6 @@ enum fixed_addresses {
(__end_of_permanent_fixed_addresses & 255),
FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1,
FIX_WP_TEST,
#ifdef CONFIG_ACPI
FIX_ACPI_BEGIN,
FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
#endif
#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
FIX_OHCI1394_BASE,
#endif


@@ -50,10 +50,6 @@ enum fixed_addresses {
FIX_PARAVIRT_BOOTMAP,
#endif
__end_of_permanent_fixed_addresses,
#ifdef CONFIG_ACPI
FIX_ACPI_BEGIN,
FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
#endif
#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
FIX_OHCI1394_BASE,
#endif


@@ -5,6 +5,7 @@
#include <linux/compiler.h>
#include <asm-generic/int-ll64.h>
#include <asm/page.h>
#define build_mmio_read(name, size, type, reg, barrier) \
static inline type name(const volatile void __iomem *addr) \
@@ -80,6 +81,95 @@ static inline void writeq(__u64 val, volatile void __iomem *addr)
#define readq readq
#define writeq writeq
/**
* virt_to_phys - map virtual addresses to physical
* @address: address to remap
*
* The returned physical address is the physical (CPU) mapping for
* the memory address given. It is only valid to use this function on
* addresses directly mapped or allocated via kmalloc.
*
* This function does not give bus mappings for DMA transfers. In
* almost all conceivable cases a device driver should not be using
* this function
*/
static inline phys_addr_t virt_to_phys(volatile void *address)
{
return __pa(address);
}
/**
* phys_to_virt - map physical address to virtual
* @address: address to remap
*
* The returned virtual address is a current CPU mapping for
* the memory address given. It is only valid to use this function on
* addresses that have a kernel mapping
*
* This function does not handle bus mappings for DMA transfers. In
* almost all conceivable cases a device driver should not be using
* this function
*/
static inline void *phys_to_virt(phys_addr_t address)
{
return __va(address);
}
/*
* Change "struct page" to physical address.
*/
#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
/*
* ISA I/O bus memory addresses are 1:1 with the physical address.
*/
#define isa_virt_to_bus virt_to_phys
#define isa_page_to_bus page_to_phys
#define isa_bus_to_virt phys_to_virt
/*
* However PCI ones are not necessarily 1:1 and therefore these interfaces
* are forbidden in portable PCI drivers.
*
* Allow them on x86 for legacy drivers, though.
*/
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
/**
* ioremap - map bus memory into CPU space
* @offset: bus address of the memory
* @size: size of the resource to map
*
* ioremap performs a platform specific sequence of operations to
* make bus memory CPU accessible via the readb/readw/readl/writeb/
* writew/writel functions and the other mmio helpers. The returned
* address is not guaranteed to be usable directly as a virtual
* address.
*
* If the area you are trying to map is a PCI BAR you should have a
* look at pci_iomap().
*/
extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
unsigned long prot_val);
/*
* The default ioremap() behavior is non-cached:
*/
static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
{
return ioremap_nocache(offset, size);
}
extern void iounmap(volatile void __iomem *addr);
extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
#ifdef CONFIG_X86_32
# include "io_32.h"
#else

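A minimal usage sketch for the interfaces documented above (not from this commit; the MMIO base address and all names are made up, error handling elided):

#include <linux/kernel.h>
#include <linux/slab.h>
#include <asm/io.h>

#define MYDEV_MMIO_BASE	0xfeb00000UL	/* hypothetical bus address */

static int __init mydev_example(void)
{
	void *buf = kmalloc(64, GFP_KERNEL);	/* directly mapped memory */
	void __iomem *regs;

	if (!buf)
		return -ENOMEM;

	/* fine: kmalloc memory is covered by virt_to_phys() */
	printk(KERN_INFO "buf at phys %llx\n", (u64)virt_to_phys(buf));

	/* MMIO must go through ioremap() and the mmio helpers instead: */
	regs = ioremap(MYDEV_MMIO_BASE, PAGE_SIZE);
	if (regs) {
		u32 val = readl(regs);	/* never dereference regs directly */
		writel(val, regs);
		iounmap(regs);
	}
	kfree(buf);
	return 0;
}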

@@ -51,92 +51,6 @@
*/
#define xlate_dev_kmem_ptr(p) p
/**
* virt_to_phys - map virtual addresses to physical
* @address: address to remap
*
* The returned physical address is the physical (CPU) mapping for
* the memory address given. It is only valid to use this function on
* addresses directly mapped or allocated via kmalloc.
*
* This function does not give bus mappings for DMA transfers. In
* almost all conceivable cases a device driver should not be using
* this function
*/
static inline unsigned long virt_to_phys(volatile void *address)
{
return __pa(address);
}
/**
* phys_to_virt - map physical address to virtual
* @address: address to remap
*
* The returned virtual address is a current CPU mapping for
* the memory address given. It is only valid to use this function on
* addresses that have a kernel mapping
*
* This function does not handle bus mappings for DMA transfers. In
* almost all conceivable cases a device driver should not be using
* this function
*/
static inline void *phys_to_virt(unsigned long address)
{
return __va(address);
}
/*
* Change "struct page" to physical address.
*/
#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
/**
* ioremap - map bus memory into CPU space
* @offset: bus address of the memory
* @size: size of the resource to map
*
* ioremap performs a platform specific sequence of operations to
* make bus memory CPU accessible via the readb/readw/readl/writeb/
* writew/writel functions and the other mmio helpers. The returned
* address is not guaranteed to be usable directly as a virtual
* address.
*
* If the area you are trying to map is a PCI BAR you should have a
* look at pci_iomap().
*/
extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
unsigned long prot_val);
/*
* The default ioremap() behavior is non-cached:
*/
static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
{
return ioremap_nocache(offset, size);
}
extern void iounmap(volatile void __iomem *addr);
/*
* ISA I/O bus memory addresses are 1:1 with the physical address.
*/
#define isa_virt_to_bus virt_to_phys
#define isa_page_to_bus page_to_phys
#define isa_bus_to_virt phys_to_virt
/*
* However PCI ones are not necessarily 1:1 and therefore these interfaces
* are forbidden in portable PCI drivers.
*
* Allow them on x86 for legacy drivers, though.
*/
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
static inline void
memset_io(volatile void __iomem *addr, unsigned char val, int count)
{


@@ -140,67 +140,8 @@ __OUTS(l)
#include <linux/vmalloc.h>
#ifndef __i386__
/*
* Change virtual addresses to physical addresses and vv.
* These are pretty trivial
*/
static inline unsigned long virt_to_phys(volatile void *address)
{
return __pa(address);
}
static inline void *phys_to_virt(unsigned long address)
{
return __va(address);
}
#endif
/*
* Change "struct page" to physical address.
*/
#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
#include <asm-generic/iomap.h>
/*
* This one maps high address device memory and turns off caching for that area.
* it's useful if some control registers are in such an area and write combining
* or read caching is not desirable:
*/
extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
unsigned long prot_val);
/*
* The default ioremap() behavior is non-cached:
*/
static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
{
return ioremap_nocache(offset, size);
}
extern void iounmap(volatile void __iomem *addr);
extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
/*
* ISA I/O bus memory addresses are 1:1 with the physical address.
*/
#define isa_virt_to_bus virt_to_phys
#define isa_page_to_bus page_to_phys
#define isa_bus_to_virt phys_to_virt
/*
* However PCI ones are not necessarily 1:1 and therefore these interfaces
* are forbidden in portable PCI drivers.
*
* Allow them on x86 for legacy drivers, though.
*/
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
void __memcpy_fromio(void *, unsigned long, unsigned);
void __memcpy_toio(unsigned long, const void *, unsigned);


@@ -9,23 +9,8 @@
# define PAGES_NR 4
#else
# define PA_CONTROL_PAGE 0
# define VA_CONTROL_PAGE 1
# define PA_PGD 2
# define VA_PGD 3
# define PA_PUD_0 4
# define VA_PUD_0 5
# define PA_PMD_0 6
# define VA_PMD_0 7
# define PA_PTE_0 8
# define VA_PTE_0 9
# define PA_PUD_1 10
# define VA_PUD_1 11
# define PA_PMD_1 12
# define VA_PMD_1 13
# define PA_PTE_1 14
# define VA_PTE_1 15
# define PA_TABLE_PAGE 16
# define PAGES_NR 17
# define PA_TABLE_PAGE 1
# define PAGES_NR 2
#endif
#ifdef CONFIG_X86_32
@@ -157,9 +142,9 @@ relocate_kernel(unsigned long indirection_page,
unsigned long start_address) ATTRIB_NORET;
#endif
#ifdef CONFIG_X86_32
#define ARCH_HAS_KIMAGE_ARCH
#ifdef CONFIG_X86_32
struct kimage_arch {
pgd_t *pgd;
#ifdef CONFIG_X86_PAE
@@ -169,6 +154,12 @@ struct kimage_arch {
pte_t *pte0;
pte_t *pte1;
};
#else
struct kimage_arch {
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
};
#endif
#endif /* __ASSEMBLY__ */


@@ -95,6 +95,11 @@ static inline pgdval_t native_pgd_val(pgd_t pgd)
return pgd.pgd;
}
static inline pgdval_t pgd_flags(pgd_t pgd)
{
return native_pgd_val(pgd) & PTE_FLAGS_MASK;
}
#if PAGETABLE_LEVELS >= 3
#if PAGETABLE_LEVELS == 4
typedef struct { pudval_t pud; } pud_t;
@@ -117,6 +122,11 @@ static inline pudval_t native_pud_val(pud_t pud)
}
#endif /* PAGETABLE_LEVELS == 4 */
static inline pudval_t pud_flags(pud_t pud)
{
return native_pud_val(pud) & PTE_FLAGS_MASK;
}
typedef struct { pmdval_t pmd; } pmd_t;
static inline pmd_t native_make_pmd(pmdval_t val)
@@ -128,6 +138,7 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
{
return pmd.pmd;
}
#else /* PAGETABLE_LEVELS == 2 */
#include <asm-generic/pgtable-nopmd.h>
@@ -137,6 +148,11 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
}
#endif /* PAGETABLE_LEVELS >= 3 */
static inline pmdval_t pmd_flags(pmd_t pmd)
{
return native_pmd_val(pmd) & PTE_FLAGS_MASK;
}
static inline pte_t native_make_pte(pteval_t val)
{
return (pte_t) { .pte = val };


@@ -53,8 +53,6 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
#endif
#define pte_none(x) (!(x).pte_low)
/*
* Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken,
* split up the 29 bits of offset into this range:


@@ -18,21 +18,6 @@
printk("%s:%d: bad pgd %p(%016Lx).\n", \
__FILE__, __LINE__, &(e), pgd_val(e))
static inline int pud_none(pud_t pud)
{
return pud_val(pud) == 0;
}
static inline int pud_bad(pud_t pud)
{
return (pud_val(pud) & ~(PTE_PFN_MASK | _KERNPG_TABLE | _PAGE_USER)) != 0;
}
static inline int pud_present(pud_t pud)
{
return pud_val(pud) & _PAGE_PRESENT;
}
/* Rules for using set_pte: the pte being assigned *must* be
* either not present or in a state where the hardware will
* not attempt to update the pte. In places where this is
@@ -120,15 +105,6 @@ static inline void pud_clear(pud_t *pudp)
write_cr3(pgd);
}
#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PTE_PFN_MASK))
/* Find an entry in the second-level page table.. */
#define pmd_offset(pud, address) ((pmd_t *)pud_page_vaddr(*(pud)) + \
pmd_index(address))
#ifdef CONFIG_SMP
static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
{
@@ -145,17 +121,6 @@ static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
#endif
#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
}
static inline int pte_none(pte_t pte)
{
return !pte.pte_low && !pte.pte_high;
}
/*
* Bits 0, 6 and 7 are taken in the low part of the pte,
* put the 32 bits of offset into the high part.


@@ -1,6 +1,8 @@
#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H
#include <asm/page.h>
#define FIRST_USER_ADDRESS 0
#define _PAGE_BIT_PRESENT 0 /* is present */
@@ -236,7 +238,7 @@ static inline unsigned long pte_pfn(pte_t pte)
static inline int pmd_large(pmd_t pte)
{
return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
return (pmd_flags(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
(_PAGE_PSE | _PAGE_PRESENT);
}
@@ -465,6 +467,190 @@ static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
# include "pgtable_64.h"
#endif
#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
static inline int pte_none(pte_t pte)
{
return !pte.pte;
}
#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
return a.pte == b.pte;
}
static inline int pte_present(pte_t a)
{
return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}
static inline int pmd_present(pmd_t pmd)
{
return pmd_flags(pmd) & _PAGE_PRESENT;
}
static inline int pmd_none(pmd_t pmd)
{
/* Only check low word on 32-bit platforms, since it might be
out of sync with upper half. */
return (unsigned long)native_pmd_val(pmd) == 0;
}
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK);
}
/*
* Currently stuck as a macro due to indirect forward reference to
* linux/mmzone.h's __section_mem_map_addr() definition:
*/
#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
/*
* the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
*
* this macro returns the index of the entry in the pmd page which would
* control the given virtual address
*/
static inline unsigned pmd_index(unsigned long address)
{
return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*
* (Currently stuck as a macro because of indirect forward reference
* to linux/mm.h:page_to_nid())
*/
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
/*
* the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
*
* this function returns the index of the entry in the pte page which would
* control the given virtual address
*/
static inline unsigned pte_index(unsigned long address)
{
return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}
static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}
static inline int pmd_bad(pmd_t pmd)
{
return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}
static inline unsigned long pages_to_mb(unsigned long npg)
{
return npg >> (20 - PAGE_SHIFT);
}
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
#if PAGETABLE_LEVELS == 2
static inline int pud_large(pud_t pud)
{
return 0;
}
#endif
#if PAGETABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
return native_pud_val(pud) == 0;
}
static inline int pud_present(pud_t pud)
{
return pud_flags(pud) & _PAGE_PRESENT;
}
static inline unsigned long pud_page_vaddr(pud_t pud)
{
return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK);
}
/*
* Currently stuck as a macro due to indirect forward reference to
* linux/mmzone.h's __section_mem_map_addr() definition:
*/
#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}
static inline unsigned long pmd_pfn(pmd_t pmd)
{
return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}
static inline int pud_large(pud_t pud)
{
return (pud_flags(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
(_PAGE_PSE | _PAGE_PRESENT);
}
static inline int pud_bad(pud_t pud)
{
return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#endif /* PAGETABLE_LEVELS > 2 */
#if PAGETABLE_LEVELS > 3
static inline int pgd_present(pgd_t pgd)
{
return pgd_flags(pgd) & _PAGE_PRESENT;
}
static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}
/*
* Currently stuck as a macro due to indirect forward reference to
* linux/mmzone.h's __section_mem_map_addr() definition:
*/
#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
/* to find an entry in a page-table-directory. */
static inline unsigned pud_index(unsigned long address)
{
return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}
static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
}
static inline int pgd_bad(pgd_t pgd)
{
return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
}
static inline int pgd_none(pgd_t pgd)
{
return !native_pgd_val(pgd);
}
#endif /* PAGETABLE_LEVELS > 3 */
#endif /* __ASSEMBLY__ */
/*
* the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
*

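To make the index helpers above concrete (sketch only, not from this commit; assumes the 4-level 64-bit layout, with pgd_index() coming from the same header family):

static void show_indices(unsigned long addr)
{
	/* each helper extracts one slice of the virtual address: */
	printk(KERN_INFO "pgd %lu pud %u pmd %u pte %u\n",
	       pgd_index(addr), pud_index(addr),
	       pmd_index(addr), pte_index(addr));
}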

@@ -85,55 +85,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
/* The boot page tables (all created as a single array) */
extern unsigned long pg0[];
#define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
/* To avoid harmful races, pmd_none(x) should check only the lower when PAE */
#define pmd_none(x) (!(unsigned long)pmd_val((x)))
#define pmd_present(x) (pmd_val((x)) & _PAGE_PRESENT)
#define pmd_bad(x) ((pmd_val(x) & (PTE_FLAGS_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level.h>
#else
# include <asm/pgtable-2level.h>
#endif
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*/
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
static inline int pud_large(pud_t pud) { return 0; }
/*
* the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
*
* this macro returns the index of the entry in the pmd page which would
* control the given virtual address
*/
#define pmd_index(address) \
(((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
/*
* the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
*
* this macro returns the index of the entry in the pte page which would
* control the given virtual address
*/
#define pte_index(address) \
(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
((pte_t *)pmd_page_vaddr(*(dir)) + pte_index((address)))
#define pmd_page(pmd) (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT))
#define pmd_page_vaddr(pmd) \
((unsigned long)__va(pmd_val((pmd)) & PTE_PFN_MASK))
#if defined(CONFIG_HIGHPTE)
#define pte_offset_map(dir, address) \
((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE0) + \
@@ -176,7 +133,4 @@ do { \
#define kern_addr_valid(kaddr) (0)
#endif
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
#endif /* _ASM_X86_PGTABLE_32_H */


@@ -66,9 +66,6 @@ extern void paging_init(void);
printk("%s:%d: bad pgd %p(%016lx).\n", \
__FILE__, __LINE__, &(e), pgd_val(e))
#define pgd_none(x) (!pgd_val(x))
#define pud_none(x) (!pud_val(x))
struct mm_struct;
void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);
@@ -133,8 +130,6 @@ static inline void native_pgd_clear(pgd_t *pgd)
native_set_pgd(pgd, native_make_pgd(0));
}
#define pte_same(a, b) ((a).pte == (b).pte)
#endif /* !__ASSEMBLY__ */
#define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
@@ -155,26 +150,6 @@ static inline void native_pgd_clear(pgd_t *pgd)
#ifndef __ASSEMBLY__
static inline int pgd_bad(pgd_t pgd)
{
return (pgd_val(pgd) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE;
}
static inline int pud_bad(pud_t pud)
{
return (pud_val(pud) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE;
}
static inline int pmd_bad(pmd_t pmd)
{
return (pmd_val(pmd) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE;
}
#define pte_none(x) (!pte_val((x)))
#define pte_present(x) (pte_val((x)) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT)) /* FIXME: is this right? */
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
@@ -183,41 +158,12 @@ static inline int pmd_bad(pmd_t pmd)
/*
* Level 4 access.
*/
#define pgd_page_vaddr(pgd) \
((unsigned long)__va((unsigned long)pgd_val((pgd)) & PTE_PFN_MASK))
#define pgd_page(pgd) (pfn_to_page(pgd_val((pgd)) >> PAGE_SHIFT))
#define pgd_present(pgd) (pgd_val(pgd) & _PAGE_PRESENT)
static inline int pgd_large(pgd_t pgd) { return 0; }
#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)
/* PUD - Level3 access */
/* to find an entry in a page-table-directory. */
#define pud_page_vaddr(pud) \
((unsigned long)__va(pud_val((pud)) & PHYSICAL_PAGE_MASK))
#define pud_page(pud) (pfn_to_page(pud_val((pud)) >> PAGE_SHIFT))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
#define pud_offset(pgd, address) \
((pud_t *)pgd_page_vaddr(*(pgd)) + pud_index((address)))
#define pud_present(pud) (pud_val((pud)) & _PAGE_PRESENT)
static inline int pud_large(pud_t pte)
{
return (pud_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
(_PAGE_PSE | _PAGE_PRESENT);
}
/* PMD - Level 2 access */
#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val((pmd)) & PTE_PFN_MASK))
#define pmd_page(pmd) (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
#define pmd_offset(dir, address) ((pmd_t *)pud_page_vaddr(*(dir)) + \
pmd_index(address))
#define pmd_none(x) (!pmd_val((x)))
#define pmd_present(x) (pmd_val((x)) & _PAGE_PRESENT)
#define pfn_pmd(nr, prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val((prot))))
#define pmd_pfn(x) ((pmd_val((x)) & __PHYSICAL_MASK) >> PAGE_SHIFT)
#define pte_to_pgoff(pte) ((pte_val((pte)) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
#define pgoff_to_pte(off) ((pte_t) { .pte = ((off) << PAGE_SHIFT) | \
_PAGE_FILE })
@@ -225,13 +171,6 @@ static inline int pud_large(pud_t pte)
/* PTE - Level 1 access. */
/* page, protection -> pte */
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn((page)), (pgprot))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_vaddr(*(dir)) + \
pte_index((address)))
/* x86-64 always has all page tables mapped. */
#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
#define pte_offset_map_nested(dir, address) pte_offset_kernel((dir), (address))
@@ -265,9 +204,6 @@ extern int direct_gbpages;
extern int kern_addr_valid(unsigned long addr);
extern void cleanup_highmap(void);
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN


@@ -108,35 +108,18 @@ enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC;
*/
char *__init __acpi_map_table(unsigned long phys, unsigned long size)
{
unsigned long base, offset, mapped_size;
int idx;
if (!phys || !size)
return NULL;
if (phys+size <= (max_low_pfn_mapped << PAGE_SHIFT))
return __va(phys);
return early_ioremap(phys, size);
}
void __init __acpi_unmap_table(char *map, unsigned long size)
{
if (!map || !size)
return;
offset = phys & (PAGE_SIZE - 1);
mapped_size = PAGE_SIZE - offset;
clear_fixmap(FIX_ACPI_END);
set_fixmap(FIX_ACPI_END, phys);
base = fix_to_virt(FIX_ACPI_END);
/*
* Most cases can be covered by the below.
*/
idx = FIX_ACPI_END;
while (mapped_size < size) {
if (--idx < FIX_ACPI_BEGIN)
return NULL; /* cannot handle this */
phys += PAGE_SIZE;
clear_fixmap(idx);
set_fixmap(idx, phys);
mapped_size += PAGE_SIZE;
}
return ((unsigned char *)base + offset);
early_iounmap(map, size);
}
#ifdef CONFIG_PCI_MMCONFIG

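With the fixmap scheme gone, the x86 implementation above simply defers to early_ioremap()/early_iounmap(), so every boot-time table mapping must be paired with an unmap of the same size. A sketch of the expected pattern (not from this commit, names made up):

static void __init peek_table(unsigned long phys, unsigned long size)
{
	char *p = __acpi_map_table(phys, size);

	if (!p)
		return;
	/* ... parse the table through p ... */
	__acpi_unmap_table(p, size);	/* early_iounmap() underneath */
}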

@@ -555,7 +555,8 @@ static void __init lapic_cal_handler(struct clock_event_device *dev)
}
}
static int __init calibrate_by_pmtimer(long deltapm, long *delta)
static int __init
calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc)
{
const long pm_100ms = PMTMR_TICKS_PER_SEC / 10;
const long pm_thresh = pm_100ms / 100;
@@ -566,7 +567,7 @@ static int __init calibrate_by_pmtimer(long deltapm, long *delta)
return -1;
#endif
apic_printk(APIC_VERBOSE, "... PM timer delta = %ld\n", deltapm);
apic_printk(APIC_VERBOSE, "... PM-Timer delta = %ld\n", deltapm);
/* Check, if the PM timer is available */
if (!deltapm)
@@ -576,19 +577,30 @@ static int __init calibrate_by_pmtimer(long deltapm, long *delta)
if (deltapm > (pm_100ms - pm_thresh) &&
deltapm < (pm_100ms + pm_thresh)) {
apic_printk(APIC_VERBOSE, "... PM timer result ok\n");
} else {
res = (((u64)deltapm) * mult) >> 22;
do_div(res, 1000000);
pr_warning("APIC calibration not consistent "
"with PM Timer: %ldms instead of 100ms\n",
(long)res);
/* Correct the lapic counter value */
res = (((u64)(*delta)) * pm_100ms);
apic_printk(APIC_VERBOSE, "... PM-Timer result ok\n");
return 0;
}
res = (((u64)deltapm) * mult) >> 22;
do_div(res, 1000000);
pr_warning("APIC calibration not consistent "
"with PM-Timer: %ldms instead of 100ms\n",(long)res);
/* Correct the lapic counter value */
res = (((u64)(*delta)) * pm_100ms);
do_div(res, deltapm);
pr_info("APIC delta adjusted to PM-Timer: "
"%lu (%ld)\n", (unsigned long)res, *delta);
*delta = (long)res;
/* Correct the tsc counter value */
if (cpu_has_tsc) {
res = (((u64)(*deltatsc)) * pm_100ms);
do_div(res, deltapm);
apic_printk(APIC_VERBOSE, "TSC delta adjusted to "
"PM-Timer: %lu (%ld) \n",
(unsigned long)res, *deltatsc);
*deltatsc = (long)res;
}
return 0;
@@ -599,7 +611,7 @@ static int __init calibrate_APIC_clock(void)
struct clock_event_device *levt = &__get_cpu_var(lapic_events);
void (*real_handler)(struct clock_event_device *dev);
unsigned long deltaj;
long delta;
long delta, deltatsc;
int pm_referenced = 0;
local_irq_disable();
@@ -629,9 +641,11 @@ static int __init calibrate_APIC_clock(void)
delta = lapic_cal_t1 - lapic_cal_t2;
apic_printk(APIC_VERBOSE, "... lapic delta = %ld\n", delta);
deltatsc = (long)(lapic_cal_tsc2 - lapic_cal_tsc1);
/* we trust the PM based calibration if possible */
pm_referenced = !calibrate_by_pmtimer(lapic_cal_pm2 - lapic_cal_pm1,
&delta);
&delta, &deltatsc);
/* Calculate the scaled math multiplication factor */
lapic_clockevent.mult = div_sc(delta, TICK_NSEC * LAPIC_CAL_LOOPS,
@@ -649,11 +663,10 @@ static int __init calibrate_APIC_clock(void)
calibration_result);
if (cpu_has_tsc) {
delta = (long)(lapic_cal_tsc2 - lapic_cal_tsc1);
apic_printk(APIC_VERBOSE, "..... CPU clock speed is "
"%ld.%04ld MHz.\n",
(delta / LAPIC_CAL_LOOPS) / (1000000 / HZ),
(delta / LAPIC_CAL_LOOPS) % (1000000 / HZ));
(deltatsc / LAPIC_CAL_LOOPS) / (1000000 / HZ),
(deltatsc / LAPIC_CAL_LOOPS) % (1000000 / HZ));
}
apic_printk(APIC_VERBOSE, "..... host bus clock speed is "

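The correction above is a plain proportional rescale: if the PM timer counted deltapm ticks instead of the nominal pm_100ms (PMTMR_TICKS_PER_SEC / 10 = 357954) over the calibration window, both the lapic and TSC deltas are multiplied by pm_100ms / deltapm. In isolation (sketch only, not from this commit, numbers made up):

static long rescale(long delta, long deltapm)
{
	u64 res = (u64)delta * 357954;	/* pm_100ms */

	do_div(res, deltapm);	/* res = delta * pm_100ms / deltapm */
	return (long)res;
}

/* rescale(1000000, 389000) == 920190: the window ran ~8.7% long, so
   the raw counter deltas are scaled down by the same factor. */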

@@ -99,7 +99,7 @@ print_context_stack(struct thread_info *tinfo,
frame = frame->next_frame;
bp = (unsigned long) frame;
} else {
ops->address(data, addr, bp == 0);
ops->address(data, addr, 0);
}
print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
}


@@ -13,8 +13,8 @@
#include <asm/setup.h>
#include <xen/hvc-console.h>
#include <asm/pci-direct.h>
#include <asm/pgtable.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <linux/usb/ehci_def.h>
/* Simple VGA output */


@@ -287,24 +287,31 @@ int __init find_unisys_acpi_oem_table(unsigned long *oem_addr)
{
struct acpi_table_header *header = NULL;
int i = 0;
acpi_size tbl_size;
while (ACPI_SUCCESS(acpi_get_table("OEM1", i++, &header))) {
while (ACPI_SUCCESS(acpi_get_table_with_size("OEM1", i++, &header, &tbl_size))) {
if (!memcmp((char *) &header->oem_id, "UNISYS", 6)) {
struct oem_table *t = (struct oem_table *)header;
oem_addrX = t->OEMTableAddr;
oem_size = t->OEMTableSize;
early_acpi_os_unmap_memory(header, tbl_size);
*oem_addr = (unsigned long)__acpi_map_table(oem_addrX,
oem_size);
return 0;
}
early_acpi_os_unmap_memory(header, tbl_size);
}
return -1;
}
void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr)
{
if (!oem_addr)
return;
__acpi_unmap_table((char *)oem_addr, oem_size);
}
#endif


@@ -18,15 +18,6 @@
#include <asm/mmu_context.h>
#include <asm/io.h>
#define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE)))
static u64 kexec_pgd[512] PAGE_ALIGNED;
static u64 kexec_pud0[512] PAGE_ALIGNED;
static u64 kexec_pmd0[512] PAGE_ALIGNED;
static u64 kexec_pte0[512] PAGE_ALIGNED;
static u64 kexec_pud1[512] PAGE_ALIGNED;
static u64 kexec_pmd1[512] PAGE_ALIGNED;
static u64 kexec_pte1[512] PAGE_ALIGNED;
static void init_level2_page(pmd_t *level2p, unsigned long addr)
{
unsigned long end_addr;
@@ -107,12 +98,65 @@ out:
return result;
}
static void free_transition_pgtable(struct kimage *image)
{
free_page((unsigned long)image->arch.pud);
free_page((unsigned long)image->arch.pmd);
free_page((unsigned long)image->arch.pte);
}
static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
{
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
unsigned long vaddr, paddr;
int result = -ENOMEM;
vaddr = (unsigned long)relocate_kernel;
paddr = __pa(page_address(image->control_code_page)+PAGE_SIZE);
pgd += pgd_index(vaddr);
if (!pgd_present(*pgd)) {
pud = (pud_t *)get_zeroed_page(GFP_KERNEL);
if (!pud)
goto err;
image->arch.pud = pud;
set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
}
pud = pud_offset(pgd, vaddr);
if (!pud_present(*pud)) {
pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
if (!pmd)
goto err;
image->arch.pmd = pmd;
set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
}
pmd = pmd_offset(pud, vaddr);
if (!pmd_present(*pmd)) {
pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
if (!pte)
goto err;
image->arch.pte = pte;
set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
}
pte = pte_offset_kernel(pmd, vaddr);
set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
return 0;
err:
free_transition_pgtable(image);
return result;
}
static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
{
pgd_t *level4p;
int result;
level4p = (pgd_t *)__va(start_pgtable);
return init_level4_page(image, level4p, 0, max_pfn << PAGE_SHIFT);
result = init_level4_page(image, level4p, 0, max_pfn << PAGE_SHIFT);
if (result)
return result;
return init_transition_pgtable(image, level4p);
}
static void set_idt(void *newidt, u16 limit)
@@ -174,7 +218,7 @@ int machine_kexec_prepare(struct kimage *image)
void machine_kexec_cleanup(struct kimage *image)
{
return;
free_transition_pgtable(image);
}
/*
@@ -195,22 +239,6 @@ void machine_kexec(struct kimage *image)
memcpy(control_page, relocate_kernel, PAGE_SIZE);
page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page);
page_list[VA_CONTROL_PAGE] = (unsigned long)relocate_kernel;
page_list[PA_PGD] = virt_to_phys(&kexec_pgd);
page_list[VA_PGD] = (unsigned long)kexec_pgd;
page_list[PA_PUD_0] = virt_to_phys(&kexec_pud0);
page_list[VA_PUD_0] = (unsigned long)kexec_pud0;
page_list[PA_PMD_0] = virt_to_phys(&kexec_pmd0);
page_list[VA_PMD_0] = (unsigned long)kexec_pmd0;
page_list[PA_PTE_0] = virt_to_phys(&kexec_pte0);
page_list[VA_PTE_0] = (unsigned long)kexec_pte0;
page_list[PA_PUD_1] = virt_to_phys(&kexec_pud1);
page_list[VA_PUD_1] = (unsigned long)kexec_pud1;
page_list[PA_PMD_1] = virt_to_phys(&kexec_pmd1);
page_list[VA_PMD_1] = (unsigned long)kexec_pmd1;
page_list[PA_PTE_1] = virt_to_phys(&kexec_pte1);
page_list[VA_PTE_1] = (unsigned long)kexec_pte1;
page_list[PA_TABLE_PAGE] =
(unsigned long)__pa(page_address(image->control_code_page));


@@ -29,122 +29,6 @@ relocate_kernel:
* %rdx start address
*/
/* map the control page at its virtual address */
movq $0x0000ff8000000000, %r10 /* mask */
mov $(39 - 3), %cl /* bits to shift */
movq PTR(VA_CONTROL_PAGE)(%rsi), %r11 /* address to map */
movq %r11, %r9
andq %r10, %r9
shrq %cl, %r9
movq PTR(VA_PGD)(%rsi), %r8
addq %r8, %r9
movq PTR(PA_PUD_0)(%rsi), %r8
orq $PAGE_ATTR, %r8
movq %r8, (%r9)
shrq $9, %r10
sub $9, %cl
movq %r11, %r9
andq %r10, %r9
shrq %cl, %r9
movq PTR(VA_PUD_0)(%rsi), %r8
addq %r8, %r9
movq PTR(PA_PMD_0)(%rsi), %r8
orq $PAGE_ATTR, %r8
movq %r8, (%r9)
shrq $9, %r10
sub $9, %cl
movq %r11, %r9
andq %r10, %r9
shrq %cl, %r9
movq PTR(VA_PMD_0)(%rsi), %r8
addq %r8, %r9
movq PTR(PA_PTE_0)(%rsi), %r8
orq $PAGE_ATTR, %r8
movq %r8, (%r9)
shrq $9, %r10
sub $9, %cl
movq %r11, %r9
andq %r10, %r9
shrq %cl, %r9
movq PTR(VA_PTE_0)(%rsi), %r8
addq %r8, %r9
movq PTR(PA_CONTROL_PAGE)(%rsi), %r8
orq $PAGE_ATTR, %r8
movq %r8, (%r9)
/* identity map the control page at its physical address */
movq $0x0000ff8000000000, %r10 /* mask */
mov $(39 - 3), %cl /* bits to shift */
movq PTR(PA_CONTROL_PAGE)(%rsi), %r11 /* address to map */
movq %r11, %r9
andq %r10, %r9
shrq %cl, %r9
movq PTR(VA_PGD)(%rsi), %r8
addq %r8, %r9
movq PTR(PA_PUD_1)(%rsi), %r8
orq $PAGE_ATTR, %r8
movq %r8, (%r9)
shrq $9, %r10
sub $9, %cl
movq %r11, %r9
andq %r10, %r9
shrq %cl, %r9
movq PTR(VA_PUD_1)(%rsi), %r8
addq %r8, %r9
movq PTR(PA_PMD_1)(%rsi), %r8
orq $PAGE_ATTR, %r8
movq %r8, (%r9)
shrq $9, %r10
sub $9, %cl
movq %r11, %r9
andq %r10, %r9
shrq %cl, %r9
movq PTR(VA_PMD_1)(%rsi), %r8
addq %r8, %r9
movq PTR(PA_PTE_1)(%rsi), %r8
orq $PAGE_ATTR, %r8
movq %r8, (%r9)
shrq $9, %r10
sub $9, %cl
movq %r11, %r9
andq %r10, %r9
shrq %cl, %r9
movq PTR(VA_PTE_1)(%rsi), %r8
addq %r8, %r9
movq PTR(PA_CONTROL_PAGE)(%rsi), %r8
orq $PAGE_ATTR, %r8
movq %r8, (%r9)
relocate_new_kernel:
/* %rdi indirection_page
* %rsi page_list
* %rdx start address
*/
/* zero out flags, and disable interrupts */
pushq $0
popfq
@@ -156,9 +40,8 @@ relocate_new_kernel:
/* get physical address of page table now too */
movq PTR(PA_TABLE_PAGE)(%rsi), %rcx
/* switch to new set of page tables */
movq PTR(PA_PGD)(%rsi), %r9
movq %r9, %cr3
/* Switch to the identity mapped page tables */
movq %rcx, %cr3
/* setup a new stack at the end of the physical control page */
lea PAGE_SIZE(%r8), %rsp
@@ -194,9 +77,7 @@ identity_mapped:
jmp 1f
1:
/* Switch to the identity mapped page tables,
* and flush the TLB.
*/
/* Flush the TLB (needed?) */
movq %rcx, %cr3
/* Do the copies */


@@ -29,6 +29,7 @@
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/segment.h>
#include <asm/processor-flags.h>
.section .rodata, "a", @progbits
@@ -37,7 +38,7 @@
ENTRY(trampoline_data)
r_base = .
cli # We should be safe anyway
wbinvd
wbinvd
mov %cs, %ax # Code and data in the same place
mov %ax, %ds
mov %ax, %es
@@ -73,9 +74,8 @@ r_base = .
lidtl tidt - r_base # load idt with 0, 0
lgdtl tgdt - r_base # load gdt with whatever is appropriate
xor %ax, %ax
inc %ax # protected mode (PE) bit
lmsw %ax # into protected mode
mov $X86_CR0_PE, %ax # protected mode (PE) bit
lmsw %ax # into protected mode
# flush prefetch and jump to startup_32
ljmpl *(startup_32_vector - r_base)
@@ -86,9 +86,8 @@ startup_32:
movl $__KERNEL_DS, %eax # Initialize the %ds segment register
movl %eax, %ds
xorl %eax, %eax
btsl $5, %eax # Enable PAE mode
movl %eax, %cr4
movl $X86_CR4_PAE, %eax
movl %eax, %cr4 # Enable PAE mode
# Setup trampoline 4 level pagetables
leal (trampoline_level4_pgt - r_base)(%esi), %eax
@@ -99,9 +98,9 @@ startup_32:
xorl %edx, %edx
wrmsr
xorl %eax, %eax
btsl $31, %eax # Enable paging and in turn activate Long Mode
btsl $0, %eax # Enable protected mode
# Enable paging and in turn activate Long Mode
# Enable protected mode
movl $(X86_CR0_PG | X86_CR0_PE), %eax
movl %eax, %cr0
/*


@@ -674,6 +674,86 @@ static int __init parse_highmem(char *arg)
}
early_param("highmem", parse_highmem);
#define MSG_HIGHMEM_TOO_BIG \
"highmem size (%luMB) is bigger than pages available (%luMB)!\n"
#define MSG_LOWMEM_TOO_SMALL \
"highmem size (%luMB) results in <64MB lowmem, ignoring it!\n"
/*
* All of RAM fits into lowmem - but if user wants highmem
* artificially via the highmem=x boot parameter then create
* it:
*/
void __init lowmem_pfn_init(void)
{
/* max_low_pfn is 0, we already have early_res support */
max_low_pfn = max_pfn;
if (highmem_pages == -1)
highmem_pages = 0;
#ifdef CONFIG_HIGHMEM
if (highmem_pages >= max_pfn) {
printk(KERN_ERR MSG_HIGHMEM_TOO_BIG,
pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
highmem_pages = 0;
}
if (highmem_pages) {
if (max_low_pfn - highmem_pages < 64*1024*1024/PAGE_SIZE) {
printk(KERN_ERR MSG_LOWMEM_TOO_SMALL,
pages_to_mb(highmem_pages));
highmem_pages = 0;
}
max_low_pfn -= highmem_pages;
}
#else
if (highmem_pages)
printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
#endif
}
#define MSG_HIGHMEM_TOO_SMALL \
"only %luMB highmem pages available, ignoring highmem size of %luMB!\n"
#define MSG_HIGHMEM_TRIMMED \
"Warning: only 4GB will be used. Use a HIGHMEM64G enabled kernel!\n"
/*
* We have more RAM than fits into lowmem - we try to put it into
* highmem, also taking the highmem=x boot parameter into account:
*/
void __init highmem_pfn_init(void)
{
max_low_pfn = MAXMEM_PFN;
if (highmem_pages == -1)
highmem_pages = max_pfn - MAXMEM_PFN;
if (highmem_pages + MAXMEM_PFN < max_pfn)
max_pfn = MAXMEM_PFN + highmem_pages;
if (highmem_pages + MAXMEM_PFN > max_pfn) {
printk(KERN_WARNING MSG_HIGHMEM_TOO_SMALL,
pages_to_mb(max_pfn - MAXMEM_PFN),
pages_to_mb(highmem_pages));
highmem_pages = 0;
}
#ifndef CONFIG_HIGHMEM
/* Maximum memory usable is what is directly addressable */
printk(KERN_WARNING "Warning only %ldMB will be used.\n", MAXMEM>>20);
if (max_pfn > MAX_NONPAE_PFN)
printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n");
else
printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
max_pfn = MAXMEM_PFN;
#else /* !CONFIG_HIGHMEM */
#ifndef CONFIG_HIGHMEM64G
if (max_pfn > MAX_NONPAE_PFN) {
max_pfn = MAX_NONPAE_PFN;
printk(KERN_WARNING MSG_HIGHMEM_TRIMMED);
}
#endif /* !CONFIG_HIGHMEM64G */
#endif /* !CONFIG_HIGHMEM */
}
/*
* Determine low and high memory ranges:
*/
@@ -681,68 +761,10 @@ void __init find_low_pfn_range(void)
{
/* it could update max_pfn */
/* max_low_pfn is 0, we already have early_res support */
max_low_pfn = max_pfn;
if (max_low_pfn > MAXMEM_PFN) {
if (highmem_pages == -1)
highmem_pages = max_pfn - MAXMEM_PFN;
if (highmem_pages + MAXMEM_PFN < max_pfn)
max_pfn = MAXMEM_PFN + highmem_pages;
if (highmem_pages + MAXMEM_PFN > max_pfn) {
printk(KERN_WARNING "only %luMB highmem pages "
"available, ignoring highmem size of %uMB.\n",
pages_to_mb(max_pfn - MAXMEM_PFN),
pages_to_mb(highmem_pages));
highmem_pages = 0;
}
max_low_pfn = MAXMEM_PFN;
#ifndef CONFIG_HIGHMEM
/* Maximum memory usable is what is directly addressable */
printk(KERN_WARNING "Warning only %ldMB will be used.\n",
MAXMEM>>20);
if (max_pfn > MAX_NONPAE_PFN)
printk(KERN_WARNING
"Use a HIGHMEM64G enabled kernel.\n");
else
printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
max_pfn = MAXMEM_PFN;
#else /* !CONFIG_HIGHMEM */
#ifndef CONFIG_HIGHMEM64G
if (max_pfn > MAX_NONPAE_PFN) {
max_pfn = MAX_NONPAE_PFN;
printk(KERN_WARNING "Warning only 4GB will be used."
"Use a HIGHMEM64G enabled kernel.\n");
}
#endif /* !CONFIG_HIGHMEM64G */
#endif /* !CONFIG_HIGHMEM */
} else {
if (highmem_pages == -1)
highmem_pages = 0;
#ifdef CONFIG_HIGHMEM
if (highmem_pages >= max_pfn) {
printk(KERN_ERR "highmem size specified (%uMB) is "
"bigger than pages available (%luMB)!.\n",
pages_to_mb(highmem_pages),
pages_to_mb(max_pfn));
highmem_pages = 0;
}
if (highmem_pages) {
if (max_low_pfn - highmem_pages <
64*1024*1024/PAGE_SIZE){
printk(KERN_ERR "highmem size %uMB results in "
"smaller than 64MB lowmem, ignoring it.\n"
, pages_to_mb(highmem_pages));
highmem_pages = 0;
}
max_low_pfn -= highmem_pages;
}
#else
if (highmem_pages)
printk(KERN_ERR "ignoring highmem size on non-highmem"
" kernel!\n");
#endif
}
if (max_pfn <= MAXMEM_PFN)
lowmem_pfn_init();
else
highmem_pfn_init();
}
#ifndef CONFIG_NEED_MULTIPLE_NODES

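A worked example for the lowmem path above (made-up numbers, 4k pages, and assuming the usual 3G/1G split where MAXMEM is roughly 896MB): with max_pfn = 229376 (896MB) and highmem=256m (highmem_pages = 65536), max_pfn <= MAXMEM_PFN so lowmem_pfn_init() runs; the <64MB-lowmem check passes (229376 - 65536 = 163840 pages, well above 64MB's 16384), max_low_pfn drops to 163840, and the remaining 256MB is handed to highmem.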

@@ -365,7 +365,7 @@ ACPI_EXPORT_SYMBOL(acpi_unload_table_id)
/*******************************************************************************
*
* FUNCTION: acpi_get_table
* FUNCTION: acpi_get_table_with_size
*
* PARAMETERS: Signature - ACPI signature of needed table
* Instance - Which instance (for SSDTs)
@@ -377,8 +377,9 @@ ACPI_EXPORT_SYMBOL(acpi_unload_table_id)
*
*****************************************************************************/
acpi_status
acpi_get_table(char *signature,
u32 instance, struct acpi_table_header **out_table)
acpi_get_table_with_size(char *signature,
u32 instance, struct acpi_table_header **out_table,
acpi_size *tbl_size)
{
u32 i;
u32 j;
@@ -408,6 +409,7 @@ acpi_get_table(char *signature,
acpi_tb_verify_table(&acpi_gbl_root_table_list.tables[i]);
if (ACPI_SUCCESS(status)) {
*out_table = acpi_gbl_root_table_list.tables[i].pointer;
*tbl_size = acpi_gbl_root_table_list.tables[i].length;
}
if (!acpi_gbl_permanent_mmap) {
@@ -420,6 +422,15 @@ acpi_get_table(char *signature,
return (AE_NOT_FOUND);
}
acpi_status
acpi_get_table(char *signature,
u32 instance, struct acpi_table_header **out_table)
{
acpi_size tbl_size;
return acpi_get_table_with_size(signature,
instance, out_table, &tbl_size);
}
ACPI_EXPORT_SYMBOL(acpi_get_table)
/*******************************************************************************

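A sketch of the intended calling pattern for the new interface (not from this commit, names made up): before the permanent ACPI mmap is set up, the mapping returned here must be dropped again with early_acpi_os_unmap_memory(), using the size reported via tbl_size:

static int __init find_my_table(void)
{
	struct acpi_table_header *table = NULL;
	acpi_size tbl_size;

	if (ACPI_FAILURE(acpi_get_table_with_size(ACPI_SIG_MADT, 0,
						  &table, &tbl_size)))
		return -ENODEV;
	/* ... inspect *table ... */
	early_acpi_os_unmap_memory(table, tbl_size);
	return 0;
}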

@@ -274,12 +274,19 @@ EXPORT_SYMBOL_GPL(acpi_os_map_memory);
void acpi_os_unmap_memory(void __iomem * virt, acpi_size size)
{
if (acpi_gbl_permanent_mmap) {
if (acpi_gbl_permanent_mmap)
iounmap(virt);
}
else
__acpi_unmap_table(virt, size);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
void early_acpi_os_unmap_memory(void __iomem * virt, acpi_size size)
{
if (!acpi_gbl_permanent_mmap)
__acpi_unmap_table(virt, size);
}
#ifdef ACPI_FUTURE_USAGE
acpi_status
acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)


@@ -181,14 +181,15 @@ acpi_table_parse_entries(char *id,
struct acpi_subtable_header *entry;
unsigned int count = 0;
unsigned long table_end;
acpi_size tbl_size;
if (!handler)
return -EINVAL;
if (strncmp(id, ACPI_SIG_MADT, 4) == 0)
acpi_get_table(id, acpi_apic_instance, &table_header);
acpi_get_table_with_size(id, acpi_apic_instance, &table_header, &tbl_size);
else
acpi_get_table(id, 0, &table_header);
acpi_get_table_with_size(id, 0, &table_header, &tbl_size);
if (!table_header) {
printk(KERN_WARNING PREFIX "%4.4s not present\n", id);
@@ -206,8 +207,10 @@ acpi_table_parse_entries(char *id,
table_end) {
if (entry->type == entry_id
&& (!max_entries || count++ < max_entries))
if (handler(entry, table_end))
if (handler(entry, table_end)) {
early_acpi_os_unmap_memory((char *)table_header, tbl_size);
return -EINVAL;
}
entry = (struct acpi_subtable_header *)
((unsigned long)entry + entry->length);
@@ -217,6 +220,7 @@ acpi_table_parse_entries(char *id,
"%i found\n", id, entry_id, count - max_entries, count);
}
early_acpi_os_unmap_memory((char *)table_header, tbl_size);
return count;
}
@@ -241,17 +245,19 @@ acpi_table_parse_madt(enum acpi_madt_type id,
int __init acpi_table_parse(char *id, acpi_table_handler handler)
{
struct acpi_table_header *table = NULL;
acpi_size tbl_size;
if (!handler)
return -EINVAL;
if (strncmp(id, ACPI_SIG_MADT, 4) == 0)
acpi_get_table(id, acpi_apic_instance, &table);
acpi_get_table_with_size(id, acpi_apic_instance, &table, &tbl_size);
else
acpi_get_table(id, 0, &table);
acpi_get_table_with_size(id, 0, &table, &tbl_size);
if (table) {
handler(table);
early_acpi_os_unmap_memory(table, tbl_size);
return 0;
} else
return 1;
@@ -265,8 +271,9 @@ int __init acpi_table_parse(char *id, acpi_table_handler handler)
static void __init check_multiple_madt(void)
{
struct acpi_table_header *table = NULL;
acpi_size tbl_size;
acpi_get_table(ACPI_SIG_MADT, 2, &table);
acpi_get_table_with_size(ACPI_SIG_MADT, 2, &table, &tbl_size);
if (table) {
printk(KERN_WARNING PREFIX
"BIOS bug: multiple APIC/MADT found,"
@@ -275,6 +282,7 @@ static void __init check_multiple_madt(void)
"If \"acpi_apic_instance=%d\" works better, "
"notify linux-acpi@vger.kernel.org\n",
acpi_apic_instance ? 0 : 2);
early_acpi_os_unmap_memory(table, tbl_size);
} else
acpi_apic_instance = 0;


@@ -938,8 +938,8 @@ static int __init ibft_init(void)
return -ENOMEM;
if (ibft_addr) {
printk(KERN_INFO "iBFT detected at 0x%lx.\n",
virt_to_phys((void *)ibft_addr));
printk(KERN_INFO "iBFT detected at 0x%llx.\n",
(u64)virt_to_phys((void *)ibft_addr));
rc = ibft_check_device();
if (rc)


@@ -678,9 +678,9 @@ static int drm__vma_info(char *buf, char **start, off_t offset, int request,
*start = &buf[offset];
*eof = 0;
DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%08lx\n",
DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%llx\n",
atomic_read(&dev->vma_count),
high_memory, virt_to_phys(high_memory));
high_memory, (u64)virt_to_phys(high_memory));
list_for_each_entry(pt, &dev->vmalist, head) {
if (!(vma = pt->vma))
continue;


@@ -150,7 +150,8 @@ static int __init ne3210_eisa_probe (struct device *device)
if (phys_mem < virt_to_phys(high_memory)) {
printk(KERN_CRIT "ne3210.c: Card RAM overlaps with normal memory!!!\n");
printk(KERN_CRIT "ne3210.c: Use EISA SCU to set card memory below 1MB,\n");
printk(KERN_CRIT "ne3210.c: or to an address above 0x%lx.\n", virt_to_phys(high_memory));
printk(KERN_CRIT "ne3210.c: or to an address above 0x%llx.\n",
(u64)virt_to_phys(high_memory));
printk(KERN_CRIT "ne3210.c: Driver NOT installed.\n");
retval = -EINVAL;
goto out3;


@@ -338,10 +338,10 @@ static int falcon_alloc_special_buffer(struct efx_nic *efx,
nic_data->next_buffer_table += buffer->entries;
EFX_LOG(efx, "allocating special buffers %d-%d at %llx+%x "
"(virt %p phys %lx)\n", buffer->index,
"(virt %p phys %llx)\n", buffer->index,
buffer->index + buffer->entries - 1,
(unsigned long long)buffer->dma_addr, len,
buffer->addr, virt_to_phys(buffer->addr));
(u64)buffer->dma_addr, len,
buffer->addr, (u64)virt_to_phys(buffer->addr));
return 0;
}
@@ -353,10 +353,10 @@ static void falcon_free_special_buffer(struct efx_nic *efx,
return;
EFX_LOG(efx, "deallocating special buffers %d-%d at %llx+%x "
"(virt %p phys %lx)\n", buffer->index,
"(virt %p phys %llx)\n", buffer->index,
buffer->index + buffer->entries - 1,
(unsigned long long)buffer->dma_addr, buffer->len,
buffer->addr, virt_to_phys(buffer->addr));
(u64)buffer->dma_addr, buffer->len,
buffer->addr, (u64)virt_to_phys(buffer->addr));
pci_free_consistent(efx->pci_dev, buffer->len, buffer->addr,
buffer->dma_addr);
@@ -2343,10 +2343,10 @@ int falcon_probe_port(struct efx_nic *efx)
FALCON_MAC_STATS_SIZE);
if (rc)
return rc;
EFX_LOG(efx, "stats buffer at %llx (virt %p phys %lx)\n",
(unsigned long long)efx->stats_buffer.dma_addr,
EFX_LOG(efx, "stats buffer at %llx (virt %p phys %llx)\n",
(u64)efx->stats_buffer.dma_addr,
efx->stats_buffer.addr,
virt_to_phys(efx->stats_buffer.addr));
(u64)virt_to_phys(efx->stats_buffer.addr));
return 0;
}
@@ -2921,9 +2921,9 @@ int falcon_probe_nic(struct efx_nic *efx)
goto fail4;
BUG_ON(efx->irq_status.dma_addr & 0x0f);
EFX_LOG(efx, "INT_KER at %llx (virt %p phys %lx)\n",
(unsigned long long)efx->irq_status.dma_addr,
efx->irq_status.addr, virt_to_phys(efx->irq_status.addr));
EFX_LOG(efx, "INT_KER at %llx (virt %p phys %llx)\n",
(u64)efx->irq_status.dma_addr,
efx->irq_status.addr, (u64)virt_to_phys(efx->irq_status.addr));
falcon_probe_spi_devices(efx);


@@ -1082,8 +1082,8 @@ static int __init arlan_probe_here(struct net_device *dev,
if (arlan_check_fingerprint(memaddr))
return -ENODEV;
printk(KERN_NOTICE "%s: Arlan found at %x, \n ", dev->name,
(int) virt_to_phys((void*)memaddr));
printk(KERN_NOTICE "%s: Arlan found at %llx, \n ", dev->name,
(u64) virt_to_phys((void*)memaddr));
ap->card = (void *) memaddr;
dev->mem_start = memaddr;


@@ -42,6 +42,7 @@
LIST_HEAD(dmar_drhd_units);
static struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;
static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
@@ -288,8 +289,9 @@ static int __init dmar_table_detect(void)
acpi_status status = AE_OK;
/* if we could find DMAR table, then there are DMAR devices */
status = acpi_get_table(ACPI_SIG_DMAR, 0,
(struct acpi_table_header **)&dmar_tbl);
status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
(struct acpi_table_header **)&dmar_tbl,
&dmar_tbl_size);
if (ACPI_SUCCESS(status) && !dmar_tbl) {
printk (KERN_WARNING PREFIX "Unable to map DMAR\n");
@@ -481,6 +483,7 @@ void __init detect_intel_iommu(void)
iommu_detected = 1;
#endif
}
early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
dmar_tbl = NULL;
}


@@ -144,6 +144,7 @@ void __iomem *acpi_os_map_memory(acpi_physical_address where,
acpi_size length);
void acpi_os_unmap_memory(void __iomem * logical_address, acpi_size size);
void early_acpi_os_unmap_memory(void __iomem * virt, acpi_size size);
#ifdef ACPI_FUTURE_USAGE
acpi_status


@@ -130,6 +130,10 @@ acpi_get_table_header(acpi_string signature,
struct acpi_table_header *out_table_header);
acpi_status
acpi_get_table_with_size(acpi_string signature,
u32 instance, struct acpi_table_header **out_table,
acpi_size *tbl_size);
acpi_status
acpi_get_table(acpi_string signature,
u32 instance, struct acpi_table_header **out_table);


@@ -79,6 +79,7 @@ typedef int (*acpi_table_handler) (struct acpi_table_header *table);
typedef int (*acpi_table_entry_handler) (struct acpi_subtable_header *header, const unsigned long end);
char * __acpi_map_table (unsigned long phys_addr, unsigned long size);
void __init __acpi_unmap_table(char *map, unsigned long size);
int early_acpi_boot_init(void);
int acpi_boot_init (void);
int acpi_boot_table_init (void);