dect/linux-2.6

commit a5a2bad55d
Author: Ingo Molnar
Date:   2010-09-24 09:12:05 +02:00

    Merge branch 'tip/perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into perf/core

33 changed files with 843 additions and 209 deletions
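The series merged here introduces jump labels: patchable code sites that cost a single nop when a rarely used feature (tracepoints, dynamic debug) is disabled, instead of a load, test, and branch. With compiler 'asm goto' support, each site records itself in a __jump_table section and can be flipped between nop and jump at run time. A minimal sketch of the call pattern, using hypothetical names around the JUMP_LABEL()/enable_jump_label() API added in include/linux/jump_label.h below:

#include <linux/kernel.h>
#include <linux/jump_label.h>

static int debug_key;				/* hypothetical key, one per feature */

static void hot_path(void)
{
	JUMP_LABEL(&debug_key, do_debug);	/* a nop while the key is disabled */
	return;					/* fast path falls straight through */
do_debug:
	printk(KERN_DEBUG "slow path taken\n");	/* reached only once the site is patched */
}

static void set_debugging(int on)		/* e.g. from a debugfs write */
{
	if (on)
		enable_jump_label(&debug_key);	/* patch every site using this key */
	else
		disable_jump_label(&debug_key);
}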

Makefile

@@ -591,6 +591,11 @@ KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow)
 # conserve stack if available
 KBUILD_CFLAGS += $(call cc-option,-fconserve-stack)
 
+# check for 'asm goto'
+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC)), y)
+	KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
+endif
+
 # Add user supplied CPPFLAGS, AFLAGS and CFLAGS as the last assignments
 # But warn user when we do so
 warn-assign = \

arch/Kconfig

@@ -158,4 +158,7 @@ config HAVE_PERF_EVENTS_NMI
 	  subsystem. Also has support for calculating CPU cycle events
 	  to determine how many clock cycles in a given period.
 
+config HAVE_ARCH_JUMP_LABEL
+	bool
+
 source "kernel/gcov/Kconfig"

arch/sparc/Kconfig

@@ -30,6 +30,7 @@ config SPARC
 	select PERF_USE_VMALLOC
 	select HAVE_DMA_ATTRS
 	select HAVE_DMA_API_DEBUG
+	select HAVE_ARCH_JUMP_LABEL
 
 config SPARC32
 	def_bool !64BIT

arch/sparc/include/asm/jump_label.h (new file)

@@ -0,0 +1,32 @@
#ifndef _ASM_SPARC_JUMP_LABEL_H
#define _ASM_SPARC_JUMP_LABEL_H

#ifdef __KERNEL__

#include <linux/types.h>
#include <asm/system.h>

#define JUMP_LABEL_NOP_SIZE 4

#define JUMP_LABEL(key, label)					\
	do {							\
		asm goto("1:\n\t"				\
			 "nop\n\t"				\
			 "nop\n\t"				\
			 ".pushsection __jump_table, \"a\"\n\t"	\
			 ".word 1b, %l[" #label "], %c0\n\t"	\
			 ".popsection \n\t"			\
			 : : "i" (key) : : label);		\
	} while (0)

#endif /* __KERNEL__ */

typedef u32 jump_label_t;

struct jump_entry {
	jump_label_t code;
	jump_label_t target;
	jump_label_t key;
};

#endif
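Each JUMP_LABEL() expansion above emits the two nops and pushes a (code, target, key) triple into the __jump_table section: the address of the site, the C label to branch to, and the key identifying the site. A hedged sketch of walking that table with the linker symbols defined later in this series (the dump helper itself is hypothetical):

#include <linux/kernel.h>
#include <linux/jump_label.h>

extern struct jump_entry __start___jump_table[];
extern struct jump_entry __stop___jump_table[];

/* hypothetical debug helper: list every built-in jump label site */
static void dump_jump_table(void)
{
	struct jump_entry *e;

	for (e = __start___jump_table; e < __stop___jump_table; e++)
		printk(KERN_DEBUG "site %lx -> %lx (key %lx)\n",
		       (unsigned long)e->code, (unsigned long)e->target,
		       (unsigned long)e->key);
}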

arch/sparc/kernel/Makefile

@@ -119,3 +119,5 @@ obj-$(CONFIG_COMPAT) += $(audit--y)
 pc--$(CONFIG_PERF_EVENTS) := perf_event.o
 obj-$(CONFIG_SPARC64)	+= $(pc--y)
+
+obj-$(CONFIG_SPARC64)	+= jump_label.o

arch/sparc/kernel/jump_label.c (new file)

@@ -0,0 +1,47 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/cpu.h>

#include <linux/jump_label.h>
#include <linux/memory.h>

#ifdef HAVE_JUMP_LABEL

void arch_jump_label_transform(struct jump_entry *entry,
			       enum jump_label_type type)
{
	u32 val;
	u32 *insn = (u32 *) (unsigned long) entry->code;

	if (type == JUMP_LABEL_ENABLE) {
		s32 off = (s32)entry->target - (s32)entry->code;

#ifdef CONFIG_SPARC64
		/* ba,pt %xcc, . + (off << 2) */
		val = 0x10680000 | ((u32) off >> 2);
#else
		/* ba . + (off << 2) */
		val = 0x10800000 | ((u32) off >> 2);
#endif
	} else {
		val = 0x01000000;
	}

	get_online_cpus();
	mutex_lock(&text_mutex);
	*insn = val;
	flushi(insn);
	mutex_unlock(&text_mutex);
	put_online_cpus();
}

void arch_jump_label_text_poke_early(jump_label_t addr)
{
	u32 *insn_p = (u32 *) (unsigned long) addr;

	*insn_p = 0x01000000;
	flushi(insn_p);
}

#endif
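SPARC branch displacements are counted in 4-byte words, hence the off >> 2. A worked example of the sparc64 encoding above, as a hypothetical standalone helper mirroring the patch: a site at 0x1000 branching to 0x1010 gives off = 0x10 bytes = 4 words, so the patched word is 0x10680000 | 4 = 0x10680004.

static u32 sparc64_branch_word(u32 site, u32 target)
{
	s32 off = (s32)target - (s32)site;	/* byte distance, as in the code above */

	return 0x10680000 | ((u32)off >> 2);	/* "ba,pt %xcc" with a word displacement */
}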

arch/sparc/kernel/module.c

@@ -18,6 +18,9 @@
 #include <asm/spitfire.h>
 
 #ifdef CONFIG_SPARC64
+
+#include <linux/jump_label.h>
+
 static void *module_map(unsigned long size)
 {
 	struct vm_struct *area;
@@ -227,6 +230,9 @@ int module_finalize(const Elf_Ehdr *hdr,
 		    const Elf_Shdr *sechdrs,
 		    struct module *me)
 {
+	/* make jump label nops */
+	jump_label_apply_nops(me);
+
 	/* Cheetah's I-cache is fully coherent. */
 	if (tlb_type == spitfire) {
 		unsigned long va;

arch/x86/Kconfig

@@ -59,6 +59,7 @@ config X86
 	select ANON_INODES
 	select HAVE_ARCH_KMEMCHECK
 	select HAVE_USER_RETURN_NOTIFIER
+	select HAVE_ARCH_JUMP_LABEL
 
 config INSTRUCTION_DECODER
 	def_bool (KPROBES || PERF_EVENTS)

arch/x86/include/asm/alternative.h

@@ -4,6 +4,7 @@
 #include <linux/types.h>
 #include <linux/stddef.h>
 #include <linux/stringify.h>
+#include <linux/jump_label.h>
 #include <asm/asm.h>
 
 /*
@@ -160,6 +161,8 @@ static inline void apply_paravirt(struct paravirt_patch_site *start,
 #define __parainstructions_end	NULL
 #endif
 
+extern void *text_poke_early(void *addr, const void *opcode, size_t len);
+
 /*
  * Clear and restore the kernel write-protection flag on the local CPU.
  * Allows the kernel to edit read-only pages.
@@ -180,4 +183,12 @@ static inline void apply_paravirt(struct paravirt_patch_site *start,
 extern void *text_poke(void *addr, const void *opcode, size_t len);
 extern void *text_poke_smp(void *addr, const void *opcode, size_t len);
 
+#if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL)
+#define IDEAL_NOP_SIZE_5 5
+extern unsigned char ideal_nop5[IDEAL_NOP_SIZE_5];
+extern void arch_init_ideal_nop5(void);
+#else
+static inline void arch_init_ideal_nop5(void) {}
+#endif
+
 #endif /* _ASM_X86_ALTERNATIVE_H */

arch/x86/include/asm/jump_label.h (new file)

@@ -0,0 +1,37 @@
#ifndef _ASM_X86_JUMP_LABEL_H
#define _ASM_X86_JUMP_LABEL_H

#ifdef __KERNEL__

#include <linux/types.h>
#include <asm/nops.h>

#define JUMP_LABEL_NOP_SIZE 5

# define JUMP_LABEL_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t"

# define JUMP_LABEL(key, label)					\
	do {							\
		asm goto("1:"					\
			JUMP_LABEL_INITIAL_NOP			\
			".pushsection __jump_table, \"a\" \n\t"	\
			_ASM_PTR "1b, %l[" #label "], %c0 \n\t"	\
			".popsection \n\t"			\
			: : "i" (key) : : label);		\
	} while (0)

#endif /* __KERNEL__ */

#ifdef CONFIG_X86_64
typedef u64 jump_label_t;
#else
typedef u32 jump_label_t;
#endif

struct jump_entry {
	jump_label_t code;
	jump_label_t target;
	jump_label_t key;
};

#endif
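Note that JUMP_LABEL_INITIAL_NOP is not yet the ideal nop: 0xe9 followed by a zero rel32 is a "jmp 0", a jump to the immediately following instruction, which is safe to execute before any patching has happened. The early-init pass added in kernel/jump_label.c below then rewrites every site with ideal_nop5, which arch_init_ideal_nop5() selects at boot.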

arch/x86/kernel/Makefile

@@ -32,7 +32,7 @@ GCOV_PROFILE_paravirt.o		:= n
 obj-y			:= process_$(BITS).o signal.o entry_$(BITS).o
 obj-y			+= traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
 obj-y			+= time.o ioport.o ldt.o dumpstack.o
-obj-y			+= setup.o x86_init.o i8259.o irqinit.o
+obj-y			+= setup.o x86_init.o i8259.o irqinit.o jump_label.o
 obj-$(CONFIG_X86_VISWS)	+= visws_quirks.o
 obj-$(CONFIG_X86_32)	+= probe_roms_32.o
 obj-$(CONFIG_X86_32)	+= sys_i386_32.o i386_ksyms_32.o

arch/x86/kernel/alternative.c

@@ -195,7 +195,7 @@ static void __init_or_module add_nops(void *insns, unsigned int len)
 
 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
 extern s32 __smp_locks[], __smp_locks_end[];
-static void *text_poke_early(void *addr, const void *opcode, size_t len);
+void *text_poke_early(void *addr, const void *opcode, size_t len);
 
 /* Replace instructions with better alternatives for this CPU type.
    This runs before SMP is initialized to avoid SMP problems with
@@ -522,7 +522,7 @@ void __init alternative_instructions(void)
  * instructions. And on the local CPU you need to be protected against NMI or MCE
  * handlers seeing an inconsistent instruction while you patch.
  */
-static void *__init_or_module text_poke_early(void *addr, const void *opcode,
+void *__init_or_module text_poke_early(void *addr, const void *opcode,
 				       size_t len)
 {
 	unsigned long flags;
@@ -641,3 +641,67 @@ void *__kprobes text_poke_smp(void *addr, const void *opcode, size_t len)
 	return addr;
 }
 
+#if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL)
+
+unsigned char ideal_nop5[IDEAL_NOP_SIZE_5];
+
+void __init arch_init_ideal_nop5(void)
+{
+	extern const unsigned char ftrace_test_p6nop[];
+	extern const unsigned char ftrace_test_nop5[];
+	extern const unsigned char ftrace_test_jmp[];
+	int faulted = 0;
+
+	/*
+	 * There is no good nop for all x86 archs.
+	 * We will default to using the P6_NOP5, but first we
+	 * will test to make sure that the nop will actually
+	 * work on this CPU. If it faults, we will then
+	 * go to a less efficient 5 byte nop. If that fails
+	 * we then just use a jmp as our nop. This isn't the most
+	 * efficient nop, but we can not use a multi part nop
+	 * since we would then risk being preempted in the middle
+	 * of that nop, and if we enabled tracing then, it might
+	 * cause a system crash.
+	 *
+	 * TODO: check the cpuid to determine the best nop.
+	 */
+	asm volatile (
+		"ftrace_test_jmp:"
+		"jmp ftrace_test_p6nop\n"
+		"nop\n"
+		"nop\n"
+		"nop\n"  /* 2 byte jmp + 3 bytes */
+		"ftrace_test_p6nop:"
+		P6_NOP5
+		"jmp 1f\n"
+		"ftrace_test_nop5:"
+		".byte 0x66,0x66,0x66,0x66,0x90\n"
+		"1:"
+		".section .fixup, \"ax\"\n"
+		"2:	movl $1, %0\n"
+		"	jmp ftrace_test_nop5\n"
+		"3:	movl $2, %0\n"
+		"	jmp 1b\n"
+		".previous\n"
+		_ASM_EXTABLE(ftrace_test_p6nop, 2b)
+		_ASM_EXTABLE(ftrace_test_nop5, 3b)
+		: "=r"(faulted) : "0" (faulted));
+
+	switch (faulted) {
+	case 0:
+		pr_info("converting mcount calls to 0f 1f 44 00 00\n");
+		memcpy(ideal_nop5, ftrace_test_p6nop, IDEAL_NOP_SIZE_5);
+		break;
+	case 1:
+		pr_info("converting mcount calls to 66 66 66 66 90\n");
+		memcpy(ideal_nop5, ftrace_test_nop5, IDEAL_NOP_SIZE_5);
+		break;
+	case 2:
+		pr_info("converting mcount calls to jmp . + 5\n");
+		memcpy(ideal_nop5, ftrace_test_jmp, IDEAL_NOP_SIZE_5);
+		break;
+	}
+}
+#endif
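The three 5-byte candidates probed above, by their byte sequences: the P6 nopl (0f 1f 44 00 00), the prefixed nop (66 66 66 66 90), and, as a last resort, a 2-byte jmp over three padding bytes (presumably eb 03 90 90 90, per the "2 byte jmp + 3 bytes" comment). It must be a single 5-byte instruction rather than several short nops, because a multi-part nop could be executing partway through while the site is repatched, as the comment warns.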

arch/x86/kernel/ftrace.c

@@ -257,14 +257,9 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
 	return mod_code_status;
 }
 
-static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
-
 static unsigned char *ftrace_nop_replace(void)
 {
-	return ftrace_nop;
+	return ideal_nop5;
 }
 
 static int
@@ -338,62 +333,6 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
 
 int __init ftrace_dyn_arch_init(void *data)
 {
-	extern const unsigned char ftrace_test_p6nop[];
-	extern const unsigned char ftrace_test_nop5[];
-	extern const unsigned char ftrace_test_jmp[];
-	int faulted = 0;
-
-	/*
-	 * There is no good nop for all x86 archs.
-	 * We will default to using the P6_NOP5, but first we
-	 * will test to make sure that the nop will actually
-	 * work on this CPU. If it faults, we will then
-	 * go to a lesser efficient 5 byte nop. If that fails
-	 * we then just use a jmp as our nop. This isn't the most
-	 * efficient nop, but we can not use a multi part nop
-	 * since we would then risk being preempted in the middle
-	 * of that nop, and if we enabled tracing then, it might
-	 * cause a system crash.
-	 *
-	 * TODO: check the cpuid to determine the best nop.
-	 */
-	asm volatile (
-		"ftrace_test_jmp:"
-		"jmp ftrace_test_p6nop\n"
-		"nop\n"
-		"nop\n"
-		"nop\n"  /* 2 byte jmp + 3 bytes */
-		"ftrace_test_p6nop:"
-		P6_NOP5
-		"jmp 1f\n"
-		"ftrace_test_nop5:"
-		".byte 0x66,0x66,0x66,0x66,0x90\n"
-		"1:"
-		".section .fixup, \"ax\"\n"
-		"2:	movl $1, %0\n"
-		"	jmp ftrace_test_nop5\n"
-		"3:	movl $2, %0\n"
-		"	jmp 1b\n"
-		".previous\n"
-		_ASM_EXTABLE(ftrace_test_p6nop, 2b)
-		_ASM_EXTABLE(ftrace_test_nop5, 3b)
-		: "=r"(faulted) : "0" (faulted));
-
-	switch (faulted) {
-	case 0:
-		pr_info("converting mcount calls to 0f 1f 44 00 00\n");
-		memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
-		break;
-	case 1:
-		pr_info("converting mcount calls to 66 66 66 66 90\n");
-		memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
-		break;
-	case 2:
-		pr_info("converting mcount calls to jmp . + 5\n");
-		memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
-		break;
-	}
-
 	/* The return code is returned via data */
 	*(unsigned long *)data = 0;

arch/x86/kernel/jump_label.c (new file)

@@ -0,0 +1,50 @@
/*
 * jump label x86 support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 *
 */
#include <linux/jump_label.h>
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/cpu.h>
#include <asm/kprobes.h>
#include <asm/alternative.h>

#ifdef HAVE_JUMP_LABEL

union jump_code_union {
	char code[JUMP_LABEL_NOP_SIZE];
	struct {
		char jump;
		int offset;
	} __attribute__((packed));
};

void arch_jump_label_transform(struct jump_entry *entry,
			       enum jump_label_type type)
{
	union jump_code_union code;

	if (type == JUMP_LABEL_ENABLE) {
		code.jump = 0xe9;
		code.offset = entry->target -
				(entry->code + JUMP_LABEL_NOP_SIZE);
	} else
		memcpy(&code, ideal_nop5, JUMP_LABEL_NOP_SIZE);

	get_online_cpus();
	mutex_lock(&text_mutex);
	text_poke_smp((void *)entry->code, &code, JUMP_LABEL_NOP_SIZE);
	mutex_unlock(&text_mutex);
	put_online_cpus();
}

void arch_jump_label_text_poke_early(jump_label_t addr)
{
	text_poke_early((void *)addr, ideal_nop5, JUMP_LABEL_NOP_SIZE);
}

#endif
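The enable path builds "e9 <rel32>" in jump_code_union, with rel32 measured from the end of the 5-byte instruction. A purely illustrative userspace sketch of that arithmetic:

#include <stdint.h>
#include <stdio.h>

/* rel32 of a "jmp target" encoded at 'site' is relative to site + 5 */
static int32_t jmp_rel32(uint64_t site, uint64_t target)
{
	return (int32_t)(target - (site + 5));
}

int main(void)
{
	/* a site at 0x1000 jumping to 0x1060 encodes as e9 5b 00 00 00 */
	printf("e9 rel32=%08x\n", (uint32_t)jmp_rel32(0x1000, 0x1060));
	return 0;
}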

arch/x86/kernel/kprobes.c

@@ -1218,7 +1218,8 @@ static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src)
 	}
 	/* Check whether the address range is reserved */
 	if (ftrace_text_reserved(src, src + len - 1) ||
-	    alternatives_text_reserved(src, src + len - 1))
+	    alternatives_text_reserved(src, src + len - 1) ||
+	    jump_label_text_reserved(src, src + len - 1))
 		return -EBUSY;
 
 	return len;

arch/x86/kernel/module.c

@@ -239,6 +239,9 @@ int module_finalize(const Elf_Ehdr *hdr,
 		apply_paravirt(pseg, pseg + para->sh_size);
 	}
 
+	/* make jump label nops */
+	jump_label_apply_nops(me);
+
 	return module_bug_finalize(hdr, sechdrs, me);
 }

arch/x86/kernel/setup.c

@@ -112,6 +112,7 @@
 #include <asm/numa_64.h>
 #endif
 #include <asm/mce.h>
+#include <asm/alternative.h>
 
 /*
  * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries.
@@ -726,6 +727,7 @@ void __init setup_arch(char **cmdline_p)
 {
 	int acpi = 0;
 	int k8 = 0;
+	unsigned long flags;
 
 #ifdef CONFIG_X86_32
 	memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
@@ -1071,6 +1073,10 @@ void __init setup_arch(char **cmdline_p)
 	x86_init.oem.banner();
 
 	mcheck_init();
+
+	local_irq_save(flags);
+	arch_init_ideal_nop5();
+	local_irq_restore(flags);
 }
 
 #ifdef CONFIG_X86_32

include/asm-generic/vmlinux.lds.h

@@ -220,6 +220,8 @@
 							\
 	BUG_TABLE					\
 							\
+	JUMP_TABLE					\
+							\
 	/* PCI quirks */				\
 	.pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {	\
 		VMLINUX_SYMBOL(__start_pci_fixups_early) = .;	\
@@ -563,6 +565,14 @@
 #define BUG_TABLE
 #endif
 
+#define JUMP_TABLE						\
+	. = ALIGN(8);						\
+	__jump_table : AT(ADDR(__jump_table) - LOAD_OFFSET) {	\
+		VMLINUX_SYMBOL(__start___jump_table) = .;	\
+		*(__jump_table)					\
+		VMLINUX_SYMBOL(__stop___jump_table) = .;	\
+	}
+
 #ifdef CONFIG_PM_TRACE
 #define TRACEDATA						\
 	. = ALIGN(4);						\

include/linux/dynamic_debug.h

@@ -1,6 +1,8 @@
 #ifndef _DYNAMIC_DEBUG_H
 #define _DYNAMIC_DEBUG_H
 
+#include <linux/jump_label.h>
+
 /* dynamic_printk_enabled, and dynamic_printk_enabled2 are bitmasks in which
  * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They
  * use independent hash functions, to reduce the chance of false positives.
@@ -22,8 +24,6 @@ struct _ddebug {
 	const char *function;
 	const char *filename;
 	const char *format;
-	char primary_hash;
-	char secondary_hash;
 	unsigned int lineno:24;
 	/*
 	 * The flags field controls the behaviour at the callsite.
@@ -33,6 +33,7 @@ struct _ddebug {
 #define _DPRINTK_FLAGS_PRINT   (1<<0)  /* printk() a message using the format */
 #define _DPRINTK_FLAGS_DEFAULT 0
 	unsigned int flags:8;
+	char enabled;
 } __attribute__((aligned(8)));
 
@@ -42,33 +43,35 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n,
 #if defined(CONFIG_DYNAMIC_DEBUG)
 extern int ddebug_remove_module(const char *mod_name);
 
-#define __dynamic_dbg_enabled(dd)  ({					\
-	int __ret = 0;							\
-	if (unlikely((dynamic_debug_enabled & (1LL << DEBUG_HASH)) &&	\
-			(dynamic_debug_enabled2 & (1LL << DEBUG_HASH2))))\
-		if (unlikely(dd.flags))					\
-			__ret = 1;					\
-	__ret; })
-
 #define dynamic_pr_debug(fmt, ...) do {					\
+	__label__ do_printk;						\
+	__label__ out;							\
 	static struct _ddebug descriptor				\
 	__used								\
 	__attribute__((section("__verbose"), aligned(8))) =		\
-	{ KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH,		\
-		DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT };	\
-	if (__dynamic_dbg_enabled(descriptor))				\
-		printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__);		\
+	{ KBUILD_MODNAME, __func__, __FILE__, fmt, __LINE__,		\
+		_DPRINTK_FLAGS_DEFAULT };				\
+	JUMP_LABEL(&descriptor.enabled, do_printk);			\
+	goto out;							\
+do_printk:								\
+	printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__);			\
+out:	;								\
 } while (0)
 
 #define dynamic_dev_dbg(dev, fmt, ...) do {				\
+	__label__ do_printk;						\
+	__label__ out;							\
 	static struct _ddebug descriptor				\
 	__used								\
 	__attribute__((section("__verbose"), aligned(8))) =		\
-	{ KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH,		\
-		DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT };	\
-	if (__dynamic_dbg_enabled(descriptor))				\
-		dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__);	\
+	{ KBUILD_MODNAME, __func__, __FILE__, fmt, __LINE__,		\
+		_DPRINTK_FLAGS_DEFAULT };				\
+	JUMP_LABEL(&descriptor.enabled, do_printk);			\
+	goto out;							\
+do_printk:								\
+	dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__);		\
+out:	;								\
 } while (0)
 
 #else
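The net effect: when a callsite is disabled, dynamic_pr_debug() now costs a single nop that falls through to out:, and flipping descriptor.enabled via enable_jump_label() patches the nop into a jump to do_printk:. That per-callsite switch is what makes the DEBUG_HASH/DEBUG_HASH2 prefilter, deleted here and in lib/dynamic_debug.c below, unnecessary.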

include/linux/jump_label.h (new file)

@ -0,0 +1,64 @@
#ifndef _LINUX_JUMP_LABEL_H
#define _LINUX_JUMP_LABEL_H
#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_HAVE_ARCH_JUMP_LABEL)
# include <asm/jump_label.h>
# define HAVE_JUMP_LABEL
#endif
enum jump_label_type {
JUMP_LABEL_ENABLE,
JUMP_LABEL_DISABLE
};
struct module;
#ifdef HAVE_JUMP_LABEL
extern struct jump_entry __start___jump_table[];
extern struct jump_entry __stop___jump_table[];
extern void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type);
extern void arch_jump_label_text_poke_early(jump_label_t addr);
extern void jump_label_update(unsigned long key, enum jump_label_type type);
extern void jump_label_apply_nops(struct module *mod);
extern int jump_label_text_reserved(void *start, void *end);
#define enable_jump_label(key) \
jump_label_update((unsigned long)key, JUMP_LABEL_ENABLE);
#define disable_jump_label(key) \
jump_label_update((unsigned long)key, JUMP_LABEL_DISABLE);
#else
#define JUMP_LABEL(key, label) \
do { \
if (unlikely(*key)) \
goto label; \
} while (0)
#define enable_jump_label(cond_var) \
do { \
*(cond_var) = 1; \
} while (0)
#define disable_jump_label(cond_var) \
do { \
*(cond_var) = 0; \
} while (0)
static inline int jump_label_apply_nops(struct module *mod)
{
return 0;
}
static inline int jump_label_text_reserved(void *start, void *end)
{
return 0;
}
#endif
#endif
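When the compiler lacks asm goto, or the architecture has no jump label support, the same names fall back to plain C, so callers need no conditional compilation. Roughly what a site reduces to in that case, sketched with a hypothetical int key:

#include <linux/compiler.h>

static int key;

static void site(void)
{
	if (unlikely(key))	/* JUMP_LABEL(&key, slow) just dereferences the key pointer */
		goto slow;
	return;
slow:
	/* slow path, taken only while the key is set */ ;
}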

include/linux/module.h

@@ -350,7 +350,10 @@ struct module
 	struct tracepoint *tracepoints;
 	unsigned int num_tracepoints;
 #endif
-
+#ifdef HAVE_JUMP_LABEL
+	struct jump_entry *jump_entries;
+	unsigned int num_jump_entries;
+#endif
 #ifdef CONFIG_TRACING
 	const char **trace_bprintk_fmt_start;
 	unsigned int num_trace_bprintk_fmt;

include/linux/tracepoint.h

@@ -17,6 +17,7 @@
 #include <linux/errno.h>
 #include <linux/types.h>
 #include <linux/rcupdate.h>
+#include <linux/jump_label.h>
 
 struct module;
 struct tracepoint;
@@ -145,7 +146,9 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin,
 	extern struct tracepoint __tracepoint_##name;			\
 	static inline void trace_##name(proto)				\
 	{								\
-		if (unlikely(__tracepoint_##name.state))		\
+		JUMP_LABEL(&__tracepoint_##name.state, do_trace);	\
+		return;							\
+do_trace:								\
 			__DO_TRACE(&__tracepoint_##name,		\
 				TP_PROTO(data_proto),			\
 				TP_ARGS(data_args));			\

kernel/Makefile

@@ -10,7 +10,7 @@ obj-y     = sched.o fork.o exec_domain.o panic.o printk.o \
 	    kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
 	    hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
 	    notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o \
-	    async.o range.o
+	    async.o range.o jump_label.o
 obj-$(CONFIG_HAVE_EARLY_RES) += early_res.o
 obj-y += groups.o

kernel/jump_label.c (new file, 429 lines)

@@ -0,0 +1,429 @@
/*
* jump label support
*
* Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
*
*/
#include <linux/jump_label.h>
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#ifdef HAVE_JUMP_LABEL
#define JUMP_LABEL_HASH_BITS 6
#define JUMP_LABEL_TABLE_SIZE (1 << JUMP_LABEL_HASH_BITS)
static struct hlist_head jump_label_table[JUMP_LABEL_TABLE_SIZE];
/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);
struct jump_label_entry {
struct hlist_node hlist;
struct jump_entry *table;
int nr_entries;
/* hang modules off here */
struct hlist_head modules;
unsigned long key;
};
struct jump_label_module_entry {
struct hlist_node hlist;
struct jump_entry *table;
int nr_entries;
struct module *mod;
};
static int jump_label_cmp(const void *a, const void *b)
{
const struct jump_entry *jea = a;
const struct jump_entry *jeb = b;
if (jea->key < jeb->key)
return -1;
if (jea->key > jeb->key)
return 1;
return 0;
}
static void
sort_jump_label_entries(struct jump_entry *start, struct jump_entry *stop)
{
unsigned long size;
size = (((unsigned long)stop - (unsigned long)start)
/ sizeof(struct jump_entry));
sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
}
static struct jump_label_entry *get_jump_label_entry(jump_label_t key)
{
struct hlist_head *head;
struct hlist_node *node;
struct jump_label_entry *e;
u32 hash = jhash((void *)&key, sizeof(jump_label_t), 0);
head = &jump_label_table[hash & (JUMP_LABEL_TABLE_SIZE - 1)];
hlist_for_each_entry(e, node, head, hlist) {
if (key == e->key)
return e;
}
return NULL;
}
static struct jump_label_entry *
add_jump_label_entry(jump_label_t key, int nr_entries, struct jump_entry *table)
{
struct hlist_head *head;
struct jump_label_entry *e;
u32 hash;
e = get_jump_label_entry(key);
if (e)
return ERR_PTR(-EEXIST);
e = kmalloc(sizeof(struct jump_label_entry), GFP_KERNEL);
if (!e)
return ERR_PTR(-ENOMEM);
hash = jhash((void *)&key, sizeof(jump_label_t), 0);
head = &jump_label_table[hash & (JUMP_LABEL_TABLE_SIZE - 1)];
e->key = key;
e->table = table;
e->nr_entries = nr_entries;
INIT_HLIST_HEAD(&(e->modules));
hlist_add_head(&e->hlist, head);
return e;
}
static int
build_jump_label_hashtable(struct jump_entry *start, struct jump_entry *stop)
{
struct jump_entry *iter, *iter_begin;
struct jump_label_entry *entry;
int count;
sort_jump_label_entries(start, stop);
iter = start;
while (iter < stop) {
entry = get_jump_label_entry(iter->key);
if (!entry) {
iter_begin = iter;
count = 0;
while ((iter < stop) &&
(iter->key == iter_begin->key)) {
iter++;
count++;
}
entry = add_jump_label_entry(iter_begin->key,
count, iter_begin);
if (IS_ERR(entry))
return PTR_ERR(entry);
} else {
WARN_ONCE(1, KERN_ERR "build_jump_label_hashtable: unexpected entry!\n");
return -1;
}
}
return 0;
}
/***
 * jump_label_update - update jump label text
 * @key - key value associated with a jump label
 * @type - enum set to JUMP_LABEL_ENABLE or JUMP_LABEL_DISABLE
 *
 * Will enable/disable the jump for jump label @key, depending on the
 * value of @type.
 *
 */
void jump_label_update(unsigned long key, enum jump_label_type type)
{
struct jump_entry *iter;
struct jump_label_entry *entry;
struct hlist_node *module_node;
struct jump_label_module_entry *e_module;
int count;
mutex_lock(&jump_label_mutex);
entry = get_jump_label_entry((jump_label_t)key);
if (entry) {
count = entry->nr_entries;
iter = entry->table;
while (count--) {
if (kernel_text_address(iter->code))
arch_jump_label_transform(iter, type);
iter++;
}
/* enable/disable jump labels in modules */
hlist_for_each_entry(e_module, module_node, &(entry->modules),
hlist) {
count = e_module->nr_entries;
iter = e_module->table;
while (count--) {
if (kernel_text_address(iter->code))
arch_jump_label_transform(iter, type);
iter++;
}
}
}
mutex_unlock(&jump_label_mutex);
}
static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
if (entry->code <= (unsigned long)end &&
entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
return 1;
return 0;
}
#ifdef CONFIG_MODULES
static int module_conflict(void *start, void *end)
{
struct hlist_head *head;
struct hlist_node *node, *node_next, *module_node, *module_node_next;
struct jump_label_entry *e;
struct jump_label_module_entry *e_module;
struct jump_entry *iter;
int i, count;
int conflict = 0;
for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) {
head = &jump_label_table[i];
hlist_for_each_entry_safe(e, node, node_next, head, hlist) {
hlist_for_each_entry_safe(e_module, module_node,
module_node_next,
&(e->modules), hlist) {
count = e_module->nr_entries;
iter = e_module->table;
while (count--) {
if (addr_conflict(iter, start, end)) {
conflict = 1;
goto out;
}
iter++;
}
}
}
}
out:
return conflict;
}
#endif
/***
* jump_label_text_reserved - check if addr range is reserved
* @start: start text addr
* @end: end text addr
*
* checks if the text addr located between @start and @end
* overlaps with any of the jump label patch addresses. Code
* that wants to modify kernel text should first verify that
* it does not overlap with any of the jump label addresses.
*
* returns 1 if there is an overlap, 0 otherwise
*/
int jump_label_text_reserved(void *start, void *end)
{
struct jump_entry *iter;
struct jump_entry *iter_start = __start___jump_table;
struct jump_entry *iter_stop = __stop___jump_table;
int conflict = 0;
mutex_lock(&jump_label_mutex);
iter = iter_start;
while (iter < iter_stop) {
if (addr_conflict(iter, start, end)) {
conflict = 1;
goto out;
}
iter++;
}
/* now check modules */
#ifdef CONFIG_MODULES
conflict = module_conflict(start, end);
#endif
out:
mutex_unlock(&jump_label_mutex);
return conflict;
}
static __init int init_jump_label(void)
{
int ret;
struct jump_entry *iter_start = __start___jump_table;
struct jump_entry *iter_stop = __stop___jump_table;
struct jump_entry *iter;
mutex_lock(&jump_label_mutex);
ret = build_jump_label_hashtable(__start___jump_table,
__stop___jump_table);
iter = iter_start;
while (iter < iter_stop) {
arch_jump_label_text_poke_early(iter->code);
iter++;
}
mutex_unlock(&jump_label_mutex);
return ret;
}
early_initcall(init_jump_label);
#ifdef CONFIG_MODULES
static struct jump_label_module_entry *
add_jump_label_module_entry(struct jump_label_entry *entry,
struct jump_entry *iter_begin,
int count, struct module *mod)
{
struct jump_label_module_entry *e;
e = kmalloc(sizeof(struct jump_label_module_entry), GFP_KERNEL);
if (!e)
return ERR_PTR(-ENOMEM);
e->mod = mod;
e->nr_entries = count;
e->table = iter_begin;
hlist_add_head(&e->hlist, &entry->modules);
return e;
}
static int add_jump_label_module(struct module *mod)
{
struct jump_entry *iter, *iter_begin;
struct jump_label_entry *entry;
struct jump_label_module_entry *module_entry;
int count;
/* if the module doesn't have jump label entries, just return */
if (!mod->num_jump_entries)
return 0;
sort_jump_label_entries(mod->jump_entries,
mod->jump_entries + mod->num_jump_entries);
iter = mod->jump_entries;
while (iter < mod->jump_entries + mod->num_jump_entries) {
entry = get_jump_label_entry(iter->key);
iter_begin = iter;
count = 0;
while ((iter < mod->jump_entries + mod->num_jump_entries) &&
(iter->key == iter_begin->key)) {
iter++;
count++;
}
if (!entry) {
entry = add_jump_label_entry(iter_begin->key, 0, NULL);
if (IS_ERR(entry))
return PTR_ERR(entry);
}
module_entry = add_jump_label_module_entry(entry, iter_begin,
count, mod);
if (IS_ERR(module_entry))
return PTR_ERR(module_entry);
}
return 0;
}
static void remove_jump_label_module(struct module *mod)
{
struct hlist_head *head;
struct hlist_node *node, *node_next, *module_node, *module_node_next;
struct jump_label_entry *e;
struct jump_label_module_entry *e_module;
int i;
/* if the module doesn't have jump label entries, just return */
if (!mod->num_jump_entries)
return;
for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) {
head = &jump_label_table[i];
hlist_for_each_entry_safe(e, node, node_next, head, hlist) {
hlist_for_each_entry_safe(e_module, module_node,
module_node_next,
&(e->modules), hlist) {
if (e_module->mod == mod) {
hlist_del(&e_module->hlist);
kfree(e_module);
}
}
if (hlist_empty(&e->modules) && (e->nr_entries == 0)) {
hlist_del(&e->hlist);
kfree(e);
}
}
}
}
static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
void *data)
{
struct module *mod = data;
int ret = 0;
switch (val) {
case MODULE_STATE_COMING:
mutex_lock(&jump_label_mutex);
ret = add_jump_label_module(mod);
if (ret)
remove_jump_label_module(mod);
mutex_unlock(&jump_label_mutex);
break;
case MODULE_STATE_GOING:
mutex_lock(&jump_label_mutex);
remove_jump_label_module(mod);
mutex_unlock(&jump_label_mutex);
break;
}
return ret;
}
/***
 * jump_label_apply_nops - patch module jump labels with arch_jump_label_text_poke_early()
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads, patch these with arch_jump_label_text_poke_early(), which is specified by
 * the arch specific jump label code.
 */
void jump_label_apply_nops(struct module *mod)
{
struct jump_entry *iter;
/* if the module doesn't have jump label entries, just return */
if (!mod->num_jump_entries)
return;
iter = mod->jump_entries;
while (iter < mod->jump_entries + mod->num_jump_entries) {
arch_jump_label_text_poke_early(iter->code);
iter++;
}
}
struct notifier_block jump_label_module_nb = {
.notifier_call = jump_label_module_notify,
.priority = 0,
};
static __init int init_jump_label_module(void)
{
return register_module_notifier(&jump_label_module_nb);
}
early_initcall(init_jump_label_module);
#endif /* CONFIG_MODULES */
#endif
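Bookkeeping in this file: key values are hashed (jhash) into a 64-bucket table; each jump_label_entry collects the built-in sites sharing one key, and jump_label_module_entry records hang off entry->modules for sites in modules. jump_label_update() walks both lists under jump_label_mutex, skipping addresses that are not (or no longer) kernel text, and the module notifier keeps the table in sync on MODULE_STATE_COMING/GOING.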

kernel/kprobes.c

@@ -47,6 +47,7 @@
 #include <linux/memory.h>
 #include <linux/ftrace.h>
 #include <linux/cpu.h>
+#include <linux/jump_label.h>
 
 #include <asm-generic/sections.h>
 #include <asm/cacheflush.h>
@@ -1146,7 +1147,8 @@ int __kprobes register_kprobe(struct kprobe *p)
 	preempt_disable();
 	if (!kernel_text_address((unsigned long) p->addr) ||
 	    in_kprobes_functions((unsigned long) p->addr) ||
-	    ftrace_text_reserved(p->addr, p->addr)) {
+	    ftrace_text_reserved(p->addr, p->addr) ||
+	    jump_label_text_reserved(p->addr, p->addr)) {
 		preempt_enable();
 		return -EINVAL;
 	}

kernel/module.c

@@ -55,6 +55,7 @@
 #include <linux/async.h>
 #include <linux/percpu.h>
 #include <linux/kmemleak.h>
+#include <linux/jump_label.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/module.h>
@@ -2308,6 +2309,11 @@ static void find_module_sections(struct module *mod, struct load_info *info)
 					     sizeof(*mod->tracepoints),
 					     &mod->num_tracepoints);
 #endif
+#ifdef HAVE_JUMP_LABEL
+	mod->jump_entries = section_objs(info, "__jump_table",
+					sizeof(*mod->jump_entries),
+					&mod->num_jump_entries);
+#endif
 #ifdef CONFIG_EVENT_TRACING
 	mod->trace_events = section_objs(info, "_ftrace_events",
 					 sizeof(*mod->trace_events),

kernel/trace/trace_workqueue.c

@@ -263,6 +263,11 @@ int __init trace_workqueue_early_init(void)
 {
 	int ret, cpu;
 
+	for_each_possible_cpu(cpu) {
+		spin_lock_init(&workqueue_cpu_stat(cpu)->lock);
+		INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list);
+	}
+
 	ret = register_trace_workqueue_insertion(probe_workqueue_insertion, NULL);
 	if (ret)
 		goto out;
@@ -279,11 +284,6 @@ int __init trace_workqueue_early_init(void)
 	if (ret)
 		goto no_creation;
 
-	for_each_possible_cpu(cpu) {
-		spin_lock_init(&workqueue_cpu_stat(cpu)->lock);
-		INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list);
-	}
-
 	return 0;
 
 no_creation:

kernel/tracepoint.c

@@ -25,6 +25,7 @@
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/jump_label.h>
 
 extern struct tracepoint __start___tracepoints[];
 extern struct tracepoint __stop___tracepoints[];
@@ -263,7 +264,13 @@ static void set_tracepoint(struct tracepoint_entry **entry,
 	 * is used.
 	 */
 	rcu_assign_pointer(elem->funcs, (*entry)->funcs);
-	elem->state = active;
+	if (!elem->state && active) {
+		enable_jump_label(&elem->state);
+		elem->state = active;
+	} else if (elem->state && !active) {
+		disable_jump_label(&elem->state);
+		elem->state = active;
+	}
 }
 
 /*
@@ -277,7 +284,10 @@ static void disable_tracepoint(struct tracepoint *elem)
 	if (elem->unregfunc && elem->state)
 		elem->unregfunc();
 
-	elem->state = 0;
+	if (elem->state) {
+		disable_jump_label(&elem->state);
+		elem->state = 0;
+	}
 	rcu_assign_pointer(elem->funcs, NULL);
 }

lib/dynamic_debug.c

@@ -26,19 +26,11 @@
 #include <linux/dynamic_debug.h>
 #include <linux/debugfs.h>
 #include <linux/slab.h>
+#include <linux/jump_label.h>
 
 extern struct _ddebug __start___verbose[];
 extern struct _ddebug __stop___verbose[];
 
-/* dynamic_debug_enabled, and dynamic_debug_enabled2 are bitmasks in which
- * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They
- * use independent hash functions, to reduce the chance of false positives.
- */
-long long dynamic_debug_enabled;
-EXPORT_SYMBOL_GPL(dynamic_debug_enabled);
-long long dynamic_debug_enabled2;
-EXPORT_SYMBOL_GPL(dynamic_debug_enabled2);
-
 struct ddebug_table {
 	struct list_head link;
 	char *mod_name;
@@ -87,26 +79,6 @@ static char *ddebug_describe_flags(struct _ddebug *dp, char *buf,
 	return buf;
 }
 
-/*
- * must be called with ddebug_lock held
- */
-static int disabled_hash(char hash, bool first_table)
-{
-	struct ddebug_table *dt;
-	char table_hash_value;
-
-	list_for_each_entry(dt, &ddebug_tables, link) {
-		if (first_table)
-			table_hash_value = dt->ddebugs->primary_hash;
-		else
-			table_hash_value = dt->ddebugs->secondary_hash;
-		if (dt->num_enabled && (hash == table_hash_value))
-			return 0;
-	}
-	return 1;
-}
-
 /*
  * Search the tables for _ddebug's which match the given
  * `query' and apply the `flags' and `mask' to them. Tells
@@ -170,17 +142,9 @@ static void ddebug_change(const struct ddebug_query *query,
 				dt->num_enabled++;
 			dp->flags = newflags;
 			if (newflags) {
-				dynamic_debug_enabled |=
-						(1LL << dp->primary_hash);
-				dynamic_debug_enabled2 |=
-						(1LL << dp->secondary_hash);
+				enable_jump_label(&dp->enabled);
 			} else {
-				if (disabled_hash(dp->primary_hash, true))
-					dynamic_debug_enabled &=
-						~(1LL << dp->primary_hash);
-				if (disabled_hash(dp->secondary_hash, false))
-					dynamic_debug_enabled2 &=
-						~(1LL << dp->secondary_hash);
+				disable_jump_label(&dp->enabled);
 			}
 			if (verbose)
 				printk(KERN_INFO

scripts/Makefile.lib

@@ -101,14 +101,6 @@ basename_flags = -D"KBUILD_BASENAME=KBUILD_STR($(call name-fix,$(basetarget)))"
 modname_flags  = $(if $(filter 1,$(words $(modname))),\
                  -D"KBUILD_MODNAME=KBUILD_STR($(call name-fix,$(modname)))")
 
-#hash values
-ifdef CONFIG_DYNAMIC_DEBUG
-debug_flags = -D"DEBUG_HASH=$(shell ./scripts/basic/hash djb2 $(@D)$(modname))"\
-              -D"DEBUG_HASH2=$(shell ./scripts/basic/hash r5 $(@D)$(modname))"
-else
-debug_flags =
-endif
-
 orig_c_flags   = $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(KBUILD_SUBDIR_CCFLAGS) \
                  $(ccflags-y) $(CFLAGS_$(basetarget).o)
 _c_flags       = $(filter-out $(CFLAGS_REMOVE_$(basetarget).o), $(orig_c_flags))
@@ -152,8 +144,7 @@ endif
 
 c_flags        = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE)     \
 		 $(__c_flags) $(modkern_cflags)                           \
-		 -D"KBUILD_STR(s)=\#s" $(basename_flags) $(modname_flags) \
-		 $(debug_flags)
+		 -D"KBUILD_STR(s)=\#s" $(basename_flags) $(modname_flags)
 
 a_flags        = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE)     \
 		 $(__a_flags) $(modkern_aflags)

scripts/basic/Makefile

@@ -9,7 +9,7 @@
 # fixdep:  Used to generate dependency information during build process
 # docproc: Used in Documentation/DocBook
 
-hostprogs-y	:= fixdep docproc hash
+hostprogs-y	:= fixdep docproc
 always		:= $(hostprogs-y)
 
 # fixdep is needed to compile other host programs

scripts/basic/hash.c (deleted)

@@ -1,64 +0,0 @@
/*
 * Copyright (C) 2008 Red Hat, Inc., Jason Baron <jbaron@redhat.com>
 *
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DYNAMIC_DEBUG_HASH_BITS 6

static const char *program;

static void usage(void)
{
	printf("Usage: %s <djb2|r5> <modname>\n", program);
	exit(1);
}

/* djb2 hashing algorithm by Dan Bernstein. From:
 * http://www.cse.yorku.ca/~oz/hash.html
 */
static unsigned int djb2_hash(char *str)
{
	unsigned long hash = 5381;
	int c;

	c = *str;
	while (c) {
		hash = ((hash << 5) + hash) + c;
		c = *++str;
	}
	return (unsigned int)(hash & ((1 << DYNAMIC_DEBUG_HASH_BITS) - 1));
}

static unsigned int r5_hash(char *str)
{
	unsigned long hash = 0;
	int c;

	c = *str;
	while (c) {
		hash = (hash + (c << 4) + (c >> 4)) * 11;
		c = *++str;
	}
	return (unsigned int)(hash & ((1 << DYNAMIC_DEBUG_HASH_BITS) - 1));
}

int main(int argc, char *argv[])
{
	program = argv[0];

	if (argc != 3)
		usage();
	if (!strcmp(argv[1], "djb2"))
		printf("%d\n", djb2_hash(argv[2]));
	else if (!strcmp(argv[1], "r5"))
		printf("%d\n", r5_hash(argv[2]));
	else
		usage();
	exit(0);
}

scripts/gcc-goto.sh (new file, 5 lines)

@@ -0,0 +1,5 @@
#!/bin/sh
# Test for gcc 'asm goto' support
# Copyright (C) 2010, Jason Baron <jbaron@redhat.com>

echo "int main(void) { entry: asm goto (\"\"::::entry); return 0; }" | $1 -x c - -c -o /dev/null >/dev/null 2>&1 && echo "y"
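The probe is the minimal asm goto construct itself (gcc 4.5+): the fourth colon introduces a list of C labels the inline assembly may branch to, which is exactly the feature the JUMP_LABEL() macros depend on. Pretty-printed, the one-line test program reads:

int main(void)
{
entry:
	asm goto("" : : : : entry);	/* outputs : inputs : clobbers : labels */
	return 0;
}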