
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

commit 17eea0df5f
David S. Miller, 2012-05-20 21:53:04 -04:00
60 changed files with 514 additions and 260 deletions

View File

@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 4
SUBLEVEL = 0
EXTRAVERSION = -rc7
EXTRAVERSION =
NAME = Saber-toothed Squirrel
# *DOCUMENTATION*
@ -442,7 +442,7 @@ asm-generic:
no-dot-config-targets := clean mrproper distclean \
cscope gtags TAGS tags help %docs check% coccicheck \
include/linux/version.h headers_% archheaders \
include/linux/version.h headers_% archheaders archscripts \
kernelversion %src-pkg
config-targets := 0
@ -979,7 +979,7 @@ prepare1: prepare2 include/linux/version.h include/generated/utsrelease.h \
include/config/auto.conf
$(cmd_crmodverdir)
archprepare: archheaders prepare1 scripts_basic
archprepare: archheaders archscripts prepare1 scripts_basic
prepare0: archprepare FORCE
$(Q)$(MAKE) $(build)=.
@ -1049,8 +1049,11 @@ hdr-dst = $(if $(KBUILD_HEADERS), dst=include/asm-$(hdr-arch), dst=include/asm)
PHONY += archheaders
archheaders:
PHONY += archscripts
archscripts:
PHONY += __headers
__headers: include/linux/version.h scripts_basic asm-generic archheaders FORCE
__headers: include/linux/version.h scripts_basic asm-generic archheaders archscripts FORCE
$(Q)$(MAKE) $(build)=scripts build_unifdef
PHONY += headers_install_all

View File

@ -42,7 +42,8 @@ sirfsoc_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num)
static __init void sirfsoc_irq_init(void)
{
sirfsoc_alloc_gc(sirfsoc_intc_base, 0, 32);
sirfsoc_alloc_gc(sirfsoc_intc_base + 4, 32, SIRFSOC_INTENAL_IRQ_END - 32);
sirfsoc_alloc_gc(sirfsoc_intc_base + 4, 32,
SIRFSOC_INTENAL_IRQ_END + 1 - 32);
writel_relaxed(0, sirfsoc_intc_base + SIRFSOC_INT_RISC_LEVEL0);
writel_relaxed(0, sirfsoc_intc_base + SIRFSOC_INT_RISC_LEVEL1);
@ -68,7 +69,8 @@ void __init sirfsoc_of_irq_init(void)
if (!sirfsoc_intc_base)
panic("unable to map intc cpu registers\n");
irq_domain_add_legacy(np, 32, 0, 0, &irq_domain_simple_ops, NULL);
irq_domain_add_legacy(np, SIRFSOC_INTENAL_IRQ_END + 1, 0, 0,
&irq_domain_simple_ops, NULL);
of_node_put(np);

View File

@ -53,10 +53,10 @@ static void flowctrl_update(u8 offset, u32 value)
void flowctrl_write_cpu_csr(unsigned int cpuid, u32 value)
{
return flowctrl_update(flowctrl_offset_halt_cpu[cpuid], value);
return flowctrl_update(flowctrl_offset_cpu_csr[cpuid], value);
}
void flowctrl_write_cpu_halt(unsigned int cpuid, u32 value)
{
return flowctrl_update(flowctrl_offset_cpu_csr[cpuid], value);
return flowctrl_update(flowctrl_offset_halt_cpu[cpuid], value);
}

View File

@ -247,7 +247,9 @@ good_area:
return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
check_stack:
if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
/* Don't allow expansion below FIRST_USER_ADDRESS */
if (vma->vm_flags & VM_GROWSDOWN &&
addr >= FIRST_USER_ADDRESS && !expand_stack(vma, addr))
goto good_area;
out:
return fault;

View File

@ -489,7 +489,8 @@ static void __init build_mem_type_table(void)
*/
for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
mem_types[i].prot_pte |= PTE_EXT_AF;
mem_types[i].prot_sect |= PMD_SECT_AF;
if (mem_types[i].prot_sect)
mem_types[i].prot_sect |= PMD_SECT_AF;
}
kern_pgprot |= PTE_EXT_AF;
vecs_pgprot |= PTE_EXT_AF;

View File

@ -11,6 +11,7 @@
#include <linux/types.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/signal.h>
@ -432,7 +433,10 @@ void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
static void vfp_enable(void *unused)
{
u32 access = get_copro_access();
u32 access;
BUG_ON(preemptible());
access = get_copro_access();
/*
* Enable full access to VFP (cp10 and cp11)
@ -573,12 +577,6 @@ int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
* entry.
*/
hwstate->fpscr &= ~(FPSCR_LENGTH_MASK | FPSCR_STRIDE_MASK);
/*
* Disable VFP in the hwstate so that we can detect if it gets
* used.
*/
hwstate->fpexc &= ~FPEXC_EN;
return 0;
}
@ -591,12 +589,8 @@ int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
unsigned long fpexc;
int err = 0;
/*
* If VFP has been used, then disable it to avoid corrupting
* the new thread state.
*/
if (hwstate->fpexc & FPEXC_EN)
vfp_flush_hwstate(thread);
/* Disable VFP to avoid corrupting the new thread state. */
vfp_flush_hwstate(thread);
/*
* Copy the floating point registers. There can be unused
@ -657,7 +651,7 @@ static int __init vfp_init(void)
unsigned int cpu_arch = cpu_architecture();
if (cpu_arch >= CPU_ARCH_ARMv6)
vfp_enable(NULL);
on_each_cpu(vfp_enable, NULL, 1);
/*
* First check that there is a VFP that we can use.
@ -678,8 +672,6 @@ static int __init vfp_init(void)
} else {
hotcpu_notifier(vfp_hotplug, 0);
smp_call_function(vfp_enable, NULL, 1);
VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT; /* Extract the architecture version */
printk("implementor %02x architecture %d part %02x variant %x rev %x\n",
(vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,

View File

@ -135,10 +135,6 @@ unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) ((tsk)->thread.frame0->pc)
#define KSTK_ESP(tsk) ((tsk)->thread.frame0->sp)
/* Allocation and freeing of basic task resources. */
extern struct task_struct *alloc_task_struct_node(int node);
extern void free_task_struct(struct task_struct *p);
#define cpu_relax() barrier()
/* data cache prefetch */

View File

@ -21,7 +21,12 @@
#define ARCH_HAS_PREFETCH
static inline void prefetch(const void *addr)
{
__asm__("ldw 0(%0), %%r0" : : "r" (addr));
__asm__(
#ifndef CONFIG_PA20
/* Need to avoid prefetch of NULL on PA7300LC */
" extrw,u,= %0,31,32,%%r0\n"
#endif
" ldw 0(%0), %%r0" : : "r" (addr));
}
/* LDD is a PA2.0 addition. */
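
The new PA1.1 path uses extrw,u,= to nullify the following ldw when the address is zero, so the PA7300LC never prefetches from NULL. In portable C terms the effect is roughly a conditional prefetch; a minimal stand-alone sketch (using GCC's __builtin_prefetch, not the kernel's inline asm):

#include <stddef.h>

/* Illustrative only: what the extrw,u,= nullification achieves in C terms.
 * On PA7300LC a prefetch from address 0 must be avoided, so the load that
 * implements prefetch() is skipped when addr == NULL. */
static inline void prefetch_nonnull(const void *addr)
{
        if (addr != NULL)
                __builtin_prefetch(addr);       /* GCC builtin; read prefetch */
}

int main(void)
{
        int x = 42;
        prefetch_nonnull(&x);
        prefetch_nonnull(NULL);                 /* safely ignored */
        return 0;
}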

View File

@ -581,7 +581,11 @@
*/
cmpiclr,= 0x01,\tmp,%r0
ldi (_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
#ifdef CONFIG_64BIT
depd,z \prot,8,7,\prot
#else
depw,z \prot,8,7,\prot
#endif
/*
* OK, it is in the temp alias region, check whether "from" or "to".
* Check "subtle" note in pacache.S re: r23/r26.

View File

@ -692,7 +692,7 @@ ENTRY(flush_icache_page_asm)
/* Purge any old translation */
pitlb (%sr0,%r28)
pitlb (%sr4,%r28)
ldil L%icache_stride, %r1
ldw R%icache_stride(%r1), %r1
@ -706,27 +706,29 @@ ENTRY(flush_icache_page_asm)
sub %r25, %r1, %r25
1: fic,m %r1(%r28)
fic,m %r1(%r28)
fic,m %r1(%r28)
fic,m %r1(%r28)
fic,m %r1(%r28)
fic,m %r1(%r28)
fic,m %r1(%r28)
fic,m %r1(%r28)
fic,m %r1(%r28)
fic,m %r1(%r28)
fic,m %r1(%r28)
fic,m %r1(%r28)
fic,m %r1(%r28)
fic,m %r1(%r28)
fic,m %r1(%r28)
/* fic only has the type 26 form on PA1.1, requiring an
* explicit space specification, so use %sr4 */
1: fic,m %r1(%sr4,%r28)
fic,m %r1(%sr4,%r28)
fic,m %r1(%sr4,%r28)
fic,m %r1(%sr4,%r28)
fic,m %r1(%sr4,%r28)
fic,m %r1(%sr4,%r28)
fic,m %r1(%sr4,%r28)
fic,m %r1(%sr4,%r28)
fic,m %r1(%sr4,%r28)
fic,m %r1(%sr4,%r28)
fic,m %r1(%sr4,%r28)
fic,m %r1(%sr4,%r28)
fic,m %r1(%sr4,%r28)
fic,m %r1(%sr4,%r28)
fic,m %r1(%sr4,%r28)
cmpb,COND(<<) %r28, %r25,1b
fic,m %r1(%r28)
fic,m %r1(%sr4,%r28)
sync
bv %r0(%r2)
pitlb (%sr0,%r25)
pitlb (%sr4,%r25)
.exit
.procend

View File

@ -11,6 +11,7 @@ config TILE
select GENERIC_IRQ_PROBE
select GENERIC_PENDING_IRQ if SMP
select GENERIC_IRQ_SHOW
select HAVE_SYSCALL_WRAPPERS if TILEGX
select SYS_HYPERVISOR
select ARCH_HAVE_NMI_SAFE_CMPXCHG

View File

@ -134,6 +134,9 @@ KBUILD_CFLAGS += $(call cc-option,-mno-avx,)
KBUILD_CFLAGS += $(mflags-y)
KBUILD_AFLAGS += $(mflags-y)
archscripts:
$(Q)$(MAKE) $(build)=arch/x86/tools relocs
###
# Syscall table generation

View File

@ -40,13 +40,12 @@ OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
$(obj)/vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)
targets += vmlinux.bin.all vmlinux.relocs
targets += vmlinux.bin.all vmlinux.relocs relocs
hostprogs-$(CONFIG_X86_NEED_RELOCS) += relocs
CMD_RELOCS = arch/x86/tools/relocs
quiet_cmd_relocs = RELOCS $@
cmd_relocs = $(obj)/relocs $< > $@;$(obj)/relocs --abs-relocs $<
$(obj)/vmlinux.relocs: vmlinux $(obj)/relocs FORCE
cmd_relocs = $(CMD_RELOCS) $< > $@;$(CMD_RELOCS) --abs-relocs $<
$(obj)/vmlinux.relocs: vmlinux FORCE
$(call if_changed,relocs)
vmlinux.bin.all-y := $(obj)/vmlinux.bin

View File

@ -170,6 +170,9 @@ static inline int kvm_para_available(void)
unsigned int eax, ebx, ecx, edx;
char signature[13];
if (boot_cpu_data.cpuid_level < 0)
return 0; /* So we don't blow up on old processors */
cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx);
memcpy(signature + 0, &ebx, 4);
memcpy(signature + 4, &ecx, 4);

View File

@ -593,7 +593,7 @@ void __init acpi_set_irq_model_ioapic(void)
#ifdef CONFIG_ACPI_HOTPLUG_CPU
#include <acpi/processor.h>
static void __cpuinitdata acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
static void __cpuinit acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
{
#ifdef CONFIG_ACPI_NUMA
int nid;

View File

@ -945,9 +945,10 @@ struct mce_info {
atomic_t inuse;
struct task_struct *t;
__u64 paddr;
int restartable;
} mce_info[MCE_INFO_MAX];
static void mce_save_info(__u64 addr)
static void mce_save_info(__u64 addr, int c)
{
struct mce_info *mi;
@ -955,6 +956,7 @@ static void mce_save_info(__u64 addr)
if (atomic_cmpxchg(&mi->inuse, 0, 1) == 0) {
mi->t = current;
mi->paddr = addr;
mi->restartable = c;
return;
}
}
@ -1130,7 +1132,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
mce_panic("Fatal machine check on current CPU", &m, msg);
if (worst == MCE_AR_SEVERITY) {
/* schedule action before return to userland */
mce_save_info(m.addr);
mce_save_info(m.addr, m.mcgstatus & MCG_STATUS_RIPV);
set_thread_flag(TIF_MCE_NOTIFY);
} else if (kill_it) {
force_sig(SIGBUS, current);
@ -1179,7 +1181,13 @@ void mce_notify_process(void)
pr_err("Uncorrected hardware memory error in user-access at %llx",
mi->paddr);
if (memory_failure(pfn, MCE_VECTOR, MF_ACTION_REQUIRED) < 0) {
/*
* We must call memory_failure() here even if the current process is
* doomed. We still need to mark the page as poisoned and alert any
* other users of the page.
*/
if (memory_failure(pfn, MCE_VECTOR, MF_ACTION_REQUIRED) < 0 ||
mi->restartable == 0) {
pr_err("Memory error not recovered");
force_sig(SIGBUS, current);
}
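
The new restartable field records whether MCG_STATUS_RIPV was set when the machine check was taken; mce_notify_process() now sends SIGBUS either when memory_failure() fails or when the interrupted instruction pointer cannot be restarted. A stand-alone model of that decision (function and variable names here are illustrative, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical model of the recovery decision above: the task only survives
 * if memory_failure() succeeded AND the machine check left a restartable
 * instruction pointer (MCG_STATUS_RIPV was set). */
static bool task_survives(int memory_failure_ret, bool ripv_set)
{
        if (memory_failure_ret < 0 || !ripv_set)
                return false;   /* "Memory error not recovered" -> SIGBUS */
        return true;
}

int main(void)
{
        printf("%d\n", task_survives(0, true));   /* 1: recovered, restartable */
        printf("%d\n", task_survives(0, false));  /* 0: RIPV clear, kill task  */
        return 0;
}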

View File

@ -147,12 +147,6 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
memset(csig, 0, sizeof(*csig));
if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
cpu_has(c, X86_FEATURE_IA64)) {
pr_err("CPU%d not a capable Intel processor\n", cpu_num);
return -1;
}
csig->sig = cpuid_eax(0x00000001);
if ((c->x86_model >= 5) || (c->x86 > 6)) {
@ -463,6 +457,14 @@ static struct microcode_ops microcode_intel_ops = {
struct microcode_ops * __init init_intel_microcode(void)
{
struct cpuinfo_x86 *c = &cpu_data(0);
if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
cpu_has(c, X86_FEATURE_IA64)) {
pr_err("Intel CPU family 0x%x not supported\n", c->x86);
return NULL;
}
return &microcode_intel_ops;
}

arch/x86/tools/.gitignore (new file)
View File

@ -0,0 +1 @@
relocs

View File

@ -36,3 +36,7 @@ HOSTCFLAGS_insn_sanity.o := -Wall -I$(objtree)/arch/x86/lib/ -I$(srctree)/arch/x
$(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
$(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
HOST_EXTRACFLAGS += -I$(srctree)/tools/include
hostprogs-y += relocs
relocs: $(obj)/relocs

View File

@ -18,6 +18,8 @@ static void die(char *fmt, ...);
static Elf32_Ehdr ehdr;
static unsigned long reloc_count, reloc_idx;
static unsigned long *relocs;
static unsigned long reloc16_count, reloc16_idx;
static unsigned long *relocs16;
struct section {
Elf32_Shdr shdr;
@ -28,52 +30,86 @@ struct section {
};
static struct section *secs;
enum symtype {
S_ABS,
S_REL,
S_SEG,
S_LIN,
S_NSYMTYPES
};
static const char * const sym_regex_kernel[S_NSYMTYPES] = {
/*
* Following symbols have been audited. Their values are constant and do
* not change if bzImage is loaded at a different physical address than
* the address for which it has been compiled. Don't warn user about
* absolute relocations present w.r.t these symbols.
*/
static const char abs_sym_regex[] =
[S_ABS] =
"^(xen_irq_disable_direct_reloc$|"
"xen_save_fl_direct_reloc$|"
"VDSO|"
"__crc_)";
static regex_t abs_sym_regex_c;
static int is_abs_reloc(const char *sym_name)
{
return !regexec(&abs_sym_regex_c, sym_name, 0, NULL, 0);
}
"__crc_)",
/*
* These symbols are known to be relative, even if the linker marks them
* as absolute (typically defined outside any section in the linker script.)
*/
static const char rel_sym_regex[] =
"^_end$";
static regex_t rel_sym_regex_c;
static int is_rel_reloc(const char *sym_name)
[S_REL] =
"^(__init_(begin|end)|"
"__x86_cpu_dev_(start|end)|"
"(__parainstructions|__alt_instructions)(|_end)|"
"(__iommu_table|__apicdrivers|__smp_locks)(|_end)|"
"_end)$"
};
static const char * const sym_regex_realmode[S_NSYMTYPES] = {
/*
* These are 16-bit segment symbols when compiling 16-bit code.
*/
[S_SEG] =
"^real_mode_seg$",
/*
* These are offsets belonging to segments, as opposed to linear addresses,
* when compiling 16-bit code.
*/
[S_LIN] =
"^pa_",
};
static const char * const *sym_regex;
static regex_t sym_regex_c[S_NSYMTYPES];
static int is_reloc(enum symtype type, const char *sym_name)
{
return !regexec(&rel_sym_regex_c, sym_name, 0, NULL, 0);
return sym_regex[type] &&
!regexec(&sym_regex_c[type], sym_name, 0, NULL, 0);
}
static void regex_init(void)
static void regex_init(int use_real_mode)
{
char errbuf[128];
int err;
err = regcomp(&abs_sym_regex_c, abs_sym_regex,
REG_EXTENDED|REG_NOSUB);
if (err) {
regerror(err, &abs_sym_regex_c, errbuf, sizeof errbuf);
die("%s", errbuf);
}
int i;
err = regcomp(&rel_sym_regex_c, rel_sym_regex,
REG_EXTENDED|REG_NOSUB);
if (err) {
regerror(err, &rel_sym_regex_c, errbuf, sizeof errbuf);
die("%s", errbuf);
if (use_real_mode)
sym_regex = sym_regex_realmode;
else
sym_regex = sym_regex_kernel;
for (i = 0; i < S_NSYMTYPES; i++) {
if (!sym_regex[i])
continue;
err = regcomp(&sym_regex_c[i], sym_regex[i],
REG_EXTENDED|REG_NOSUB);
if (err) {
regerror(err, &sym_regex_c[i], errbuf, sizeof errbuf);
die("%s", errbuf);
}
}
}
@ -154,6 +190,10 @@ static const char *rel_type(unsigned type)
REL_TYPE(R_386_RELATIVE),
REL_TYPE(R_386_GOTOFF),
REL_TYPE(R_386_GOTPC),
REL_TYPE(R_386_8),
REL_TYPE(R_386_PC8),
REL_TYPE(R_386_16),
REL_TYPE(R_386_PC16),
#undef REL_TYPE
};
const char *name = "unknown type rel type name";
@ -189,7 +229,7 @@ static const char *sym_name(const char *sym_strtab, Elf32_Sym *sym)
name = sym_strtab + sym->st_name;
}
else {
name = sec_name(secs[sym->st_shndx].shdr.sh_name);
name = sec_name(sym->st_shndx);
}
return name;
}
@ -472,7 +512,7 @@ static void print_absolute_relocs(void)
* Before warning check if this absolute symbol
* relocation is harmless.
*/
if (is_abs_reloc(name) || is_rel_reloc(name))
if (is_reloc(S_ABS, name) || is_reloc(S_REL, name))
continue;
if (!printed) {
@ -496,7 +536,8 @@ static void print_absolute_relocs(void)
printf("\n");
}
static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
int use_real_mode)
{
int i;
/* Walk through the relocations */
@ -521,30 +562,67 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
Elf32_Rel *rel;
Elf32_Sym *sym;
unsigned r_type;
const char *symname;
int shn_abs;
rel = &sec->reltab[j];
sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
r_type = ELF32_R_TYPE(rel->r_info);
/* Don't visit relocations to absolute symbols */
if (sym->st_shndx == SHN_ABS &&
!is_rel_reloc(sym_name(sym_strtab, sym))) {
continue;
}
shn_abs = sym->st_shndx == SHN_ABS;
switch (r_type) {
case R_386_NONE:
case R_386_PC32:
case R_386_PC16:
case R_386_PC8:
/*
* NONE can be ignored and PC relative
* relocations don't need to be adjusted.
*/
break;
case R_386_16:
symname = sym_name(sym_strtab, sym);
if (!use_real_mode)
goto bad;
if (shn_abs) {
if (is_reloc(S_ABS, symname))
break;
else if (!is_reloc(S_SEG, symname))
goto bad;
} else {
if (is_reloc(S_LIN, symname))
goto bad;
else
break;
}
visit(rel, sym);
break;
case R_386_32:
/* Visit relocations that need to be adjusted */
symname = sym_name(sym_strtab, sym);
if (shn_abs) {
if (is_reloc(S_ABS, symname))
break;
else if (!is_reloc(S_REL, symname))
goto bad;
} else {
if (use_real_mode &&
!is_reloc(S_LIN, symname))
break;
}
visit(rel, sym);
break;
default:
die("Unsupported relocation type: %s (%d)\n",
rel_type(r_type), r_type);
break;
bad:
symname = sym_name(sym_strtab, sym);
die("Invalid %s %s relocation: %s\n",
shn_abs ? "absolute" : "relative",
rel_type(r_type), symname);
}
}
}
@ -552,13 +630,19 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
static void count_reloc(Elf32_Rel *rel, Elf32_Sym *sym)
{
reloc_count += 1;
if (ELF32_R_TYPE(rel->r_info) == R_386_16)
reloc16_count++;
else
reloc_count++;
}
static void collect_reloc(Elf32_Rel *rel, Elf32_Sym *sym)
{
/* Remember the address that needs to be adjusted. */
relocs[reloc_idx++] = rel->r_offset;
if (ELF32_R_TYPE(rel->r_info) == R_386_16)
relocs16[reloc16_idx++] = rel->r_offset;
else
relocs[reloc_idx++] = rel->r_offset;
}
static int cmp_relocs(const void *va, const void *vb)
@ -568,23 +652,41 @@ static int cmp_relocs(const void *va, const void *vb)
return (*a == *b)? 0 : (*a > *b)? 1 : -1;
}
static void emit_relocs(int as_text)
static int write32(unsigned int v, FILE *f)
{
unsigned char buf[4];
put_unaligned_le32(v, buf);
return fwrite(buf, 1, 4, f) == 4 ? 0 : -1;
}
static void emit_relocs(int as_text, int use_real_mode)
{
int i;
/* Count how many relocations I have and allocate space for them. */
reloc_count = 0;
walk_relocs(count_reloc);
walk_relocs(count_reloc, use_real_mode);
relocs = malloc(reloc_count * sizeof(relocs[0]));
if (!relocs) {
die("malloc of %d entries for relocs failed\n",
reloc_count);
}
relocs16 = malloc(reloc16_count * sizeof(relocs[0]));
if (!relocs16) {
die("malloc of %d entries for relocs16 failed\n",
reloc16_count);
}
/* Collect up the relocations */
reloc_idx = 0;
walk_relocs(collect_reloc);
walk_relocs(collect_reloc, use_real_mode);
if (reloc16_count && !use_real_mode)
die("Segment relocations found but --realmode not specified\n");
/* Order the relocations for more efficient processing */
qsort(relocs, reloc_count, sizeof(relocs[0]), cmp_relocs);
qsort(relocs16, reloc16_count, sizeof(relocs16[0]), cmp_relocs);
/* Print the relocations */
if (as_text) {
@ -593,58 +695,83 @@ static void emit_relocs(int as_text)
*/
printf(".section \".data.reloc\",\"a\"\n");
printf(".balign 4\n");
for (i = 0; i < reloc_count; i++) {
printf("\t .long 0x%08lx\n", relocs[i]);
if (use_real_mode) {
printf("\t.long %lu\n", reloc16_count);
for (i = 0; i < reloc16_count; i++)
printf("\t.long 0x%08lx\n", relocs16[i]);
printf("\t.long %lu\n", reloc_count);
for (i = 0; i < reloc_count; i++) {
printf("\t.long 0x%08lx\n", relocs[i]);
}
} else {
/* Print a stop */
printf("\t.long 0x%08lx\n", (unsigned long)0);
for (i = 0; i < reloc_count; i++) {
printf("\t.long 0x%08lx\n", relocs[i]);
}
}
printf("\n");
}
else {
unsigned char buf[4];
/* Print a stop */
fwrite("\0\0\0\0", 4, 1, stdout);
/* Now print each relocation */
for (i = 0; i < reloc_count; i++) {
put_unaligned_le32(relocs[i], buf);
fwrite(buf, 4, 1, stdout);
if (use_real_mode) {
write32(reloc16_count, stdout);
for (i = 0; i < reloc16_count; i++)
write32(relocs16[i], stdout);
write32(reloc_count, stdout);
/* Now print each relocation */
for (i = 0; i < reloc_count; i++)
write32(relocs[i], stdout);
} else {
/* Print a stop */
write32(0, stdout);
/* Now print each relocation */
for (i = 0; i < reloc_count; i++) {
write32(relocs[i], stdout);
}
}
}
}
static void usage(void)
{
die("relocs [--abs-syms |--abs-relocs | --text] vmlinux\n");
die("relocs [--abs-syms|--abs-relocs|--text|--realmode] vmlinux\n");
}
int main(int argc, char **argv)
{
int show_absolute_syms, show_absolute_relocs;
int as_text;
int as_text, use_real_mode;
const char *fname;
FILE *fp;
int i;
regex_init();
show_absolute_syms = 0;
show_absolute_relocs = 0;
as_text = 0;
use_real_mode = 0;
fname = NULL;
for (i = 1; i < argc; i++) {
char *arg = argv[i];
if (*arg == '-') {
if (strcmp(argv[1], "--abs-syms") == 0) {
if (strcmp(arg, "--abs-syms") == 0) {
show_absolute_syms = 1;
continue;
}
if (strcmp(argv[1], "--abs-relocs") == 0) {
if (strcmp(arg, "--abs-relocs") == 0) {
show_absolute_relocs = 1;
continue;
}
else if (strcmp(argv[1], "--text") == 0) {
if (strcmp(arg, "--text") == 0) {
as_text = 1;
continue;
}
if (strcmp(arg, "--realmode") == 0) {
use_real_mode = 1;
continue;
}
}
else if (!fname) {
fname = arg;
@ -655,6 +782,7 @@ int main(int argc, char **argv)
if (!fname) {
usage();
}
regex_init(use_real_mode);
fp = fopen(fname, "r");
if (!fp) {
die("Cannot open %s: %s\n",
@ -673,6 +801,6 @@ int main(int argc, char **argv)
print_absolute_relocs();
return 0;
}
emit_relocs(as_text);
emit_relocs(as_text, use_real_mode);
return 0;
}
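
The rewrite replaces the two ad-hoc regexes with a per-class table indexed by symtype, selected at regex_init() time by --realmode. A compilable stand-alone sketch of that lookup, with abbreviated patterns copied from the kernel-mode table above:

#include <regex.h>
#include <stdio.h>

/* Stand-alone sketch of the symbol classification introduced above: one
 * POSIX regex per symbol class, compiled once, then is_reloc() answers
 * "does this symbol belong to class X". Patterns are shortened copies of
 * the kernel-mode table; the realmode table would fill S_SEG and S_LIN. */
enum symtype { S_ABS, S_REL, S_SEG, S_LIN, S_NSYMTYPES };

static const char * const sym_regex_kernel[S_NSYMTYPES] = {
        [S_ABS] = "^(xen_irq_disable_direct_reloc$|VDSO|__crc_)",
        [S_REL] = "^(__init_(begin|end)|_end)$",
};

static regex_t sym_regex_c[S_NSYMTYPES];

static int is_reloc(enum symtype type, const char *sym_name)
{
        return sym_regex_kernel[type] &&
               !regexec(&sym_regex_c[type], sym_name, 0, NULL, 0);
}

int main(void)
{
        for (int i = 0; i < S_NSYMTYPES; i++)
                if (sym_regex_kernel[i])
                        regcomp(&sym_regex_c[i], sym_regex_kernel[i],
                                REG_EXTENDED | REG_NOSUB);

        printf("_end      -> S_REL? %d\n", is_reloc(S_REL, "_end"));       /* 1 */
        printf("__crc_foo -> S_ABS? %d\n", is_reloc(S_ABS, "__crc_foo"));  /* 1 */
        printf("printk    -> S_ABS? %d\n", is_reloc(S_ABS, "printk"));     /* 0 */
        return 0;
}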

View File

@ -743,7 +743,7 @@ void __init printk_all_partitions(void)
struct hd_struct *part;
char name_buf[BDEVNAME_SIZE];
char devt_buf[BDEVT_SIZE];
u8 uuid[PARTITION_META_INFO_UUIDLTH * 2 + 1];
char uuid_buf[PARTITION_META_INFO_UUIDLTH * 2 + 5];
/*
* Don't show empty devices or things that have been
@ -762,14 +762,16 @@ void __init printk_all_partitions(void)
while ((part = disk_part_iter_next(&piter))) {
bool is_part0 = part == &disk->part0;
uuid[0] = 0;
uuid_buf[0] = '\0';
if (part->info)
part_unpack_uuid(part->info->uuid, uuid);
snprintf(uuid_buf, sizeof(uuid_buf), "%pU",
part->info->uuid);
printk("%s%s %10llu %s %s", is_part0 ? "" : " ",
bdevt_str(part_devt(part), devt_buf),
(unsigned long long)part->nr_sects >> 1,
disk_name(disk, part->partno, name_buf), uuid);
disk_name(disk, part->partno, name_buf),
uuid_buf);
if (is_part0) {
if (disk->driverfs_dev != NULL &&
disk->driverfs_dev->driver != NULL)

View File

@ -250,6 +250,10 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state)
return -ENODEV;
}
/* For D3cold we should execute _PS3, not _PS4. */
if (state == ACPI_STATE_D3_COLD)
object_name[3] = '3';
/*
* Transition Power
* ----------------

View File

@ -660,7 +660,7 @@ int acpi_power_on_resources(struct acpi_device *device, int state)
int acpi_power_transition(struct acpi_device *device, int state)
{
int result;
int result = 0;
if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3_COLD))
return -EINVAL;
@ -679,8 +679,11 @@ int acpi_power_transition(struct acpi_device *device, int state)
* (e.g. so the device doesn't lose power while transitioning). Then,
* we dereference all power resources used in the current list.
*/
result = acpi_power_on_list(&device->power.states[state].resources);
if (!result)
if (state < ACPI_STATE_D3_COLD)
result = acpi_power_on_list(
&device->power.states[state].resources);
if (!result && device->power.state < ACPI_STATE_D3_COLD)
acpi_power_off_list(
&device->power.states[device->power.state].resources);

View File

@ -908,6 +908,10 @@ static int acpi_bus_get_power_flags(struct acpi_device *device)
device->power.states[ACPI_STATE_D3].flags.valid = 1;
device->power.states[ACPI_STATE_D3].power = 0;
/* Set D3cold's explicit_set flag if _PS3 exists. */
if (device->power.states[ACPI_STATE_D3_HOT].flags.explicit_set)
device->power.states[ACPI_STATE_D3_COLD].flags.explicit_set = 1;
acpi_bus_init_power(device);
return 0;

View File

@ -6580,24 +6580,21 @@ static const struct file_operations dac960_user_command_proc_fops = {
static void DAC960_CreateProcEntries(DAC960_Controller_T *Controller)
{
struct proc_dir_entry *StatusProcEntry;
struct proc_dir_entry *ControllerProcEntry;
struct proc_dir_entry *UserCommandProcEntry;
if (DAC960_ProcDirectoryEntry == NULL) {
DAC960_ProcDirectoryEntry = proc_mkdir("rd", NULL);
StatusProcEntry = proc_create("status", 0,
DAC960_ProcDirectoryEntry,
&dac960_proc_fops);
DAC960_ProcDirectoryEntry = proc_mkdir("rd", NULL);
proc_create("status", 0, DAC960_ProcDirectoryEntry,
&dac960_proc_fops);
}
sprintf(Controller->ControllerName, "c%d", Controller->ControllerNumber);
ControllerProcEntry = proc_mkdir(Controller->ControllerName,
DAC960_ProcDirectoryEntry);
proc_create_data("initial_status", 0, ControllerProcEntry, &dac960_initial_status_proc_fops, Controller);
proc_create_data("current_status", 0, ControllerProcEntry, &dac960_current_status_proc_fops, Controller);
UserCommandProcEntry = proc_create_data("user_command", S_IWUSR | S_IRUSR, ControllerProcEntry, &dac960_user_command_proc_fops, Controller);
Controller->ControllerProcEntry = ControllerProcEntry;
sprintf(Controller->ControllerName, "c%d", Controller->ControllerNumber);
ControllerProcEntry = proc_mkdir(Controller->ControllerName,
DAC960_ProcDirectoryEntry);
proc_create_data("initial_status", 0, ControllerProcEntry, &dac960_initial_status_proc_fops, Controller);
proc_create_data("current_status", 0, ControllerProcEntry, &dac960_current_status_proc_fops, Controller);
proc_create_data("user_command", S_IWUSR | S_IRUSR, ControllerProcEntry, &dac960_user_command_proc_fops, Controller);
Controller->ControllerProcEntry = ControllerProcEntry;
}

View File

@ -2510,8 +2510,10 @@ static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
up(&dd->port->cmd_slot);
return NULL;
}
if (unlikely(*tag < 0))
if (unlikely(*tag < 0)) {
up(&dd->port->cmd_slot);
return NULL;
}
return dd->port->commands[*tag].sg;
}

View File

@ -1895,6 +1895,13 @@ static int virtcons_restore(struct virtio_device *vdev)
/* Get port open/close status on the host */
send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
/*
* If a port was open at the time of suspending, we
* have to let the host know that it's still open.
*/
if (port->guest_connected)
send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1);
}
return 0;
}

View File

@ -164,6 +164,7 @@ config CRYPTO_DEV_MV_CESA
select CRYPTO_ALGAPI
select CRYPTO_AES
select CRYPTO_BLKCIPHER2
select CRYPTO_HASH
help
This driver allows you to utilize the Cryptographic Engines and
Security Accelerator (CESA) which can be found on the Marvell Orion

View File

@ -245,7 +245,9 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
dev_vdbg(chan2dev(&atchan->chan_common),
"descriptor %u complete\n", txd->cookie);
dma_cookie_complete(txd);
/* mark the descriptor as complete for non cyclic cases only */
if (!atc_chan_is_cyclic(atchan))
dma_cookie_complete(txd);
/* move children to free_list */
list_splice_init(&desc->tx_list, &atchan->free_list);

View File

@ -703,7 +703,9 @@ static void ep93xx_dma_tasklet(unsigned long data)
desc = ep93xx_dma_get_active(edmac);
if (desc) {
if (desc->complete) {
dma_cookie_complete(&desc->txd);
/* mark descriptor complete for non cyclic case only */
if (!test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
dma_cookie_complete(&desc->txd);
list_splice_init(&edmac->active, &list);
}
callback = desc->txd.callback;

View File

@ -2322,7 +2322,8 @@ static void pl330_tasklet(unsigned long data)
/* Pick up ripe tomatoes */
list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
if (desc->status == DONE) {
dma_cookie_complete(&desc->txd);
if (pch->cyclic)
dma_cookie_complete(&desc->txd);
list_move_tail(&desc->node, &list);
}

View File

@ -1632,6 +1632,21 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti)
pool->low_water_blocks = pt->low_water_blocks;
pool->pf = pt->pf;
/*
* If discard_passdown was enabled verify that the data device
* supports discards. Disable discard_passdown if not; otherwise
* -EOPNOTSUPP will be returned.
*/
if (pt->pf.discard_passdown) {
struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
if (!q || !blk_queue_discard(q)) {
char buf[BDEVNAME_SIZE];
DMWARN("Discard unsupported by data device (%s): Disabling discard passdown.",
bdevname(pt->data_dev->bdev, buf));
pool->pf.discard_passdown = 0;
}
}
return 0;
}
@ -1988,19 +2003,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto out_flags_changed;
}
/*
* If discard_passdown was enabled verify that the data device
* supports discards. Disable discard_passdown if not; otherwise
* -EOPNOTSUPP will be returned.
*/
if (pf.discard_passdown) {
struct request_queue *q = bdev_get_queue(data_dev->bdev);
if (!q || !blk_queue_discard(q)) {
DMWARN("Discard unsupported by data device: Disabling discard passdown.");
pf.discard_passdown = 0;
}
}
pt->pool = pool;
pt->ti = ti;
pt->metadata_dev = metadata_dev;
@ -2385,7 +2387,7 @@ static int pool_status(struct dm_target *ti, status_type_t type,
(unsigned long long)pt->low_water_blocks);
count = !pool->pf.zero_new_blocks + !pool->pf.discard_enabled +
!pool->pf.discard_passdown;
!pt->pf.discard_passdown;
DMEMIT("%u ", count);
if (!pool->pf.zero_new_blocks)
@ -2394,7 +2396,7 @@ static int pool_status(struct dm_target *ti, status_type_t type,
if (!pool->pf.discard_enabled)
DMEMIT("ignore_discard ");
if (!pool->pf.discard_passdown)
if (!pt->pf.discard_passdown)
DMEMIT("no_discard_passdown ");
break;

View File

@ -391,6 +391,8 @@ void mddev_suspend(struct mddev *mddev)
synchronize_rcu();
wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
mddev->pers->quiesce(mddev, 1);
del_timer_sync(&mddev->safemode_timer);
}
EXPORT_SYMBOL_GPL(mddev_suspend);

View File

@ -3164,12 +3164,40 @@ raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks)
return size << conf->chunk_shift;
}
static void calc_sectors(struct r10conf *conf, sector_t size)
{
/* Calculate the number of sectors-per-device that will
* actually be used, and set conf->dev_sectors and
* conf->stride
*/
size = size >> conf->chunk_shift;
sector_div(size, conf->far_copies);
size = size * conf->raid_disks;
sector_div(size, conf->near_copies);
/* 'size' is now the number of chunks in the array */
/* calculate "used chunks per device" */
size = size * conf->copies;
/* We need to round up when dividing by raid_disks to
* get the stride size.
*/
size = DIV_ROUND_UP_SECTOR_T(size, conf->raid_disks);
conf->dev_sectors = size << conf->chunk_shift;
if (conf->far_offset)
conf->stride = 1 << conf->chunk_shift;
else {
sector_div(size, conf->far_copies);
conf->stride = size << conf->chunk_shift;
}
}
static struct r10conf *setup_conf(struct mddev *mddev)
{
struct r10conf *conf = NULL;
int nc, fc, fo;
sector_t stride, size;
int err = -EINVAL;
if (mddev->new_chunk_sectors < (PAGE_SIZE >> 9) ||
@ -3219,28 +3247,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
if (!conf->r10bio_pool)
goto out;
size = mddev->dev_sectors >> conf->chunk_shift;
sector_div(size, fc);
size = size * conf->raid_disks;
sector_div(size, nc);
/* 'size' is now the number of chunks in the array */
/* calculate "used chunks per device" in 'stride' */
stride = size * conf->copies;
/* We need to round up when dividing by raid_disks to
* get the stride size.
*/
stride += conf->raid_disks - 1;
sector_div(stride, conf->raid_disks);
conf->dev_sectors = stride << conf->chunk_shift;
if (fo)
stride = 1;
else
sector_div(stride, fc);
conf->stride = stride << conf->chunk_shift;
calc_sectors(conf, mddev->dev_sectors);
spin_lock_init(&conf->device_lock);
INIT_LIST_HEAD(&conf->retry_list);
@ -3468,7 +3475,8 @@ static int raid10_resize(struct mddev *mddev, sector_t sectors)
mddev->recovery_cp = oldsize;
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
}
mddev->dev_sectors = sectors;
calc_sectors(conf, sectors);
mddev->dev_sectors = conf->dev_sectors;
mddev->resync_max_sectors = size;
return 0;
}
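
calc_sectors() centralises the dev_sectors/stride arithmetic so raid10_resize() can reuse it instead of open-coding the old setup_conf() math. Below is a stand-alone sketch of the same arithmetic using plain 64-bit division in place of sector_div()/DIV_ROUND_UP_SECTOR_T; the geometry in main() is an invented example, not taken from the patch.

#include <stdio.h>
#include <stdint.h>

/* Field names mirror struct r10conf; this is a model, not kernel code. */
struct conf {
        int chunk_shift;     /* log2(chunk size in sectors) */
        int raid_disks, near_copies, far_copies, copies, far_offset;
        uint64_t dev_sectors, stride;
};

static void calc_sectors(struct conf *c, uint64_t size)
{
        size >>= c->chunk_shift;
        size /= c->far_copies;
        size *= c->raid_disks;
        size /= c->near_copies;
        /* 'size' is now the number of chunks in the array */
        size *= c->copies;                                   /* used chunks */
        size = (size + c->raid_disks - 1) / c->raid_disks;   /* round up    */
        c->dev_sectors = size << c->chunk_shift;
        if (c->far_offset)
                c->stride = 1ull << c->chunk_shift;
        else
                c->stride = (size / c->far_copies) << c->chunk_shift;
}

int main(void)
{
        /* e.g. 4 disks, 64KiB chunks (128 sectors), near=2, far=1 */
        struct conf c = { .chunk_shift = 7, .raid_disks = 4,
                          .near_copies = 2, .far_copies = 1,
                          .copies = 2, .far_offset = 0 };
        calc_sectors(&c, 1048576);   /* 512MiB worth of device sectors */
        printf("dev_sectors=%llu stride=%llu\n",
               (unsigned long long)c.dev_sectors,
               (unsigned long long)c.stride);
        return 0;
}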

View File

@ -493,7 +493,11 @@ out:
static void e1000_down_and_stop(struct e1000_adapter *adapter)
{
set_bit(__E1000_DOWN, &adapter->flags);
cancel_work_sync(&adapter->reset_task);
/* Only kill reset task if adapter is not resetting */
if (!test_bit(__E1000_RESETTING, &adapter->flags))
cancel_work_sync(&adapter->reset_task);
cancel_delayed_work_sync(&adapter->watchdog_task);
cancel_delayed_work_sync(&adapter->phy_info_task);
cancel_delayed_work_sync(&adapter->fifo_stall_task);

View File

@ -501,7 +501,9 @@ static void virtnet_napi_enable(struct virtnet_info *vi)
* We synchronize against interrupts via NAPI_STATE_SCHED */
if (napi_schedule_prep(&vi->napi)) {
virtqueue_disable_cb(vi->rvq);
local_bh_disable();
__napi_schedule(&vi->napi);
local_bh_enable();
}
}

View File

@ -223,7 +223,7 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
[PCI_D0] = ACPI_STATE_D0,
[PCI_D1] = ACPI_STATE_D1,
[PCI_D2] = ACPI_STATE_D2,
[PCI_D3hot] = ACPI_STATE_D3_HOT,
[PCI_D3hot] = ACPI_STATE_D3,
[PCI_D3cold] = ACPI_STATE_D3
};
int error = -EINVAL;

View File

@ -312,6 +312,7 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
int ret;
struct pl031_local *ldata;
struct rtc_class_ops *ops = id->data;
unsigned long time;
ret = amba_request_regions(adev, NULL);
if (ret)
@ -343,6 +344,23 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
writel(readl(ldata->base + RTC_CR) | RTC_CR_CWEN,
ldata->base + RTC_CR);
/*
* On ST PL031 variants, the RTC reset value does not provide correct
* weekday for 2000-01-01. Correct the erroneous sunday to saturday.
*/
if (ldata->hw_designer == AMBA_VENDOR_ST) {
if (readl(ldata->base + RTC_YDR) == 0x2000) {
time = readl(ldata->base + RTC_DR);
if ((time &
(RTC_MON_MASK | RTC_MDAY_MASK | RTC_WDAY_MASK))
== 0x02120000) {
time = time | (0x7 << RTC_WDAY_SHIFT);
writel(0x2000, ldata->base + RTC_YLR);
writel(time, ldata->base + RTC_LR);
}
}
}
ldata->rtc = rtc_device_register("pl031", &adev->dev, ops,
THIS_MODULE);
if (IS_ERR(ldata->rtc)) {

View File

@ -169,6 +169,7 @@ static struct se_device *fd_create_virtdevice(
inode = file->f_mapping->host;
if (S_ISBLK(inode->i_mode)) {
struct request_queue *q;
unsigned long long dev_size;
/*
* Setup the local scope queue_limits from struct request_queue->limits
* to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
@ -183,13 +184,12 @@ static struct se_device *fd_create_virtdevice(
* one (1) logical sector from underlying struct block_device
*/
fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
fd_dev->fd_dev_size = (i_size_read(file->f_mapping->host) -
dev_size = (i_size_read(file->f_mapping->host) -
fd_dev->fd_block_size);
pr_debug("FILEIO: Using size: %llu bytes from struct"
" block_device blocks: %llu logical_block_size: %d\n",
fd_dev->fd_dev_size,
div_u64(fd_dev->fd_dev_size, fd_dev->fd_block_size),
dev_size, div_u64(dev_size, fd_dev->fd_block_size),
fd_dev->fd_block_size);
} else {
if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
@ -605,10 +605,20 @@ static u32 fd_get_device_type(struct se_device *dev)
static sector_t fd_get_blocks(struct se_device *dev)
{
struct fd_dev *fd_dev = dev->dev_ptr;
unsigned long long blocks_long = div_u64(fd_dev->fd_dev_size,
dev->se_sub_dev->se_dev_attrib.block_size);
struct file *f = fd_dev->fd_file;
struct inode *i = f->f_mapping->host;
unsigned long long dev_size;
/*
* When using a file that references an underlying struct block_device,
* ensure dev_size is always based on the current inode size in order
* to handle underlying block_device resize operations.
*/
if (S_ISBLK(i->i_mode))
dev_size = (i_size_read(i) - fd_dev->fd_block_size);
else
dev_size = fd_dev->fd_dev_size;
return blocks_long;
return div_u64(dev_size, dev->se_sub_dev->se_dev_attrib.block_size);
}
static struct se_subsystem_api fileio_template = {

View File

@ -220,6 +220,9 @@ int target_scsi2_reservation_release(struct se_task *task)
if (dev->dev_reserved_node_acl != sess->se_node_acl)
goto out_unlock;
if (dev->dev_res_bin_isid != sess->sess_bin_isid)
goto out_unlock;
dev->dev_reserved_node_acl = NULL;
dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
if (dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID) {

View File

@ -390,6 +390,7 @@ static void __devexit virtballoon_remove(struct virtio_device *vdev)
/* There might be pages left in the balloon: free them. */
while (vb->num_pages)
leak_balloon(vb, vb->num_pages);
update_balloon_size(vb);
/* Now we reset the device so we can clean up the queues. */
vdev->config->reset(vdev);

View File

@ -505,9 +505,14 @@ EXPORT_SYMBOL(bio_clone);
int bio_get_nr_vecs(struct block_device *bdev)
{
struct request_queue *q = bdev_get_queue(bdev);
return min_t(unsigned,
int nr_pages;
nr_pages = min_t(unsigned,
queue_max_segments(q),
queue_max_sectors(q) / (PAGE_SIZE >> 9) + 1);
return min_t(unsigned, nr_pages, BIO_MAX_PAGES);
}
EXPORT_SYMBOL(bio_get_nr_vecs);

View File

@ -70,7 +70,7 @@ static void bdev_inode_switch_bdi(struct inode *inode,
spin_unlock(&dst->wb.list_lock);
}
static sector_t max_block(struct block_device *bdev)
sector_t blkdev_max_block(struct block_device *bdev)
{
sector_t retval = ~((sector_t)0);
loff_t sz = i_size_read(bdev->bd_inode);
@ -163,7 +163,7 @@ static int
blkdev_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh, int create)
{
if (iblock >= max_block(I_BDEV(inode))) {
if (iblock >= blkdev_max_block(I_BDEV(inode))) {
if (create)
return -EIO;
@ -185,7 +185,7 @@ static int
blkdev_get_blocks(struct inode *inode, sector_t iblock,
struct buffer_head *bh, int create)
{
sector_t end_block = max_block(I_BDEV(inode));
sector_t end_block = blkdev_max_block(I_BDEV(inode));
unsigned long max_blocks = bh->b_size >> inode->i_blkbits;
if ((iblock + max_blocks) > end_block) {

View File

@ -921,6 +921,7 @@ init_page_buffers(struct page *page, struct block_device *bdev,
struct buffer_head *head = page_buffers(page);
struct buffer_head *bh = head;
int uptodate = PageUptodate(page);
sector_t end_block = blkdev_max_block(I_BDEV(bdev->bd_inode));
do {
if (!buffer_mapped(bh)) {
@ -929,7 +930,8 @@ init_page_buffers(struct page *page, struct block_device *bdev,
bh->b_blocknr = block;
if (uptodate)
set_buffer_uptodate(bh);
set_buffer_mapped(bh);
if (block < end_block)
set_buffer_mapped(bh);
}
block++;
bh = bh->b_this_page;

View File

@ -1799,10 +1799,15 @@ static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd)
if (task) {
files = get_files_struct(task);
if (files) {
struct file *file;
rcu_read_lock();
if (fcheck_files(files, fd)) {
file = fcheck_files(files, fd);
if (file) {
unsigned i_mode, f_mode = file->f_mode;
rcu_read_unlock();
put_files_struct(files);
if (task_dumpable(task)) {
rcu_read_lock();
cred = __task_cred(task);
@ -1813,7 +1818,14 @@ static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd)
inode->i_uid = 0;
inode->i_gid = 0;
}
inode->i_mode &= ~(S_ISUID | S_ISGID);
i_mode = S_IFLNK;
if (f_mode & FMODE_READ)
i_mode |= S_IRUSR | S_IXUSR;
if (f_mode & FMODE_WRITE)
i_mode |= S_IWUSR | S_IXUSR;
inode->i_mode = i_mode;
security_task_to_inode(task, inode);
put_task_struct(task);
return 1;
@ -1837,8 +1849,6 @@ static struct dentry *proc_fd_instantiate(struct inode *dir,
struct dentry *dentry, struct task_struct *task, const void *ptr)
{
unsigned fd = *(const unsigned *)ptr;
struct file *file;
struct files_struct *files;
struct inode *inode;
struct proc_inode *ei;
struct dentry *error = ERR_PTR(-ENOENT);
@ -1848,25 +1858,6 @@ static struct dentry *proc_fd_instantiate(struct inode *dir,
goto out;
ei = PROC_I(inode);
ei->fd = fd;
files = get_files_struct(task);
if (!files)
goto out_iput;
inode->i_mode = S_IFLNK;
/*
* We are not taking a ref to the file structure, so we must
* hold ->file_lock.
*/
spin_lock(&files->file_lock);
file = fcheck_files(files, fd);
if (!file)
goto out_unlock;
if (file->f_mode & FMODE_READ)
inode->i_mode |= S_IRUSR | S_IXUSR;
if (file->f_mode & FMODE_WRITE)
inode->i_mode |= S_IWUSR | S_IXUSR;
spin_unlock(&files->file_lock);
put_files_struct(files);
inode->i_op = &proc_pid_link_inode_operations;
inode->i_size = 64;
@ -1879,12 +1870,6 @@ static struct dentry *proc_fd_instantiate(struct inode *dir,
out:
return error;
out_unlock:
spin_unlock(&files->file_lock);
put_files_struct(files);
out_iput:
iput(inode);
goto out;
}
static struct dentry *proc_lookupfd_common(struct inode *dir,
@ -2177,16 +2162,16 @@ static struct dentry *proc_map_files_lookup(struct inode *dir,
goto out;
result = ERR_PTR(-EACCES);
if (lock_trace(task))
if (!ptrace_may_access(task, PTRACE_MODE_READ))
goto out_put_task;
result = ERR_PTR(-ENOENT);
if (dname_to_vma_addr(dentry, &vm_start, &vm_end))
goto out_unlock;
goto out_put_task;
mm = get_task_mm(task);
if (!mm)
goto out_unlock;
goto out_put_task;
down_read(&mm->mmap_sem);
vma = find_exact_vma(mm, vm_start, vm_end);
@ -2198,8 +2183,6 @@ static struct dentry *proc_map_files_lookup(struct inode *dir,
out_no_vma:
up_read(&mm->mmap_sem);
mmput(mm);
out_unlock:
unlock_trace(task);
out_put_task:
put_task_struct(task);
out:
@ -2233,7 +2216,7 @@ proc_map_files_readdir(struct file *filp, void *dirent, filldir_t filldir)
goto out;
ret = -EACCES;
if (lock_trace(task))
if (!ptrace_may_access(task, PTRACE_MODE_READ))
goto out_put_task;
ret = 0;
@ -2241,12 +2224,12 @@ proc_map_files_readdir(struct file *filp, void *dirent, filldir_t filldir)
case 0:
ino = inode->i_ino;
if (filldir(dirent, ".", 1, 0, ino, DT_DIR) < 0)
goto out_unlock;
goto out_put_task;
filp->f_pos++;
case 1:
ino = parent_ino(dentry);
if (filldir(dirent, "..", 2, 1, ino, DT_DIR) < 0)
goto out_unlock;
goto out_put_task;
filp->f_pos++;
default:
{
@ -2257,7 +2240,7 @@ proc_map_files_readdir(struct file *filp, void *dirent, filldir_t filldir)
mm = get_task_mm(task);
if (!mm)
goto out_unlock;
goto out_put_task;
down_read(&mm->mmap_sem);
nr_files = 0;
@ -2287,7 +2270,7 @@ proc_map_files_readdir(struct file *filp, void *dirent, filldir_t filldir)
flex_array_free(fa);
up_read(&mm->mmap_sem);
mmput(mm);
goto out_unlock;
goto out_put_task;
}
for (i = 0, vma = mm->mmap, pos = 2; vma;
vma = vma->vm_next) {
@ -2332,8 +2315,6 @@ proc_map_files_readdir(struct file *filp, void *dirent, filldir_t filldir)
}
}
out_unlock:
unlock_trace(task);
out_put_task:
put_task_struct(task);
out:
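
tid_fd_revalidate() now rebuilds i_mode for the /proc/<pid>/fd symlink from the file's current f_mode rather than editing the stale mode, so the link's permissions track what the descriptor can actually do. A small illustration of that mapping (the FMODE_* values are copied here only for the example):

#include <stdio.h>
#include <sys/stat.h>

/* Kernel values, repeated here so the example compiles in userspace. */
#define FMODE_READ  0x1
#define FMODE_WRITE 0x2

static unsigned int fd_link_mode(unsigned int f_mode)
{
        unsigned int i_mode = S_IFLNK;

        if (f_mode & FMODE_READ)
                i_mode |= S_IRUSR | S_IXUSR;
        if (f_mode & FMODE_WRITE)
                i_mode |= S_IWUSR | S_IXUSR;
        return i_mode;
}

int main(void)
{
        printf("read-only fd  -> %o\n", fd_link_mode(FMODE_READ));
        printf("read-write fd -> %o\n", fd_link_mode(FMODE_READ | FMODE_WRITE));
        return 0;
}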

View File

@ -1,9 +1,10 @@
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H
#include <linux/sched.h>
#ifdef CONFIG_BLOCK
#include <linux/sched.h>
#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>

View File

@ -2051,6 +2051,7 @@ extern void unregister_blkdev(unsigned int, const char *);
extern struct block_device *bdget(dev_t);
extern struct block_device *bdgrab(struct block_device *bdev);
extern void bd_set_size(struct block_device *, loff_t size);
extern sector_t blkdev_max_block(struct block_device *bdev);
extern void bd_forget(struct inode *inode);
extern void bdput(struct block_device *);
extern void invalidate_bdev(struct block_device *);

View File

@ -179,6 +179,7 @@ enum {
TRACE_EVENT_FL_RECORDED_CMD_BIT,
TRACE_EVENT_FL_CAP_ANY_BIT,
TRACE_EVENT_FL_NO_SET_FILTER_BIT,
TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
};
enum {
@ -187,6 +188,7 @@ enum {
TRACE_EVENT_FL_RECORDED_CMD = (1 << TRACE_EVENT_FL_RECORDED_CMD_BIT),
TRACE_EVENT_FL_CAP_ANY = (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
TRACE_EVENT_FL_IGNORE_ENABLE = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
};
struct ftrace_event_call {

View File

@ -222,12 +222,6 @@ static inline void part_pack_uuid(const u8 *uuid_str, u8 *to)
}
}
static inline char *part_unpack_uuid(const u8 *uuid, char *out)
{
sprintf(out, "%pU", uuid);
return out;
}
static inline int disk_max_parts(struct gendisk *disk)
{
if (disk->flags & GENHD_FL_EXT_DEVT)

View File

@ -6382,6 +6382,8 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
if (!sg)
return -ENOMEM;
sg->next = sg;
*per_cpu_ptr(sdd->sg, j) = sg;
sgp = kzalloc_node(sizeof(struct sched_group_power),

View File

@ -294,6 +294,9 @@ static int __ftrace_set_clr_event(const char *match, const char *sub,
if (!call->name || !call->class || !call->class->reg)
continue;
if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
continue;
if (match &&
strcmp(match, call->name) != 0 &&
strcmp(match, call->class->system) != 0)
@ -1164,7 +1167,7 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
return -1;
}
if (call->class->reg)
if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
trace_create_file("enable", 0644, call->dir, call,
enable);

View File

@ -180,6 +180,7 @@ struct ftrace_event_call __used event_##call = { \
.event.type = etype, \
.class = &event_class_ftrace_##call, \
.print_fmt = print, \
.flags = TRACE_EVENT_FL_IGNORE_ENABLE, \
}; \
struct ftrace_event_call __used \
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call;

View File

@ -5481,7 +5481,7 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
* part of thp split is not executed yet.
*/
if (pmd_trans_huge_lock(pmd, vma) == 1) {
if (!mc.precharge) {
if (mc.precharge < HPAGE_PMD_NR) {
spin_unlock(&vma->vm_mm->page_table_lock);
return 0;
}

View File

@ -2040,7 +2040,7 @@ static bool has_cpu_slab(int cpu, void *info)
struct kmem_cache *s = info;
struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
return !!(c->page);
return c->page || c->partial;
}
static void flush_all(struct kmem_cache *s)

View File

@ -3749,13 +3749,13 @@ static void __exit pg_cleanup(void)
{
struct pktgen_thread *t;
struct list_head *q, *n;
struct list_head list;
LIST_HEAD(list);
/* Stop all interfaces & threads */
pktgen_exiting = true;
mutex_lock(&pktgen_thread_lock);
list_splice(&list, &pktgen_threads);
list_splice_init(&pktgen_threads, &list);
mutex_unlock(&pktgen_thread_lock);
list_for_each_safe(q, n, &list) {

View File

@ -917,8 +917,7 @@ new_segment:
wait_for_sndbuf:
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
if (copied)
tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
goto do_error;

View File

@ -8,6 +8,8 @@
# conmakehash: Create arrays for initializing the kernel console tables
# docproc: Used in Documentation/DocBook
HOST_EXTRACFLAGS += -I$(srctree)/tools/include
hostprogs-$(CONFIG_KALLSYMS) += kallsyms
hostprogs-$(CONFIG_LOGO) += pnmtologo
hostprogs-$(CONFIG_VT) += conmakehash

View File

@ -774,10 +774,10 @@ $(OUTPUT)perf.o perf.spec \
# over the general rule for .o
$(OUTPUT)util/%-flex.o: $(OUTPUT)util/%-flex.c $(OUTPUT)PERF-CFLAGS
$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -Iutil/ -Wno-redundant-decls -Wno-switch-default -Wno-unused-function $<
$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -Iutil/ -w $<
$(OUTPUT)util/%-bison.o: $(OUTPUT)util/%-bison.c $(OUTPUT)PERF-CFLAGS
$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -Iutil/ -Wno-redundant-decls -Wno-switch-default -Wno-unused-function $<
$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -Iutil/ -w $<
$(OUTPUT)%.o: %.c $(OUTPUT)PERF-CFLAGS
$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $<

View File

@ -283,6 +283,8 @@ static int create_perf_stat_counter(struct perf_evsel *evsel,
{
struct perf_event_attr *attr = &evsel->attr;
struct xyarray *group_fd = NULL;
bool exclude_guest_missing = false;
int ret;
if (group && evsel != first)
group_fd = first->fd;
@ -293,16 +295,39 @@ static int create_perf_stat_counter(struct perf_evsel *evsel,
attr->inherit = !no_inherit;
if (system_wide)
return perf_evsel__open_per_cpu(evsel, evsel_list->cpus,
retry:
if (exclude_guest_missing)
evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
if (system_wide) {
ret = perf_evsel__open_per_cpu(evsel, evsel_list->cpus,
group, group_fd);
if (ret)
goto check_ret;
return 0;
}
if (!target_pid && !target_tid && (!group || evsel == first)) {
attr->disabled = 1;
attr->enable_on_exec = 1;
}
return perf_evsel__open_per_thread(evsel, evsel_list->threads,
group, group_fd);
ret = perf_evsel__open_per_thread(evsel, evsel_list->threads,
group, group_fd);
if (!ret)
return 0;
/* fall through */
check_ret:
if (ret && errno == EINVAL) {
if (!exclude_guest_missing &&
(evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
pr_debug("Old kernel, cannot exclude "
"guest or host samples.\n");
exclude_guest_missing = true;
goto retry;
}
}
return ret;
}
/*
@ -463,8 +488,13 @@ static int run_perf_stat(int argc __used, const char **argv)
list_for_each_entry(counter, &evsel_list->entries, node) {
if (create_perf_stat_counter(counter, first) < 0) {
/*
* PPC returns ENXIO for HW counters until 2.6.37
* (behavior changed with commit b0a873e).
*/
if (errno == EINVAL || errno == ENOSYS ||
errno == ENOENT || errno == EOPNOTSUPP) {
errno == ENOENT || errno == EOPNOTSUPP ||
errno == ENXIO) {
if (verbose)
ui__warning("%s event is not supported by the kernel.\n",
event_name(counter));
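
create_perf_stat_counter() now retries without attr.exclude_guest/exclude_host when an older kernel rejects them with EINVAL. A simplified stand-alone sketch of the same fallback around the raw perf_event_open() syscall (one event, current process, no group fd), not perf's own code:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <stdio.h>

static long perf_open(struct perf_event_attr *attr, pid_t pid)
{
        return syscall(__NR_perf_event_open, attr, pid, -1 /* any cpu */,
                       -1 /* no group */, 0);
}

int main(void)
{
        struct perf_event_attr attr;
        long fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CPU_CYCLES;
        attr.exclude_guest = 1;          /* unknown to pre-3.2 kernels */

        fd = perf_open(&attr, 0 /* current process */);
        if (fd < 0 && errno == EINVAL &&
            (attr.exclude_guest || attr.exclude_host)) {
                fprintf(stderr, "old kernel? retrying without exclude_guest\n");
                attr.exclude_guest = attr.exclude_host = 0;
                fd = perf_open(&attr, 0);
        }
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }
        close(fd);
        return 0;
}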

View File

@ -296,7 +296,7 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
if (mkdir_p(filename, 0755))
goto out_free;
snprintf(filename + len, sizeof(filename) - len, "/%s", sbuild_id);
snprintf(filename + len, size - len, "/%s", sbuild_id);
if (access(filename, F_OK)) {
if (is_kallsyms) {