
Merge branch 'master' into for-next

Neil Brown 2008-07-11 21:57:40 +10:00
commit 0306d5efbf
110 changed files with 2194 additions and 371 deletions

Documentation/ftrace.txt (new file, 1353 lines added; diff suppressed because it is too large)

View File

@ -148,9 +148,9 @@ tcp_available_congestion_control - STRING
but not loaded.
tcp_base_mss - INTEGER
The initial value of search_low to be used by Packetization Layer
Path MTU Discovery (MTU probing). If MTU probing is enabled,
this is the inital MSS used by the connection.
The initial value of search_low to be used by the packetization layer
Path MTU discovery (MTU probing). If MTU probing is enabled,
this is the initial MSS used by the connection.
tcp_congestion_control - STRING
Set the congestion control algorithm to be used for new
@ -185,10 +185,9 @@ tcp_frto - INTEGER
timeouts. It is particularly beneficial in wireless environments
where packet loss is typically due to random radio interference
rather than intermediate router congestion. F-RTO is sender-side
only modification. Therefore it does not require any support from
the peer, but in a typical case, however, where wireless link is
the local access link and most of the data flows downlink, the
faraway servers should have F-RTO enabled to take advantage of it.
only modification. Therefore it does not require any support from
the peer.
If set to 1, basic version is enabled. 2 enables SACK enhanced
F-RTO if flow uses SACK. The basic version can be used also when
SACK is in use though scenario(s) with it exists where F-RTO
@ -276,7 +275,7 @@ tcp_mem - vector of 3 INTEGERs: min, pressure, max
memory.
tcp_moderate_rcvbuf - BOOLEAN
If set, TCP performs receive buffer autotuning, attempting to
If set, TCP performs receive buffer auto-tuning, attempting to
automatically size the buffer (no greater than tcp_rmem[2]) to
match the size required by the path for full throughput. Enabled by
default.
@ -336,7 +335,7 @@ tcp_rmem - vector of 3 INTEGERs: min, default, max
pressure.
Default: 8K
default: default size of receive buffer used by TCP sockets.
default: initial size of receive buffer used by TCP sockets.
This value overrides net.core.rmem_default used by other protocols.
Default: 87380 bytes. This value results in window of 65535 with
default setting of tcp_adv_win_scale and tcp_app_win:0 and a bit
@ -344,8 +343,10 @@ tcp_rmem - vector of 3 INTEGERs: min, default, max
max: maximal size of receive buffer allowed for automatically
selected receiver buffers for TCP socket. This value does not override
net.core.rmem_max, "static" selection via SO_RCVBUF does not use this.
Default: 87380*2 bytes.
net.core.rmem_max. Calling setsockopt() with SO_RCVBUF disables
automatic tuning of that socket's receive buffer size, in which
case this value is ignored.
Default: between 87380B and 4MB, depending on RAM size.
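The same split between automatic tuning and "static" selection applies to tcp_wmem and SO_SNDBUF further down. A minimal sketch of the static path, assuming fd is an already created TCP socket; once SO_RCVBUF has been set, the kernel stops autotuning that socket's receive buffer and the tcp_rmem max above no longer applies (net.core.rmem_max still caps the request):

#include <stdio.h>
#include <sys/socket.h>

/* Pin one socket's receive buffer; this disables autotuning for fd. */
static int pin_rcvbuf(int fd, int bytes)
{
	socklen_t len = sizeof(bytes);

	if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &bytes, sizeof(bytes)) < 0)
		return -1;
	/* The kernel doubles the requested value to allow for bookkeeping
	 * overhead; read back the effective size. */
	if (getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &bytes, &len) == 0)
		printf("effective receive buffer: %d bytes\n", bytes);
	return 0;
}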
tcp_sack - BOOLEAN
Enable selective acknowledgments (SACKs).
@ -358,7 +359,7 @@ tcp_slow_start_after_idle - BOOLEAN
Default: 1
tcp_stdurg - BOOLEAN
Use the Host requirements interpretation of the TCP urg pointer field.
Use the Host requirements interpretation of the TCP urgent pointer field.
Most hosts use the older BSD interpretation, so if you turn this on
Linux might not communicate correctly with them.
Default: FALSE
@ -371,12 +372,12 @@ tcp_synack_retries - INTEGER
tcp_syncookies - BOOLEAN
Only valid when the kernel was compiled with CONFIG_SYNCOOKIES
Send out syncookies when the syn backlog queue of a socket
overflows. This is to prevent against the common 'syn flood attack'
overflows. This is to prevent against the common 'SYN flood attack'
Default: FALSE
Note that syncookies are a fallback facility.
It MUST NOT be used to help highly loaded servers to stand
against legal connection rate. If you see synflood warnings
against legal connection rate. If you see SYN flood warnings
in your logs, but investigation shows that they occur
because of overload with legal connections, you should tune
other parameters until these warnings disappear.
@ -386,7 +387,7 @@ tcp_syncookies - BOOLEAN
to use TCP extensions, can result in serious degradation
of some services (f.e. SMTP relaying), visible not by you,
but your clients and relays, contacting you. While you see
synflood warnings in logs not being really flooded, your server
SYN flood warnings in logs not being really flooded, your server
is seriously misconfigured.
tcp_syn_retries - INTEGER
@ -419,19 +420,21 @@ tcp_window_scaling - BOOLEAN
Enable window scaling as defined in RFC1323.
tcp_wmem - vector of 3 INTEGERs: min, default, max
min: Amount of memory reserved for send buffers for TCP socket.
min: Amount of memory reserved for send buffers for TCP sockets.
Each TCP socket is entitled to use it from the moment it is created.
Default: 4K
default: Amount of memory allowed for send buffers for TCP socket
by default. This value overrides net.core.wmem_default used
by other protocols, it is usually lower than net.core.wmem_default.
default: initial size of send buffer used by TCP sockets. This
value overrides net.core.wmem_default used by other protocols.
It is usually lower than net.core.wmem_default.
Default: 16K
max: Maximal amount of memory allowed for automatically selected
send buffers for TCP socket. This value does not override
net.core.wmem_max, "static" selection via SO_SNDBUF does not use this.
Default: 128K
max: Maximal amount of memory allowed for automatically tuned
send buffers for TCP sockets. This value does not override
net.core.wmem_max. Calling setsockopt() with SO_SNDBUF disables
automatic tuning of that socket's send buffer size, in which case
this value is ignored.
Default: between 64K and 4MB, depending on RAM size.
tcp_workaround_signed_windows - BOOLEAN
If set, assume no receipt of a window scaling option means the
@ -1060,24 +1063,193 @@ bridge-nf-filter-pppoe-tagged - BOOLEAN
Default: 1
proc/sys/net/sctp/* Variables:
addip_enable - BOOLEAN
Enable or disable the Dynamic Address Reconfiguration (ADD-IP)
extension specified in RFC5061. This extension provides the ability
to dynamically add and remove addresses for SCTP associations.
1: Enable extension.
0: Disable extension.
Default: 0
addip_noauth_enable - BOOLEAN
Dynamic Address Reconfiguration (ADD-IP) requires the use of
authentication to protect the operations of adding or removing new
addresses. This requirement is mandated so that unauthorized hosts
would not be able to hijack associations. However, older
implementations may not have implemented this requirement while
allowing the ADD-IP extension. For reasons of interoperability,
we provide this variable to control the enforcement of the
authentication requirement.
1: Allow ADD-IP extension to be used without authentication. This
should only be set in a closed environment for interoperability
with older implementations.
0: Enforce the authentication requirement
Default: 0
auth_enable - BOOLEAN
Enable or disable the Authenticated Chunks extension. This extension
provides the ability to send and receive authenticated chunks and is
required for secure operation of the Dynamic Address Reconfiguration
(ADD-IP) extension.
1: Enable this extension.
0: Disable this extension.
Default: 0
prsctp_enable - BOOLEAN
Enable or disable the Partial Reliability extension (RFC3758) which
is used to notify peers that a given DATA should no longer be expected.
1: Enable extension
0: Disable
Default: 1
max_burst - INTEGER
The limit of the number of new packets that can be initially sent. It
controls how bursty the generated traffic can be.
Default: 4
association_max_retrans - INTEGER
Set the maximum number of retransmissions that an association can
attempt before deciding that the remote end is unreachable. If this
value is exceeded, the association is terminated.
Default: 10
max_init_retransmits - INTEGER
The maximum number of retransmissions of INIT and COOKIE-ECHO chunks
that an association will attempt before declaring the destination
unreachable and terminating.
Default: 8
path_max_retrans - INTEGER
The maximum number of retransmissions that will be attempted on a given
path. Once this threshold is exceeded, the path is considered
unreachable, and new traffic will use a different path when the
association is multihomed.
Default: 5
rto_initial - INTEGER
The initial round trip timeout value in milliseconds that will be used
in calculating round trip times. This is the initial time interval
for retransmissions.
Default: 3000
rto_max - INTEGER
The maximum value (in milliseconds) of the round trip timeout. This
is the largest time interval that can elapse between retransmissions.
Default: 60000
rto_min - INTEGER
The minimum value (in milliseconds) of the round trip timeout. This
is the smallest time interval that can elapse between retransmissions.
Default: 1000
hb_interval - INTEGER
The interval (in milliseconds) between HEARTBEAT chunks. These chunks
are sent at the specified interval on idle paths to probe the state of
a given path between the two association endpoints.
Default: 30000
sack_timeout - INTEGER
The amount of time (in milliseconds) that the implementation will wait
to send a SACK.
Default: 200
valid_cookie_life - INTEGER
The default lifetime of the SCTP cookie (in milliseconds). The cookie
is used during association establishment.
Default: 60000
cookie_preserve_enable - BOOLEAN
Enable or disable the ability to extend the lifetime of the SCTP cookie
that is used during the establishment phase of an SCTP association.
1: Enable cookie lifetime extension.
0: Disable
Default: 1
rcvbuf_policy - INTEGER
Determines if the receive buffer is attributed to the socket or to the
association. SCTP supports creating multiple associations on a single
socket. When using this capability, it is possible that a single
stalled association that is buffering a lot of data may block other
associations from delivering their data by consuming all of the
receive buffer space. To work around this, rcvbuf_policy can be set to
attribute the receive buffer space to each association instead of the
socket, which prevents the described blocking (see the sketch after
sndbuf_policy below).
1: rcvbuf space is per association
0: rcvbuf space is per socket
Default: 0
sndbuf_policy - INTEGER
Similar to rcvbuf_policy above, this applies to send buffer space.
1: Send buffer is tracked per association
0: Send buffer is tracked per socket.
Default: 0
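For context, the multiple-associations-per-socket case that rcvbuf_policy and sndbuf_policy address is the one-to-many ("UDP-style") SCTP socket. A minimal sketch, with error handling omitted and the port number chosen arbitrarily; with rcvbuf_policy=0 every association reaching this socket shares one receive buffer, while rcvbuf_policy=1 accounts each association separately:

#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
	struct sockaddr_in addr;

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = htons(5000);		/* arbitrary example port */

	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
	listen(fd, 8);				/* accept new associations */

	for (;;) {
		char buf[2048];
		ssize_t n = recv(fd, buf, sizeof(buf), 0);

		if (n <= 0)
			break;
		/* data from every association arrives on this single fd */
		printf("got %zd bytes\n", n);
	}
	return 0;
}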
sctp_mem - vector of 3 INTEGERs: min, pressure, max
Number of pages allowed for queueing by all SCTP sockets.
min: Below this number of pages SCTP does not bother about its
memory appetite. When the amount of memory allocated by SCTP exceeds
this number, SCTP starts to moderate its memory usage.
pressure: This value was introduced to follow the format of tcp_mem.
max: Number of pages allowed for queueing by all SCTP sockets.
Default is calculated at boot time from the amount of available memory.
sctp_rmem - vector of 3 INTEGERs: min, default, max
See tcp_rmem for a description.
sctp_wmem - vector of 3 INTEGERs: min, default, max
See tcp_wmem for a description.
UNDOCUMENTED:
dev_weight FIXME
discovery_slots FIXME
discovery_timeout FIXME
fast_poll_increase FIXME
ip6_queue_maxlen FIXME
lap_keepalive_time FIXME
lo_cong FIXME
max_baud_rate FIXME
max_dgram_qlen FIXME
max_noreply_time FIXME
max_tx_data_size FIXME
max_tx_window FIXME
min_tx_turn_time FIXME
mod_cong FIXME
no_cong FIXME
no_cong_thresh FIXME
slot_timeout FIXME
warn_noreply_time FIXME
/proc/sys/net/core/*
dev_weight FIXME
/proc/sys/net/unix/*
max_dgram_qlen FIXME
/proc/sys/net/irda/*
fast_poll_increase FIXME
warn_noreply_time FIXME
discovery_slots FIXME
slot_timeout FIXME
max_baud_rate FIXME
discovery_timeout FIXME
lap_keepalive_time FIXME
max_noreply_time FIXME
max_tx_data_size FIXME
max_tx_window FIXME
min_tx_turn_time FIXME

View File

@ -26,7 +26,7 @@
static unsigned long icache_size, dcache_size; /* Size in bytes */
static unsigned long icache_lsize, dcache_lsize; /* Size in bytes */
unsigned long __init r3k_cache_size(unsigned long ca_flags)
unsigned long __cpuinit r3k_cache_size(unsigned long ca_flags)
{
unsigned long flags, status, dummy, size;
volatile unsigned long *p;
@ -61,7 +61,7 @@ unsigned long __init r3k_cache_size(unsigned long ca_flags)
return size * sizeof(*p);
}
unsigned long __init r3k_cache_lsize(unsigned long ca_flags)
unsigned long __cpuinit r3k_cache_lsize(unsigned long ca_flags)
{
unsigned long flags, status, lsize, i;
volatile unsigned long *p;
@ -90,7 +90,7 @@ unsigned long __init r3k_cache_lsize(unsigned long ca_flags)
return lsize * sizeof(*p);
}
static void __init r3k_probe_cache(void)
static void __cpuinit r3k_probe_cache(void)
{
dcache_size = r3k_cache_size(ST0_ISC);
if (dcache_size)

View File

@ -235,13 +235,12 @@ static void __cpuinit set_prefetch_parameters(void)
}
/*
* Too much unrolling will overflow the available space in
* clear_space_array / copy_page_array. 8 words sounds generous,
* but a R4000 with 128 byte L2 line length can exceed even that.
* clear_space_array / copy_page_array.
*/
half_clear_loop_size = min(8 * clear_word_size,
half_clear_loop_size = min(16 * clear_word_size,
max(cache_line_size >> 1,
4 * clear_word_size));
half_copy_loop_size = min(8 * copy_word_size,
half_copy_loop_size = min(16 * copy_word_size,
max(cache_line_size >> 1,
4 * copy_word_size));
}
@ -263,21 +262,23 @@ static inline void __cpuinit build_clear_pref(u32 **buf, int off)
if (pref_bias_clear_store) {
uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off,
A0);
} else if (cpu_has_cache_cdex_s) {
uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
} else if (cpu_has_cache_cdex_p) {
if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
uasm_i_nop(buf);
uasm_i_nop(buf);
uasm_i_nop(buf);
uasm_i_nop(buf);
} else if (cache_line_size == (half_clear_loop_size << 1)) {
if (cpu_has_cache_cdex_s) {
uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
} else if (cpu_has_cache_cdex_p) {
if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
uasm_i_nop(buf);
uasm_i_nop(buf);
uasm_i_nop(buf);
uasm_i_nop(buf);
}
if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
uasm_i_lw(buf, ZERO, ZERO, AT);
uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
}
}
if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
uasm_i_lw(buf, ZERO, ZERO, AT);
uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
}
}
void __cpuinit build_clear_page(void)
@ -403,20 +404,22 @@ static inline void build_copy_store_pref(u32 **buf, int off)
if (pref_bias_copy_store) {
uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off,
A0);
} else if (cpu_has_cache_cdex_s) {
uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
} else if (cpu_has_cache_cdex_p) {
if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
uasm_i_nop(buf);
uasm_i_nop(buf);
uasm_i_nop(buf);
uasm_i_nop(buf);
} else if (cache_line_size == (half_copy_loop_size << 1)) {
if (cpu_has_cache_cdex_s) {
uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
} else if (cpu_has_cache_cdex_p) {
if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
uasm_i_nop(buf);
uasm_i_nop(buf);
uasm_i_nop(buf);
uasm_i_nop(buf);
}
if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
uasm_i_lw(buf, ZERO, ZERO, AT);
uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
}
if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
uasm_i_lw(buf, ZERO, ZERO, AT);
uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
}
}

View File

@ -86,7 +86,7 @@ static void rm7k_sc_inv(unsigned long addr, unsigned long size)
/*
* This function is executed in uncached address space.
*/
static __init void __rm7k_sc_enable(void)
static __cpuinit void __rm7k_sc_enable(void)
{
int i;
@ -107,7 +107,7 @@ static __init void __rm7k_sc_enable(void)
}
}
static __init void rm7k_sc_enable(void)
static __cpuinit void rm7k_sc_enable(void)
{
if (read_c0_config() & RM7K_CONF_SE)
return;

View File

@ -33,13 +33,14 @@ static struct legacy_serial_info {
phys_addr_t taddr;
} legacy_serial_infos[MAX_LEGACY_SERIAL_PORTS];
static struct __initdata of_device_id parents[] = {
static struct __initdata of_device_id legacy_serial_parents[] = {
{.type = "soc",},
{.type = "tsi-bridge",},
{.type = "opb", },
{.compatible = "ibm,opb",},
{.compatible = "simple-bus",},
{.compatible = "wrs,epld-localbus",},
{},
};
static unsigned int legacy_serial_count;
@ -327,7 +328,7 @@ void __init find_legacy_serial_ports(void)
struct device_node *parent = of_get_parent(np);
if (!parent)
continue;
if (of_match_node(parents, parent) != NULL) {
if (of_match_node(legacy_serial_parents, parent) != NULL) {
index = add_legacy_soc_port(np, np);
if (index >= 0 && np == stdout)
legacy_serial_console = index;

View File

@ -76,6 +76,8 @@ struct of_device* of_platform_device_create(struct device_node *np,
return NULL;
dev->dma_mask = 0xffffffffUL;
dev->dev.coherent_dma_mask = DMA_32BIT_MASK;
dev->dev.bus = &of_platform_bus_type;
/* We do not fill the DMA ops for platform devices by default.

View File

@ -77,7 +77,6 @@ include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
KERNEL_DEFINES = $(strip -Derrno=kernel_errno -Dsigprocmask=kernel_sigprocmask \
-Dmktime=kernel_mktime $(ARCH_KERNEL_DEFINES))
KBUILD_CFLAGS += $(KERNEL_DEFINES)
KBUILD_CFLAGS += $(call cc-option,-fno-unit-at-a-time,)
PHONY += linux

View File

@ -32,4 +32,11 @@ cflags-y += $(call cc-option,-mpreferred-stack-boundary=2)
# an unresolved reference.
cflags-y += -ffreestanding
# Disable unit-at-a-time mode on pre-gcc-4.0 compilers, it makes gcc use
# a lot more stack due to the lack of sharing of stacklots. Also, gcc
# 4.3.0 needs -funit-at-a-time for extern inline functions.
KBUILD_CFLAGS += $(shell if [ $(call cc-version) -lt 0400 ] ; then \
echo $(call cc-option,-fno-unit-at-a-time); \
else echo $(call cc-option,-funit-at-a-time); fi ;)
KBUILD_CFLAGS += $(cflags-y)

View File

@ -21,3 +21,6 @@ HEADER_ARCH := x86
LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib64
LINK-y += -m64
# Do unit-at-a-time unconditionally on x86_64, following the host
KBUILD_CFLAGS += $(call cc-option,-funit-at-a-time)

View File

@ -1,2 +1,3 @@
vsyscall.lds
vsyscall_32.lds
vmlinux.lds

View File

@ -300,6 +300,29 @@ void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
}
EXPORT_SYMBOL(ioremap_cache);
static void __iomem *ioremap_default(resource_size_t phys_addr,
unsigned long size)
{
unsigned long flags;
void *ret;
int err;
/*
* - WB for WB-able memory and no other conflicting mappings
* - UC_MINUS for non-WB-able memory with no other conflicting mappings
* - Inherit from conflicting mappings otherwise
*/
err = reserve_memtype(phys_addr, phys_addr + size, -1, &flags);
if (err < 0)
return NULL;
ret = (void *) __ioremap_caller(phys_addr, size, flags,
__builtin_return_address(0));
free_memtype(phys_addr, phys_addr + size);
return (void __iomem *)ret;
}
/**
* iounmap - Free a IO remapping
* @addr: virtual address from ioremap_*
@ -365,7 +388,7 @@ void *xlate_dev_mem_ptr(unsigned long phys)
if (page_is_ram(start >> PAGE_SHIFT))
return __va(phys);
addr = (void *)ioremap(start, PAGE_SIZE);
addr = (void *)ioremap_default(start, PAGE_SIZE);
if (addr)
addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

View File

@ -328,18 +328,18 @@ static struct dmi_system_id __devinitdata pciprobe_dmi_table[] = {
#endif
{
.callback = set_bf_sort,
.ident = "HP ProLiant DL360",
.ident = "HP ProLiant DL385 G2",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL360"),
DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL385 G2"),
},
},
{
.callback = set_bf_sort,
.ident = "HP ProLiant DL380",
.ident = "HP ProLiant DL585 G2",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL380"),
DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL585 G2"),
},
},
{}

View File

@ -117,6 +117,7 @@ static int chainiv_init(struct crypto_tfm *tfm)
static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx)
{
int queued;
int err = ctx->err;
if (!ctx->queue.qlen) {
smp_mb__before_clear_bit();
@ -131,7 +132,7 @@ static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx)
BUG_ON(!queued);
out:
return ctx->err;
return err;
}
static int async_chainiv_postpone_request(struct skcipher_givcrypt_request *req)
@ -227,6 +228,7 @@ static void async_chainiv_do_postponed(struct work_struct *work)
postponed);
struct skcipher_givcrypt_request *req;
struct ablkcipher_request *subreq;
int err;
/* Only handle one request at a time to avoid hogging keventd. */
spin_lock_bh(&ctx->lock);
@ -241,7 +243,11 @@ static void async_chainiv_do_postponed(struct work_struct *work)
subreq = skcipher_givcrypt_reqctx(req);
subreq->base.flags |= CRYPTO_TFM_REQ_MAY_SLEEP;
async_chainiv_givencrypt_tail(req);
err = async_chainiv_givencrypt_tail(req);
local_bh_disable();
skcipher_givcrypt_complete(req, err);
local_bh_enable();
}
static int async_chainiv_init(struct crypto_tfm *tfm)

View File

@ -586,12 +586,6 @@ static void test_cipher(char *algo, int enc,
j = 0;
for (i = 0; i < tcount; i++) {
data = kzalloc(template[i].ilen, GFP_KERNEL);
if (!data)
continue;
memcpy(data, template[i].input, template[i].ilen);
if (template[i].iv)
memcpy(iv, template[i].iv, MAX_IVLEN);
else
@ -613,10 +607,8 @@ static void test_cipher(char *algo, int enc,
printk("setkey() failed flags=%x\n",
crypto_ablkcipher_get_flags(tfm));
if (!template[i].fail) {
kfree(data);
if (!template[i].fail)
goto out;
}
}
temp = 0;

View File

@ -76,7 +76,7 @@ struct palm_bk3710_udmatiming {
#include "../ide-timing.h"
static long ide_palm_clk;
static unsigned ideclk_period; /* in nanoseconds */
static const struct palm_bk3710_udmatiming palm_bk3710_udmatimings[6] = {
{160, 240}, /* UDMA Mode 0 */
@ -86,8 +86,6 @@ static const struct palm_bk3710_udmatiming palm_bk3710_udmatimings[6] = {
{85, 60}, /* UDMA Mode 4 */
};
static struct clk *ideclkp;
static void palm_bk3710_setudmamode(void __iomem *base, unsigned int dev,
unsigned int mode)
{
@ -97,10 +95,10 @@ static void palm_bk3710_setudmamode(void __iomem *base, unsigned int dev,
/* DMA Data Setup */
t0 = DIV_ROUND_UP(palm_bk3710_udmatimings[mode].cycletime,
ide_palm_clk) - 1;
tenv = DIV_ROUND_UP(20, ide_palm_clk) - 1;
ideclk_period) - 1;
tenv = DIV_ROUND_UP(20, ideclk_period) - 1;
trp = DIV_ROUND_UP(palm_bk3710_udmatimings[mode].rptime,
ide_palm_clk) - 1;
ideclk_period) - 1;
/* udmatim Register */
val16 = readw(base + BK3710_UDMATIM) & (dev ? 0xFF0F : 0xFFF0);
@ -141,8 +139,8 @@ static void palm_bk3710_setdmamode(void __iomem *base, unsigned int dev,
cycletime = max_t(int, t->cycle, min_cycle);
/* DMA Data Setup */
t0 = DIV_ROUND_UP(cycletime, ide_palm_clk);
td = DIV_ROUND_UP(t->active, ide_palm_clk);
t0 = DIV_ROUND_UP(cycletime, ideclk_period);
td = DIV_ROUND_UP(t->active, ideclk_period);
tkw = t0 - td - 1;
td -= 1;
@ -168,9 +166,9 @@ static void palm_bk3710_setpiomode(void __iomem *base, ide_drive_t *mate,
struct ide_timing *t;
/* PIO Data Setup */
t0 = DIV_ROUND_UP(cycletime, ide_palm_clk);
t0 = DIV_ROUND_UP(cycletime, ideclk_period);
t2 = DIV_ROUND_UP(ide_timing_find_mode(XFER_PIO_0 + mode)->active,
ide_palm_clk);
ideclk_period);
t2i = t0 - t2 - 1;
t2 -= 1;
@ -192,8 +190,8 @@ static void palm_bk3710_setpiomode(void __iomem *base, ide_drive_t *mate,
/* TASKFILE Setup */
t = ide_timing_find_mode(XFER_PIO_0 + mode);
t0 = DIV_ROUND_UP(t->cyc8b, ide_palm_clk);
t2 = DIV_ROUND_UP(t->act8b, ide_palm_clk);
t0 = DIV_ROUND_UP(t->cyc8b, ideclk_period);
t2 = DIV_ROUND_UP(t->act8b, ideclk_period);
t2i = t0 - t2 - 1;
t2 -= 1;
@ -350,22 +348,22 @@ static const struct ide_port_info __devinitdata palm_bk3710_port_info = {
static int __devinit palm_bk3710_probe(struct platform_device *pdev)
{
struct clk *clkp;
struct clk *clk;
struct resource *mem, *irq;
ide_hwif_t *hwif;
unsigned long base;
unsigned long base, rate;
int i;
hw_regs_t hw;
u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
clkp = clk_get(NULL, "IDECLK");
if (IS_ERR(clkp))
clk = clk_get(NULL, "IDECLK");
if (IS_ERR(clk))
return -ENODEV;
ideclkp = clkp;
clk_enable(ideclkp);
ide_palm_clk = clk_get_rate(ideclkp)/100000;
ide_palm_clk = (10000/ide_palm_clk) + 1;
clk_enable(clk);
rate = clk_get_rate(clk);
ideclk_period = 1000000000UL / rate;
/* Register the IDE interface with Linux ATA Interface */
memset(&hw, 0, sizeof(hw));

View File

@ -1218,16 +1218,12 @@ static void drive_release_dev (struct device *dev)
complete(&drive->gendev_rel_comp);
}
#ifndef ide_default_irq
#define ide_default_irq(irq) 0
#endif
static int hwif_init(ide_hwif_t *hwif)
{
int old_irq;
if (!hwif->irq) {
hwif->irq = ide_default_irq(hwif->io_ports.data_addr);
hwif->irq = __ide_default_irq(hwif->io_ports.data_addr);
if (!hwif->irq) {
printk("%s: DISABLED, NO IRQ\n", hwif->name);
return 0;
@ -1257,7 +1253,7 @@ static int hwif_init(ide_hwif_t *hwif)
* It failed to initialise. Find the default IRQ for
* this port and try that.
*/
hwif->irq = ide_default_irq(hwif->io_ports.data_addr);
hwif->irq = __ide_default_irq(hwif->io_ports.data_addr);
if (!hwif->irq) {
printk("%s: Disabled unable to get IRQ %d.\n",
hwif->name, old_irq);

View File

@ -184,8 +184,7 @@ static const struct ide_port_info it8213_chipsets[] __devinitdata = {
static int __devinit it8213_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
ide_setup_pci_device(dev, &it8213_chipsets[id->driver_data]);
return 0;
return ide_setup_pci_device(dev, &it8213_chipsets[id->driver_data]);
}
static const struct pci_device_id it8213_pci_tbl[] = {

View File

@ -225,10 +225,6 @@ static int ns87415_dma_setup(ide_drive_t *drive)
return 1;
}
#ifndef ide_default_irq
#define ide_default_irq(irq) 0
#endif
static void __devinit init_hwif_ns87415 (ide_hwif_t *hwif)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
@ -288,7 +284,7 @@ static void __devinit init_hwif_ns87415 (ide_hwif_t *hwif)
}
if (!using_inta)
hwif->irq = ide_default_irq(hwif->io_ports.data_addr);
hwif->irq = __ide_default_irq(hwif->io_ports.data_addr);
else if (!hwif->irq && hwif->mate && hwif->mate->irq)
hwif->irq = hwif->mate->irq; /* share IRQ with mate */

View File

@ -1096,7 +1096,9 @@ static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr, ch
struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
PDBG("%s dev 0x%p\n", __func__, dev);
rtnl_lock();
lldev->ethtool_ops->get_drvinfo(lldev, &info);
rtnl_unlock();
return sprintf(buf, "%s\n", info.fw_version);
}
@ -1109,7 +1111,9 @@ static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
PDBG("%s dev 0x%p\n", __func__, dev);
rtnl_lock();
lldev->ethtool_ops->get_drvinfo(lldev, &info);
rtnl_unlock();
return sprintf(buf, "%s\n", info.driver);
}

View File

@ -114,6 +114,7 @@ static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
unsigned int nob = data->blocks;
unsigned long long clks;
unsigned int timeout;
bool dalgn = 0;
u32 dcmd;
int i;
@ -152,6 +153,9 @@ static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
host->sg_cpu[i].dcmd = dcmd | length;
if (length & 31 && !(data->flags & MMC_DATA_READ))
host->sg_cpu[i].dcmd |= DCMD_ENDIRQEN;
/* Not aligned to 8-byte boundary? */
if (sg_dma_address(&data->sg[i]) & 0x7)
dalgn = 1;
if (data->flags & MMC_DATA_READ) {
host->sg_cpu[i].dsadr = host->res->start + MMC_RXFIFO;
host->sg_cpu[i].dtadr = sg_dma_address(&data->sg[i]);
@ -165,6 +169,15 @@ static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
host->sg_cpu[host->dma_len - 1].ddadr = DDADR_STOP;
wmb();
/*
* The PXA27x DMA controller encounters overhead when working with
* unaligned (to 8-byte boundaries) data, so switch on byte alignment
* mode only if we have unaligned data.
*/
if (dalgn)
DALGN |= (1 << host->dma);
else
DALGN &= ~(1 << host->dma);
DDADR(host->dma) = host->sg_dma;
DCSR(host->dma) = DCSR_RUN;
}

View File

@ -40,7 +40,7 @@
#include <asm/io.h>
#define DRV_NAME "ehea"
#define DRV_VERSION "EHEA_0091"
#define DRV_VERSION "EHEA_0092"
/* eHEA capability flags */
#define DLPAR_PORT_ADD_REM 1
@ -452,7 +452,7 @@ struct ehea_bcmc_reg_entry {
struct ehea_bcmc_reg_array {
struct ehea_bcmc_reg_entry *arr;
int num_entries;
struct mutex lock;
spinlock_t lock;
};
#define EHEA_PORT_UP 1
@ -478,6 +478,7 @@ struct ehea_port {
int num_add_tx_qps;
int num_mcs;
int resets;
u64 flags;
u64 mac_addr;
u32 logical_port_id;
u32 port_speed;
@ -501,7 +502,8 @@ struct port_res_cfg {
};
enum ehea_flag_bits {
__EHEA_STOP_XFER
__EHEA_STOP_XFER,
__EHEA_DISABLE_PORT_RESET
};
void ehea_set_ethtool_ops(struct net_device *netdev);

View File

@ -118,6 +118,7 @@ static struct of_device_id ehea_device_table[] = {
},
{},
};
MODULE_DEVICE_TABLE(of, ehea_device_table);
static struct of_platform_driver ehea_driver = {
.name = "ehea",
@ -137,6 +138,12 @@ void ehea_dump(void *adr, int len, char *msg)
}
}
void ehea_schedule_port_reset(struct ehea_port *port)
{
if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
schedule_work(&port->reset_task);
}
static void ehea_update_firmware_handles(void)
{
struct ehea_fw_handle_entry *arr = NULL;
@ -241,7 +248,7 @@ static void ehea_update_bcmc_registrations(void)
}
if (num_registrations) {
arr = kzalloc(num_registrations * sizeof(*arr), GFP_KERNEL);
arr = kzalloc(num_registrations * sizeof(*arr), GFP_ATOMIC);
if (!arr)
return; /* Keep the existing array */
} else
@ -301,7 +308,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
memset(stats, 0, sizeof(*stats));
cb2 = kzalloc(PAGE_SIZE, GFP_KERNEL);
cb2 = kzalloc(PAGE_SIZE, GFP_ATOMIC);
if (!cb2) {
ehea_error("no mem for cb2");
goto out;
@ -587,7 +594,7 @@ static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
"Resetting port.", pr->qp->init_attr.qp_nr);
ehea_dump(cqe, sizeof(*cqe), "CQE");
}
schedule_work(&pr->port->reset_task);
ehea_schedule_port_reset(pr->port);
return 1;
}
@ -616,7 +623,7 @@ static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
*tcph = tcp_hdr(skb);
/* check if ip header and tcp header are complete */
if (iph->tot_len < ip_len + tcp_hdrlen(skb))
if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
return -1;
*hdr_flags = LRO_IPV4 | LRO_TCP;
@ -765,7 +772,7 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
ehea_error("Send Completion Error: Resetting port");
if (netif_msg_tx_err(pr->port))
ehea_dump(cqe, sizeof(*cqe), "Send CQE");
schedule_work(&pr->port->reset_task);
ehea_schedule_port_reset(pr->port);
break;
}
@ -885,7 +892,7 @@ static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
eqe = ehea_poll_eq(port->qp_eq);
}
schedule_work(&port->reset_task);
ehea_schedule_port_reset(port);
return IRQ_HANDLED;
}
@ -1763,7 +1770,7 @@ static int ehea_set_mac_addr(struct net_device *dev, void *sa)
memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);
mutex_lock(&ehea_bcmc_regs.lock);
spin_lock(&ehea_bcmc_regs.lock);
/* Deregister old MAC in pHYP */
if (port->state == EHEA_PORT_UP) {
@ -1785,7 +1792,7 @@ static int ehea_set_mac_addr(struct net_device *dev, void *sa)
out_upregs:
ehea_update_bcmc_registrations();
mutex_unlock(&ehea_bcmc_regs.lock);
spin_unlock(&ehea_bcmc_regs.lock);
out_free:
kfree(cb0);
out:
@ -1947,7 +1954,7 @@ static void ehea_set_multicast_list(struct net_device *dev)
}
ehea_promiscuous(dev, 0);
mutex_lock(&ehea_bcmc_regs.lock);
spin_lock(&ehea_bcmc_regs.lock);
if (dev->flags & IFF_ALLMULTI) {
ehea_allmulti(dev, 1);
@ -1978,7 +1985,7 @@ static void ehea_set_multicast_list(struct net_device *dev)
}
out:
ehea_update_bcmc_registrations();
mutex_unlock(&ehea_bcmc_regs.lock);
spin_unlock(&ehea_bcmc_regs.lock);
return;
}
@ -2497,7 +2504,7 @@ static int ehea_up(struct net_device *dev)
}
}
mutex_lock(&ehea_bcmc_regs.lock);
spin_lock(&ehea_bcmc_regs.lock);
ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
if (ret) {
@ -2520,7 +2527,7 @@ out:
ehea_info("Failed starting %s. ret=%i", dev->name, ret);
ehea_update_bcmc_registrations();
mutex_unlock(&ehea_bcmc_regs.lock);
spin_unlock(&ehea_bcmc_regs.lock);
ehea_update_firmware_handles();
mutex_unlock(&ehea_fw_handles.lock);
@ -2575,7 +2582,7 @@ static int ehea_down(struct net_device *dev)
mutex_lock(&ehea_fw_handles.lock);
mutex_lock(&ehea_bcmc_regs.lock);
spin_lock(&ehea_bcmc_regs.lock);
ehea_drop_multicast_list(dev);
ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
@ -2584,7 +2591,7 @@ static int ehea_down(struct net_device *dev)
port->state = EHEA_PORT_DOWN;
ehea_update_bcmc_registrations();
mutex_unlock(&ehea_bcmc_regs.lock);
spin_unlock(&ehea_bcmc_regs.lock);
ret = ehea_clean_all_portres(port);
if (ret)
@ -2605,13 +2612,14 @@ static int ehea_stop(struct net_device *dev)
if (netif_msg_ifdown(port))
ehea_info("disabling port %s", dev->name);
set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
cancel_work_sync(&port->reset_task);
mutex_lock(&port->port_lock);
netif_stop_queue(dev);
port_napi_disable(port);
ret = ehea_down(dev);
mutex_unlock(&port->port_lock);
clear_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
return ret;
}
@ -2941,7 +2949,7 @@ static void ehea_tx_watchdog(struct net_device *dev)
if (netif_carrier_ok(dev) &&
!test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
schedule_work(&port->reset_task);
ehea_schedule_port_reset(port);
}
int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
@ -3590,7 +3598,7 @@ int __init ehea_module_init(void)
memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));
mutex_init(&ehea_fw_handles.lock);
mutex_init(&ehea_bcmc_regs.lock);
spin_lock_init(&ehea_bcmc_regs.lock);
ret = check_module_parm();
if (ret)

View File

@ -4194,12 +4194,23 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
netif_carrier_off(dev);
if (netif_running(dev)) {
unsigned long flags;
nv_disable_irq(dev);
netif_tx_lock_bh(dev);
spin_lock(&np->lock);
/* with plain spinlock lockdep complains */
spin_lock_irqsave(&np->lock, flags);
/* stop engines */
/* FIXME:
* this can take some time, and interrupts are disabled
* due to spin_lock_irqsave, but let's hope no daemon
* is going to change the settings very often...
* Worst case:
* NV_RXSTOP_DELAY1MAX + NV_TXSTOP_DELAY1MAX
* + some minor delays, which is up to a second approximately
*/
nv_stop_rxtx(dev);
spin_unlock(&np->lock);
spin_unlock_irqrestore(&np->lock, flags);
netif_tx_unlock_bh(dev);
}

View File

@ -463,6 +463,9 @@ static void restart(struct net_device *dev)
else
C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
/* Restore multicast and promiscuous settings */
set_multicast_list(dev);
S32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT);
}

View File

@ -1636,6 +1636,12 @@ static int emac_poll_rx(void *param, int budget)
goto next;
}
if (len < ETH_HLEN) {
++dev->estats.rx_dropped_stack;
emac_recycle_rx_skb(dev, slot, len);
goto next;
}
if (len && len < EMAC_RX_COPY_THRESH) {
struct sk_buff *copy_skb =
alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
@ -2719,6 +2725,8 @@ static int __devinit emac_probe(struct of_device *ofdev,
/* Clean rings */
memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
memset(dev->tx_skb, 0, NUM_TX_BUFF * sizeof(struct sk_buff *));
memset(dev->rx_skb, 0, NUM_RX_BUFF * sizeof(struct sk_buff *));
/* Attach to ZMII, if needed */
if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&

View File

@ -152,6 +152,7 @@ static chipio_t pnp_info;
static const struct pnp_device_id nsc_ircc_pnp_table[] = {
{ .id = "NSC6001", .driver_data = 0 },
{ .id = "IBM0071", .driver_data = 0 },
{ .id = "HWPC224", .driver_data = 0 },
{ }
};

View File

@ -1546,6 +1546,7 @@ static int via_ircc_net_open(struct net_device *dev)
IRDA_WARNING("%s, unable to allocate dma2=%d\n",
driver_name, self->io.dma2);
free_irq(self->io.irq, self);
free_dma(self->io.dma);
return -EAGAIN;
}
}
@ -1606,6 +1607,8 @@ static int via_ircc_net_close(struct net_device *dev)
EnAllInt(iobase, OFF);
free_irq(self->io.irq, dev);
free_dma(self->io.dma);
if (self->io.dma2 != self->io.dma)
free_dma(self->io.dma2);
return 0;
}

View File

@ -277,7 +277,7 @@ static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
*tcph = tcp_hdr(skb);
/* check if ip header and tcp header are complete */
if (iph->tot_len < ip_len + tcp_hdrlen(skb))
if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
return -1;
*hdr_flags = LRO_IPV4 | LRO_TCP;

View File

@ -602,6 +602,12 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
tun->attached = 1;
get_net(dev_net(tun->dev));
/* Make sure persistent devices do not get stuck in
* xoff state.
*/
if (netif_running(tun->dev))
netif_wake_queue(tun->dev);
strcpy(ifr->ifr_name, tun->dev->name);
return 0;

View File

@ -1008,6 +1008,7 @@ static int fr_rx(struct sk_buff *skb)
stats->rx_bytes += skb->len;
if (pvc->state.becn)
stats->rx_compressed++;
skb->dev = dev;
netif_rx(skb);
return NET_RX_SUCCESS;
} else {

View File

@ -777,8 +777,10 @@ static int hostap_cs_suspend(struct pcmcia_device *link)
int dev_open = 0;
struct hostap_interface *iface = NULL;
if (dev)
iface = netdev_priv(dev);
if (!dev)
return -ENODEV;
iface = netdev_priv(dev);
PDEBUG(DEBUG_EXTRA, "%s: CS_EVENT_PM_SUSPEND\n", dev_info);
if (iface && iface->local)
@ -798,8 +800,10 @@ static int hostap_cs_resume(struct pcmcia_device *link)
int dev_open = 0;
struct hostap_interface *iface = NULL;
if (dev)
iface = netdev_priv(dev);
if (!dev)
return -ENODEV;
iface = netdev_priv(dev);
PDEBUG(DEBUG_EXTRA, "%s: CS_EVENT_PM_RESUME\n", dev_info);

View File

@ -449,7 +449,7 @@ static void iwl3945_dbg_report_frame(struct iwl3945_priv *priv,
if (print_summary) {
char *title;
u32 rate;
int rate;
if (hundred)
title = "100Frames";
@ -487,7 +487,7 @@ static void iwl3945_dbg_report_frame(struct iwl3945_priv *priv,
* but you can hack it to show more, if you'd like to. */
if (dataframe)
IWL_DEBUG_RX("%s: mhd=0x%04x, dst=0x%02x, "
"len=%u, rssi=%d, chnl=%d, rate=%u, \n",
"len=%u, rssi=%d, chnl=%d, rate=%d, \n",
title, fc, header->addr1[5],
length, rssi, channel, rate);
else {
@ -588,8 +588,12 @@ static void iwl3945_add_radiotap(struct iwl3945_priv *priv,
if (rate == -1)
iwl3945_rt->rt_rate = 0;
else
else {
if (stats->band == IEEE80211_BAND_5GHZ)
rate += IWL_FIRST_OFDM_RATE;
iwl3945_rt->rt_rate = iwl3945_rates[rate].ieee;
}
/* antenna number */
antenna = phy_flags_hw & RX_RES_PHY_FLAGS_ANTENNA_MSK;

View File

@ -3528,8 +3528,12 @@ static void iwl4965_add_radiotap(struct iwl_priv *priv,
if (rate == -1)
iwl4965_rt->rt_rate = 0;
else
else {
if (stats->band == IEEE80211_BAND_5GHZ)
rate += IWL_FIRST_OFDM_RATE;
iwl4965_rt->rt_rate = iwl4965_rates[rate].ieee;
}
/*
* "antenna number"

View File

@ -6687,7 +6687,8 @@ static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) {
IWL_DEBUG_MAC80211("leave - monitor\n");
return -1;
dev_kfree_skb_any(skb);
return 0;
}
IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,

View File

@ -6237,7 +6237,8 @@ static int iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) {
IWL_DEBUG_MAC80211("leave - monitor\n");
return -1;
dev_kfree_skb_any(skb);
return 0;
}
IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,

View File

@ -925,6 +925,7 @@ static struct usb_driver if_usb_driver = {
.id_table = if_usb_table,
.suspend = if_usb_suspend,
.resume = if_usb_resume,
.reset_resume = if_usb_resume,
};
static int __init if_usb_init_module(void)

View File

@ -567,11 +567,11 @@ static int lbs_process_bss(struct bss_descriptor *bss,
pos += 8;
/* beacon interval is 2 bytes long */
bss->beaconperiod = le16_to_cpup((void *) pos);
bss->beaconperiod = get_unaligned_le16(pos);
pos += 2;
/* capability information is 2 bytes long */
bss->capability = le16_to_cpup((void *) pos);
bss->capability = get_unaligned_le16(pos);
lbs_deb_scan("process_bss: capabilities 0x%04x\n", bss->capability);
pos += 2;

View File

@ -731,6 +731,17 @@ static int rt2400pci_init_registers(struct rt2x00_dev *rt2x00dev)
(rt2x00dev->rx->data_size / 128));
rt2x00pci_register_write(rt2x00dev, CSR9, reg);
rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 0);
rt2x00_set_field32(&reg, CSR14_TSF_SYNC, 0);
rt2x00_set_field32(&reg, CSR14_TBCN, 0);
rt2x00_set_field32(&reg, CSR14_TCFP, 0);
rt2x00_set_field32(&reg, CSR14_TATIMW, 0);
rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
rt2x00_set_field32(&reg, CSR14_CFP_COUNT_PRELOAD, 0);
rt2x00_set_field32(&reg, CSR14_TBCM_PRELOAD, 0);
rt2x00pci_register_write(rt2x00dev, CSR14, reg);
rt2x00pci_register_write(rt2x00dev, CNT3, 0x3f080000);
rt2x00pci_register_read(rt2x00dev, ARCSR0, &reg);

View File

@ -824,6 +824,17 @@ static int rt2500pci_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, CSR11_CW_SELECT, 0);
rt2x00pci_register_write(rt2x00dev, CSR11, reg);
rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 0);
rt2x00_set_field32(&reg, CSR14_TSF_SYNC, 0);
rt2x00_set_field32(&reg, CSR14_TBCN, 0);
rt2x00_set_field32(&reg, CSR14_TCFP, 0);
rt2x00_set_field32(&reg, CSR14_TATIMW, 0);
rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
rt2x00_set_field32(&reg, CSR14_CFP_COUNT_PRELOAD, 0);
rt2x00_set_field32(&reg, CSR14_TBCM_PRELOAD, 0);
rt2x00pci_register_write(rt2x00dev, CSR14, reg);
rt2x00pci_register_write(rt2x00dev, CNT3, 0);
rt2x00pci_register_read(rt2x00dev, TXCSR8, &reg);

View File

@ -801,6 +801,13 @@ static int rt2500usb_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field16(&reg, TXRX_CSR8_BBP_ID1_VALID, 0);
rt2500usb_register_write(rt2x00dev, TXRX_CSR8, reg);
rt2500usb_register_read(rt2x00dev, TXRX_CSR19, &reg);
rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 0);
rt2x00_set_field16(&reg, TXRX_CSR19_TSF_SYNC, 0);
rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 0);
rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 0);
rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
rt2500usb_register_write(rt2x00dev, TXRX_CSR21, 0xe78f);
rt2500usb_register_write(rt2x00dev, MAC_CSR9, 0xff1d);

View File

@ -1201,6 +1201,15 @@ static int rt61pci_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, TXRX_CSR8_ACK_CTS_54MBS, 42);
rt2x00pci_register_write(rt2x00dev, TXRX_CSR8, reg);
rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg);
rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_INTERVAL, 0);
rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 0);
rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, 0);
rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 0);
rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
rt2x00_set_field32(&reg, TXRX_CSR9_TIMESTAMP_COMPENSATE, 0);
rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
rt2x00pci_register_write(rt2x00dev, TXRX_CSR15, 0x0000000f);
rt2x00pci_register_write(rt2x00dev, MAC_CSR6, 0x00000fff);

View File

@ -1006,6 +1006,15 @@ static int rt73usb_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, TXRX_CSR8_ACK_CTS_54MBS, 42);
rt73usb_register_write(rt2x00dev, TXRX_CSR8, reg);
rt73usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_INTERVAL, 0);
rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 0);
rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, 0);
rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 0);
rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
rt2x00_set_field32(&reg, TXRX_CSR9_TIMESTAMP_COMPENSATE, 0);
rt73usb_register_write(rt2x00dev, TXRX_CSR9, reg);
rt73usb_register_write(rt2x00dev, TXRX_CSR15, 0x0000000f);
rt73usb_register_read(rt2x00dev, MAC_CSR6, &reg);

View File

@ -765,6 +765,7 @@ static void zd_op_remove_interface(struct ieee80211_hw *hw,
{
struct zd_mac *mac = zd_hw_mac(hw);
mac->type = IEEE80211_IF_TYPE_INVALID;
zd_set_beacon_interval(&mac->chip, 0);
zd_write_mac_addr(&mac->chip, NULL);
}

View File

@ -64,6 +64,7 @@ static struct usb_device_id usb_ids[] = {
{ USB_DEVICE(0x079b, 0x0062), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x1582, 0x6003), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x050d, 0x705c), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x083a, 0xe506), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x083a, 0x4505), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0471, 0x1236), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x13b1, 0x0024), .driver_info = DEVICE_ZD1211B },

View File

@ -537,6 +537,13 @@ int ssb_pcicore_dev_irqvecs_enable(struct ssb_pcicore *pc,
int err = 0;
u32 tmp;
if (dev->bus->bustype != SSB_BUSTYPE_PCI) {
/* This SSB device is not on a PCI host-bus. So the IRQs are
* not routed through the PCI core.
* So we must not enable routing through the PCI core. */
goto out;
}
if (!pdev)
goto out;
bus = pdev->bus;

View File

@ -924,6 +924,15 @@ static int register_root_hub(struct usb_hcd *hcd)
return retval;
}
void usb_enable_root_hub_irq (struct usb_bus *bus)
{
struct usb_hcd *hcd;
hcd = container_of (bus, struct usb_hcd, self);
if (hcd->driver->hub_irq_enable && hcd->state != HC_STATE_HALT)
hcd->driver->hub_irq_enable (hcd);
}
/*-------------------------------------------------------------------------*/

View File

@ -210,6 +210,8 @@ struct hc_driver {
int (*bus_suspend)(struct usb_hcd *);
int (*bus_resume)(struct usb_hcd *);
int (*start_port_reset)(struct usb_hcd *, unsigned port_num);
void (*hub_irq_enable)(struct usb_hcd *);
/* Needed only if port-change IRQs are level-triggered */
/* force handover of high-speed port to full-speed companion */
void (*relinquish_port)(struct usb_hcd *, int);

View File

@ -2073,6 +2073,8 @@ int usb_port_resume(struct usb_device *udev)
}
clear_bit(port1, hub->busy_bits);
if (!hub->hdev->parent && !hub->busy_bits[0])
usb_enable_root_hub_irq(hub->hdev->bus);
if (status == 0)
status = finish_port_resume(udev);
@ -3002,6 +3004,11 @@ static void hub_events(void)
hub->activating = 0;
/* If this is a root hub, tell the HCD it's okay to
* re-enable port-change interrupts now. */
if (!hdev->parent && !hub->busy_bits[0])
usb_enable_root_hub_irq(hdev->bus);
loop_autopm:
/* Allow autosuspend if we're not going to run again */
if (list_empty(&hub->event_list))
@ -3227,6 +3234,8 @@ int usb_reset_device(struct usb_device *udev)
break;
}
clear_bit(port1, parent_hub->busy_bits);
if (!parent_hdev->parent && !parent_hub->busy_bits[0])
usb_enable_root_hub_irq(parent_hdev->bus);
if (ret < 0)
goto re_enumerate;

View File

@ -261,6 +261,7 @@ static const struct hc_driver ohci_at91_hc_driver = {
*/
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
.hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,

View File

@ -288,6 +288,7 @@ static const struct hc_driver ohci_au1xxx_hc_driver = {
*/
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
.hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,

View File

@ -135,6 +135,7 @@ static struct hc_driver ohci_ep93xx_hc_driver = {
.get_frame_number = ohci_get_frame,
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
.hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,

View File

@ -36,6 +36,18 @@
/*-------------------------------------------------------------------------*/
/* hcd->hub_irq_enable() */
static void ohci_rhsc_enable (struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
spin_lock_irq(&ohci->lock);
if (!ohci->autostop)
del_timer(&hcd->rh_timer); /* Prevent next poll */
ohci_writel(ohci, OHCI_INTR_RHSC, &ohci->regs->intrenable);
spin_unlock_irq(&ohci->lock);
}
#define OHCI_SCHED_ENABLES \
(OHCI_CTRL_CLE|OHCI_CTRL_BLE|OHCI_CTRL_PLE|OHCI_CTRL_IE)
@ -362,28 +374,18 @@ static int ohci_root_hub_state_changes(struct ohci_hcd *ohci, int changed,
int any_connected)
{
int poll_rh = 1;
int rhsc;
rhsc = ohci_readl(ohci, &ohci->regs->intrenable) & OHCI_INTR_RHSC;
switch (ohci->hc_control & OHCI_CTRL_HCFS) {
case OHCI_USB_OPER:
/* If no status changes are pending, enable status-change
* interrupts.
*/
if (!rhsc && !changed) {
rhsc = OHCI_INTR_RHSC;
ohci_writel(ohci, rhsc, &ohci->regs->intrenable);
}
/* Keep on polling until we know a device is connected
* and RHSC is enabled, or until we autostop.
*/
/* keep on polling until we know a device is connected
* and RHSC is enabled */
if (!ohci->autostop) {
if (any_connected ||
!device_may_wakeup(&ohci_to_hcd(ohci)
->self.root_hub->dev)) {
if (rhsc)
if (ohci_readl(ohci, &ohci->regs->intrenable) &
OHCI_INTR_RHSC)
poll_rh = 0;
} else {
ohci->autostop = 1;
@ -396,13 +398,12 @@ static int ohci_root_hub_state_changes(struct ohci_hcd *ohci, int changed,
ohci->autostop = 0;
ohci->next_statechange = jiffies +
STATECHANGE_DELAY;
} else if (rhsc && time_after_eq(jiffies,
} else if (time_after_eq(jiffies,
ohci->next_statechange)
&& !ohci->ed_rm_list
&& !(ohci->hc_control &
OHCI_SCHED_ENABLES)) {
ohci_rh_suspend(ohci, 1);
poll_rh = 0;
}
}
break;
@ -416,12 +417,6 @@ static int ohci_root_hub_state_changes(struct ohci_hcd *ohci, int changed,
else
usb_hcd_resume_root_hub(ohci_to_hcd(ohci));
} else {
if (!rhsc && (ohci->autostop ||
ohci_to_hcd(ohci)->self.root_hub->
do_remote_wakeup))
ohci_writel(ohci, OHCI_INTR_RHSC,
&ohci->regs->intrenable);
/* everything is idle, no need for polling */
poll_rh = 0;
}
@ -443,16 +438,12 @@ static inline int ohci_rh_resume(struct ohci_hcd *ohci)
static int ohci_root_hub_state_changes(struct ohci_hcd *ohci, int changed,
int any_connected)
{
/* If RHSC is enabled, don't poll */
if (ohci_readl(ohci, &ohci->regs->intrenable) & OHCI_INTR_RHSC)
return 0;
int poll_rh = 1;
/* If no status changes are pending, enable status-change interrupts */
if (!changed) {
ohci_writel(ohci, OHCI_INTR_RHSC, &ohci->regs->intrenable);
return 0;
}
return 1;
/* keep on polling until RHSC is enabled */
if (ohci_readl(ohci, &ohci->regs->intrenable) & OHCI_INTR_RHSC)
poll_rh = 0;
return poll_rh;
}
#endif /* CONFIG_PM */

View File

@ -193,6 +193,7 @@ static const struct hc_driver ohci_lh7a404_hc_driver = {
*/
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
.hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,

View File

@ -466,6 +466,7 @@ static const struct hc_driver ohci_omap_hc_driver = {
*/
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
.hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,

View File

@ -327,6 +327,7 @@ static const struct hc_driver ohci_pci_hc_driver = {
*/
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
.hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,

View File

@ -280,6 +280,7 @@ static const struct hc_driver ohci_pnx4008_hc_driver = {
*/
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
.hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,

View File

@ -201,6 +201,7 @@ static const struct hc_driver ohci_pnx8550_hc_driver = {
*/
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
.hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,

View File

@ -72,6 +72,7 @@ static const struct hc_driver ohci_ppc_of_hc_driver = {
*/
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
.hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,

View File

@ -172,6 +172,7 @@ static const struct hc_driver ohci_ppc_soc_hc_driver = {
*/
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
.hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,

View File

@ -68,6 +68,7 @@ static const struct hc_driver ps3_ohci_hc_driver = {
.get_frame_number = ohci_get_frame,
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
.hub_irq_enable = ohci_rhsc_enable,
.start_port_reset = ohci_start_port_reset,
#if defined(CONFIG_PM)
.bus_suspend = ohci_bus_suspend,

View File

@ -298,6 +298,7 @@ static const struct hc_driver ohci_pxa27x_hc_driver = {
*/
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
.hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,

View File

@ -466,6 +466,7 @@ static const struct hc_driver ohci_s3c2410_hc_driver = {
*/
.hub_status_data = ohci_s3c2410_hub_status_data,
.hub_control = ohci_s3c2410_hub_control,
.hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,

View File

@ -231,6 +231,7 @@ static const struct hc_driver ohci_sa1111_hc_driver = {
*/
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
.hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,

View File

@ -68,6 +68,7 @@ static const struct hc_driver ohci_sh_hc_driver = {
*/
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
.hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,

View File

@ -75,6 +75,7 @@ static const struct hc_driver ohci_sm501_hc_driver = {
*/
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
.hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,

View File

@ -81,6 +81,7 @@ static const struct hc_driver ssb_ohci_hc_driver = {
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
.hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,

View File

@ -2934,6 +2934,16 @@ static int u132_start_port_reset(struct usb_hcd *hcd, unsigned port_num)
return 0;
}
static void u132_hub_irq_enable(struct usb_hcd *hcd)
{
struct u132 *u132 = hcd_to_u132(hcd);
if (u132->going > 1) {
dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
, u132->going);
} else if (u132->going > 0)
dev_err(&u132->platform_dev->dev, "device is being removed\n");
}
#ifdef CONFIG_PM
static int u132_bus_suspend(struct usb_hcd *hcd)
@ -2985,6 +2995,7 @@ static struct hc_driver u132_hc_driver = {
.bus_suspend = u132_bus_suspend,
.bus_resume = u132_bus_resume,
.start_port_reset = u132_start_port_reset,
.hub_irq_enable = u132_hub_irq_enable,
};
/*

View File

@ -1324,7 +1324,7 @@ static int fsl_diu_suspend(struct of_device *ofdev, pm_message_t state)
{
struct fsl_diu_data *machine_data;
machine_data = dev_get_drvdata(&dev->dev);
machine_data = dev_get_drvdata(&ofdev->dev);
disable_lcdc(machine_data->fsl_diu_info[0]);
return 0;
@ -1334,7 +1334,7 @@ static int fsl_diu_resume(struct of_device *ofdev)
{
struct fsl_diu_data *machine_data;
machine_data = dev_get_drvdata(&dev->dev);
machine_data = dev_get_drvdata(&ofdev->dev);
enable_lcdc(machine_data->fsl_diu_info[0]);
return 0;

View File

@ -610,7 +610,7 @@ int setup_arg_pages(struct linux_binprm *bprm,
bprm->exec -= stack_shift;
down_write(&mm->mmap_sem);
vm_flags = vma->vm_flags;
vm_flags = VM_STACK_FLAGS;
/*
* Adjust stack execute permissions; explicitly enable for

View File

@ -204,7 +204,7 @@ int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page *page)
* Note: assumes we have exclusive access to this mapping either
* through inode->i_mutex or some other mechanism.
*/
if (page->index == 0 && invalidate_inode_pages2_range(inode->i_mapping, PAGE_CACHE_SIZE, -1) < 0) {
if (invalidate_inode_pages2_range(inode->i_mapping, page->index + 1, -1) < 0) {
/* Should never happen */
nfs_zap_mapping(inode, inode->i_mapping);
}

View File

@ -606,7 +606,9 @@ static void dlm_init_lockres(struct dlm_ctxt *dlm,
res->last_used = 0;
spin_lock(&dlm->spinlock);
list_add_tail(&res->tracking, &dlm->tracking_list);
spin_unlock(&dlm->spinlock);
memset(res->lvb, 0, DLM_LVB_LEN);
memset(res->refmap, 0, sizeof(res->refmap));

View File

@ -1554,8 +1554,8 @@ out:
*/
int ocfs2_file_lock(struct file *file, int ex, int trylock)
{
int ret, level = ex ? LKM_EXMODE : LKM_PRMODE;
unsigned int lkm_flags = trylock ? LKM_NOQUEUE : 0;
int ret, level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
unsigned int lkm_flags = trylock ? DLM_LKF_NOQUEUE : 0;
unsigned long flags;
struct ocfs2_file_private *fp = file->private_data;
struct ocfs2_lock_res *lockres = &fp->fp_flock;
@ -1582,7 +1582,7 @@ int ocfs2_file_lock(struct file *file, int ex, int trylock)
* Get the lock at NLMODE to start - that way we
* can cancel the upconvert request if need be.
*/
ret = ocfs2_lock_create(osb, lockres, LKM_NLMODE, 0);
ret = ocfs2_lock_create(osb, lockres, DLM_LOCK_NL, 0);
if (ret < 0) {
mlog_errno(ret);
goto out;
@ -1597,7 +1597,7 @@ int ocfs2_file_lock(struct file *file, int ex, int trylock)
}
lockres->l_action = OCFS2_AST_CONVERT;
lkm_flags |= LKM_CONVERT;
lkm_flags |= DLM_LKF_CONVERT;
lockres->l_requested = level;
lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
@ -1664,7 +1664,7 @@ void ocfs2_file_unlock(struct file *file)
if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED))
return;
if (lockres->l_level == LKM_NLMODE)
if (lockres->l_level == DLM_LOCK_NL)
return;
mlog(0, "Unlock: \"%s\" flags: 0x%lx, level: %d, act: %d\n",
@ -1678,11 +1678,11 @@ void ocfs2_file_unlock(struct file *file)
lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
lockres->l_blocking = DLM_LOCK_EX;
gen = ocfs2_prepare_downconvert(lockres, LKM_NLMODE);
gen = ocfs2_prepare_downconvert(lockres, DLM_LOCK_NL);
lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
spin_unlock_irqrestore(&lockres->l_lock, flags);
ret = ocfs2_downconvert_lock(osb, lockres, LKM_NLMODE, 0, gen);
ret = ocfs2_downconvert_lock(osb, lockres, DLM_LOCK_NL, 0, gen);
if (ret) {
mlog_errno(ret);
return;


@ -45,6 +45,8 @@ void reiserfs_delete_inode(struct inode *inode)
goto out;
reiserfs_update_inode_transaction(inode);
reiserfs_discard_prealloc(&th, inode);
err = reiserfs_delete_object(&th, inode);
/* Do quota update inside a transaction for journaled quotas. We must do that


@ -49,12 +49,6 @@ static inline pte_t huge_pte_wrprotect(pte_t pte)
return pte_wrprotect(pte);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
ptep_set_wrprotect(mm, addr, ptep);
}
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t pte, int dirty)


@ -314,6 +314,16 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
old = pte_update(mm, addr, ptep, _PAGE_RW, 0);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
unsigned long old;
if ((pte_val(*ptep) & _PAGE_RW) == 0)
return;
old = pte_update(mm, addr, ptep, _PAGE_RW, 1);
}
/*
* We currently remove entries from the hashtable regardless of whether
* the entry was young or dirty. The generic routines only flush if the


@ -223,6 +223,9 @@ extern char empty_zero_page[PAGE_SIZE];
#define _PAGE_SPECIAL 0x004 /* SW associated with special page */
#define __HAVE_ARCH_PTE_SPECIAL
/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL)
/* Six different types of pages. */
#define _PAGE_TYPE_EMPTY 0x400
#define _PAGE_TYPE_NONE 0x401
@ -681,7 +684,7 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
*/
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
pte_val(pte) &= PAGE_MASK;
pte_val(pte) &= _PAGE_CHG_MASK;
pte_val(pte) |= pgprot_val(newprot);
return pte;
}


@ -71,7 +71,8 @@ static inline long kvm_hypercall0(unsigned int nr)
long ret;
asm volatile(KVM_HYPERCALL
: "=a"(ret)
: "a"(nr));
: "a"(nr)
: "memory");
return ret;
}
@ -80,7 +81,8 @@ static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
long ret;
asm volatile(KVM_HYPERCALL
: "=a"(ret)
: "a"(nr), "b"(p1));
: "a"(nr), "b"(p1)
: "memory");
return ret;
}
@ -90,7 +92,8 @@ static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
long ret;
asm volatile(KVM_HYPERCALL
: "=a"(ret)
: "a"(nr), "b"(p1), "c"(p2));
: "a"(nr), "b"(p1), "c"(p2)
: "memory");
return ret;
}
@ -100,7 +103,8 @@ static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
long ret;
asm volatile(KVM_HYPERCALL
: "=a"(ret)
: "a"(nr), "b"(p1), "c"(p2), "d"(p3));
: "a"(nr), "b"(p1), "c"(p2), "d"(p3)
: "memory");
return ret;
}
@ -111,7 +115,8 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
long ret;
asm volatile(KVM_HYPERCALL
: "=a"(ret)
: "a"(nr), "b"(p1), "c"(p2), "d"(p3), "S"(p4));
: "a"(nr), "b"(p1), "c"(p2), "d"(p3), "S"(p4)
: "memory");
return ret;
}
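
The hunks above add a "memory" clobber to every hypercall stub so the compiler treats the call as a compiler-level memory barrier: values held in registers are written back before the hypercall and reloaded afterwards, since the hypervisor may read or update guest memory. A minimal, self-contained sketch of the pattern (this is not the kernel's KVM_HYPERCALL macro; the vmcall mnemonic and the function name are illustrative assumptions):

static inline long hypercall1_sketch(unsigned int nr, unsigned long p1)
{
	long ret;

	/* Without the "memory" clobber the compiler could keep guest data
	 * cached in registers across the call and miss the host's updates. */
	asm volatile("vmcall"
		     : "=a"(ret)
		     : "a"(nr), "b"(p1)
		     : "memory");
	return ret;
}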


@ -189,6 +189,21 @@ static inline void ide_std_init_ports(hw_regs_t *hw,
hw->io_ports.ctl_addr = ctl_addr;
}
/* for IDE PCI controllers in legacy mode, temporary */
static inline int __ide_default_irq(unsigned long base)
{
switch (base) {
#ifdef CONFIG_IA64
case 0x1f0: return isa_irq_to_vector(14);
case 0x170: return isa_irq_to_vector(15);
#else
case 0x1f0: return 14;
case 0x170: return 15;
#endif
}
return 0;
}
#include <asm/ide.h>
#if !defined(MAX_HWIFS) || defined(CONFIG_EMBEDDED)


@ -339,6 +339,7 @@ struct xfrm_usersa_info {
#define XFRM_STATE_NOPMTUDISC 4
#define XFRM_STATE_WILDRECV 8
#define XFRM_STATE_ICMP 16
#define XFRM_STATE_AF_UNSPEC 32
};
struct xfrm_usersa_id {


@ -79,7 +79,7 @@ static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
*
* For such cases, we now have a blacklist
*/
struct kprobe_blackpoint kprobe_blacklist[] = {
static struct kprobe_blackpoint kprobe_blacklist[] = {
{"preempt_schedule",},
{NULL} /* Terminator */
};


@ -666,7 +666,7 @@ static int acquire_console_semaphore_for_printk(unsigned int cpu)
return retval;
}
const char printk_recursion_bug_msg [] =
static const char printk_recursion_bug_msg [] =
KERN_CRIT "BUG: recent printk recursion!\n";
static int printk_recursion_bug;


@ -925,7 +925,15 @@ void rcu_offline_cpu(int cpu)
spin_unlock_irqrestore(&rdp->lock, flags);
}
void __devinit rcu_online_cpu(int cpu)
#else /* #ifdef CONFIG_HOTPLUG_CPU */
void rcu_offline_cpu(int cpu)
{
}
#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
void __cpuinit rcu_online_cpu(int cpu)
{
unsigned long flags;
@ -934,18 +942,6 @@ void __devinit rcu_online_cpu(int cpu)
spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
}
#else /* #ifdef CONFIG_HOTPLUG_CPU */
void rcu_offline_cpu(int cpu)
{
}
void __devinit rcu_online_cpu(int cpu)
{
}
#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
static void rcu_process_callbacks(struct softirq_action *unused)
{
unsigned long flags;


@ -5622,10 +5622,10 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
double_rq_lock(rq_src, rq_dest);
/* Already moved. */
if (task_cpu(p) != src_cpu)
goto out;
goto done;
/* Affinity changed (again). */
if (!cpu_isset(dest_cpu, p->cpus_allowed))
goto out;
goto fail;
on_rq = p->se.on_rq;
if (on_rq)
@ -5636,8 +5636,9 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
activate_task(rq_dest, p, 0);
check_preempt_curr(rq_dest, p);
}
done:
ret = 1;
out:
fail:
double_rq_unlock(rq_src, rq_dest);
return ret;
}


@ -22,6 +22,8 @@
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <asm/page.h> /* for PAGE_SIZE */
#include <asm/div64.h>
@ -482,6 +484,89 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int
return buf;
}
static char *string(char *buf, char *end, char *s, int field_width, int precision, int flags)
{
int len, i;
if ((unsigned long)s < PAGE_SIZE)
s = "<NULL>";
len = strnlen(s, precision);
if (!(flags & LEFT)) {
while (len < field_width--) {
if (buf < end)
*buf = ' ';
++buf;
}
}
for (i = 0; i < len; ++i) {
if (buf < end)
*buf = *s;
++buf; ++s;
}
while (len < field_width--) {
if (buf < end)
*buf = ' ';
++buf;
}
return buf;
}
static inline void *dereference_function_descriptor(void *ptr)
{
#if defined(CONFIG_IA64) || defined(CONFIG_PPC64)
void *p;
if (!probe_kernel_address(ptr, p))
ptr = p;
#endif
return ptr;
}
static char *symbol_string(char *buf, char *end, void *ptr, int field_width, int precision, int flags)
{
unsigned long value = (unsigned long) ptr;
#ifdef CONFIG_KALLSYMS
char sym[KSYM_SYMBOL_LEN];
sprint_symbol(sym, value);
return string(buf, end, sym, field_width, precision, flags);
#else
field_width = 2*sizeof(void *);
flags |= SPECIAL | SMALL | ZEROPAD;
return number(buf, end, value, 16, field_width, precision, flags);
#endif
}
/*
* Show a '%p' thing. A kernel extension is that the '%p' is followed
* by an extra set of alphanumeric characters that are extended format
* specifiers.
*
* Right now we just handle 'F' (for symbolic Function descriptor pointers)
* and 'S' (for Symbolic direct pointers), but this can easily be
* extended in the future (network address types etc).
*
* The difference between 'S' and 'F' is that on ia64 and ppc64 function
* pointers are really function descriptors, which contain a pointer to the
* real address.
*/
static char *pointer(const char *fmt, char *buf, char *end, void *ptr, int field_width, int precision, int flags)
{
switch (*fmt) {
case 'F':
ptr = dereference_function_descriptor(ptr);
/* Fallthrough */
case 'S':
return symbol_string(buf, end, ptr, field_width, precision, flags);
}
flags |= SMALL;
if (field_width == -1) {
field_width = 2*sizeof(void *);
flags |= ZEROPAD;
}
return number(buf, end, (unsigned long) ptr, 16, field_width, precision, flags);
}
/**
* vsnprintf - Format a string and place it in a buffer
* @buf: The buffer to place the result into
@ -502,11 +587,9 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int
*/
int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
{
int len;
unsigned long long num;
int i, base;
int base;
char *str, *end, c;
const char *s;
int flags; /* flags to number() */
@ -622,43 +705,18 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
continue;
case 's':
s = va_arg(args, char *);
if ((unsigned long)s < PAGE_SIZE)
s = "<NULL>";
len = strnlen(s, precision);
if (!(flags & LEFT)) {
while (len < field_width--) {
if (str < end)
*str = ' ';
++str;
}
}
for (i = 0; i < len; ++i) {
if (str < end)
*str = *s;
++str; ++s;
}
while (len < field_width--) {
if (str < end)
*str = ' ';
++str;
}
str = string(str, end, va_arg(args, char *), field_width, precision, flags);
continue;
case 'p':
flags |= SMALL;
if (field_width == -1) {
field_width = 2*sizeof(void *);
flags |= ZEROPAD;
}
str = number(str, end,
(unsigned long) va_arg(args, void *),
16, field_width, precision, flags);
str = pointer(fmt+1, str, end,
va_arg(args, void *),
field_width, precision, flags);
/* Skip all alphanumeric pointer suffixes */
while (isalnum(fmt[1]))
fmt++;
continue;
case 'n':
/* FIXME:
* What does C99 say about the overflow case here? */
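
The net effect of the vsprintf refactoring above is a new pointer() hook behind '%p': '%pS' resolves a text address to a symbol name, and '%pF' first dereferences an ia64/ppc64 function descriptor before the lookup. A hedged usage sketch, assuming a kernel built with CONFIG_KALLSYMS (without it the #else branch of symbol_string() falls back to printing the address in hex); the function and variable names below are made up for illustration:

#include <linux/kernel.h>

void report_sketch(void (*handler)(void), void *text_addr)
{
	/* %pF: treat the argument as a (possible) function descriptor */
	printk(KERN_INFO "handler: %pF\n", handler);

	/* %pS: resolve a plain text address directly via sprint_symbol() */
	printk(KERN_INFO "caller:  %pS\n", text_addr);
}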


@ -1628,9 +1628,11 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
void **object;
struct kmem_cache_cpu *c;
unsigned long flags;
unsigned int objsize;
local_irq_save(flags);
c = get_cpu_slab(s, smp_processor_id());
objsize = c->objsize;
if (unlikely(!c->freelist || !node_match(c, node)))
object = __slab_alloc(s, gfpflags, node, addr, c);
@ -1643,7 +1645,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
local_irq_restore(flags);
if (unlikely((gfpflags & __GFP_ZERO) && object))
memset(object, 0, c->objsize);
memset(object, 0, objsize);
return object;
}


@ -442,12 +442,16 @@ int br_del_if(struct net_bridge *br, struct net_device *dev)
void __exit br_cleanup_bridges(void)
{
struct net_device *dev, *nxt;
struct net_device *dev;
rtnl_lock();
for_each_netdev_safe(&init_net, dev, nxt)
if (dev->priv_flags & IFF_EBRIDGE)
restart:
for_each_netdev(&init_net, dev) {
if (dev->priv_flags & IFF_EBRIDGE) {
del_br(dev->priv);
goto restart;
}
}
rtnl_unlock();
}


@ -205,12 +205,19 @@ static int can_create(struct net *net, struct socket *sock, int protocol)
* -ENOBUFS on full driver queue (see net_xmit_errno())
* -ENOMEM when local loopback failed at calling skb_clone()
* -EPERM when trying to send on a non-CAN interface
* -EINVAL when the skb->data does not contain a valid CAN frame
*/
int can_send(struct sk_buff *skb, int loop)
{
struct sk_buff *newskb = NULL;
struct can_frame *cf = (struct can_frame *)skb->data;
int err;
if (skb->len != sizeof(struct can_frame) || cf->can_dlc > 8) {
kfree_skb(skb);
return -EINVAL;
}
if (skb->dev->type != ARPHRD_CAN) {
kfree_skb(skb);
return -EPERM;
@ -605,6 +612,7 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
struct dev_rcv_lists *d;
struct can_frame *cf = (struct can_frame *)skb->data;
int matches;
if (dev->type != ARPHRD_CAN || dev_net(dev) != &init_net) {
@ -612,6 +620,8 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev,
return 0;
}
BUG_ON(skb->len != sizeof(struct can_frame) || cf->can_dlc > 8);
/* update statistics */
can_stats.rx_frames++;
can_stats.rx_frames_delta++;


@ -298,7 +298,7 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
if (head->nframes) {
/* can_frames starting here */
firstframe = (struct can_frame *) skb_tail_pointer(skb);
firstframe = (struct can_frame *)skb_tail_pointer(skb);
memcpy(skb_put(skb, datalen), frames, datalen);
@ -826,6 +826,10 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
for (i = 0; i < msg_head->nframes; i++) {
err = memcpy_fromiovec((u8 *)&op->frames[i],
msg->msg_iov, CFSIZ);
if (op->frames[i].can_dlc > 8)
err = -EINVAL;
if (err < 0)
return err;
@ -858,6 +862,10 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
for (i = 0; i < msg_head->nframes; i++) {
err = memcpy_fromiovec((u8 *)&op->frames[i],
msg->msg_iov, CFSIZ);
if (op->frames[i].can_dlc > 8)
err = -EINVAL;
if (err < 0) {
if (op->frames != &op->sframe)
kfree(op->frames);
@ -1164,9 +1172,12 @@ static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk)
skb->dev = dev;
skb->sk = sk;
can_send(skb, 1); /* send with loopback */
err = can_send(skb, 1); /* send with loopback */
dev_put(dev);
if (err)
return err;
return CFSIZ + MHSIZ;
}
@ -1185,6 +1196,10 @@ static int bcm_sendmsg(struct kiocb *iocb, struct socket *sock,
if (!bo->bound)
return -ENOTCONN;
/* check for valid message length from userspace */
if (size < MHSIZ || (size - MHSIZ) % CFSIZ)
return -EINVAL;
/* check for alternative ifindex for this bcm_op */
if (!ifindex && msg->msg_name) {
@ -1259,8 +1274,8 @@ static int bcm_sendmsg(struct kiocb *iocb, struct socket *sock,
break;
case TX_SEND:
/* we need at least one can_frame */
if (msg_head.nframes < 1)
/* we need exactly one can_frame behind the msg head */
if ((msg_head.nframes != 1) || (size != CFSIZ + MHSIZ))
ret = -EINVAL;
else
ret = bcm_tx_send(msg, ifindex, sk);
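
Taken together, the bcm.c checks above mean a BCM request from userspace must consist of exactly one struct bcm_msg_head followed by the can_frames it announces, each with can_dlc <= 8; for TX_SEND that is exactly one frame, i.e. MHSIZ + CFSIZ bytes. A hedged userspace sketch, assuming the CAN_BCM socket API from <linux/can/bcm.h>; the bcm_send_one() helper and the bcm_sock parameter (an already connect()ed CAN_BCM socket) are illustrative, not part of the patch:

#include <linux/can.h>
#include <linux/can/bcm.h>
#include <string.h>
#include <unistd.h>

static int bcm_send_one(int bcm_sock)
{
	struct {
		struct bcm_msg_head head;
		struct can_frame    frame;
	} msg;

	memset(&msg, 0, sizeof(msg));
	msg.head.opcode   = TX_SEND;
	msg.head.nframes  = 1;		/* TX_SEND now requires exactly one */
	msg.frame.can_id  = 0x123;
	msg.frame.can_dlc = 2;		/* must be <= 8, else -EINVAL       */
	msg.frame.data[0] = 0xde;
	msg.frame.data[1] = 0xad;

	/* the write must be exactly MHSIZ + CFSIZ bytes */
	if (write(bcm_sock, &msg, sizeof(msg)) != (ssize_t)sizeof(msg))
		return -1;
	return 0;
}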


@ -632,6 +632,9 @@ static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
} else
ifindex = ro->ifindex;
if (size != sizeof(struct can_frame))
return -EINVAL;
dev = dev_get_by_index(&init_net, ifindex);
if (!dev)
return -ENXIO;
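
raw_sendmsg() now rejects any send that is not exactly one struct can_frame, which pairs with the new length and can_dlc validation in can_send(). A hedged userspace sketch of a conforming sender; the helper name and the "can0"-style interface argument are assumptions for illustration, not part of the patch:

#include <linux/can.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

static int send_one_frame(const char *ifname)
{
	struct sockaddr_can addr = { .can_family = AF_CAN };
	struct can_frame frame = { .can_id = 0x123, .can_dlc = 2,
				   .data = { 0xde, 0xad } };
	struct ifreq ifr;
	int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);

	if (s < 0)
		return -1;

	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_name[IFNAMSIZ - 1] = '\0';
	if (ioctl(s, SIOCGIFINDEX, &ifr) < 0) {
		close(s);
		return -1;
	}
	addr.can_ifindex = ifr.ifr_ifindex;
	if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(s);
		return -1;
	}

	/* exactly one full can_frame per write(), or the kernel returns -EINVAL */
	if (write(s, &frame, sizeof(frame)) != (ssize_t)sizeof(frame)) {
		close(s);
		return -1;
	}
	close(s);
	return 0;
}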


@ -1359,17 +1359,17 @@ static int check_leaf(struct trie *t, struct leaf *l,
t->stats.semantic_match_miss++;
#endif
if (err <= 0)
return plen;
return err;
}
return -1;
return 1;
}
static int fn_trie_lookup(struct fib_table *tb, const struct flowi *flp,
struct fib_result *res)
{
struct trie *t = (struct trie *) tb->tb_data;
int plen, ret = 0;
int ret;
struct node *n;
struct tnode *pn;
int pos, bits;
@ -1393,10 +1393,7 @@ static int fn_trie_lookup(struct fib_table *tb, const struct flowi *flp,
/* Just a leaf? */
if (IS_LEAF(n)) {
plen = check_leaf(t, (struct leaf *)n, key, flp, res);
if (plen < 0)
goto failed;
ret = 0;
ret = check_leaf(t, (struct leaf *)n, key, flp, res);
goto found;
}
@ -1421,11 +1418,9 @@ static int fn_trie_lookup(struct fib_table *tb, const struct flowi *flp,
}
if (IS_LEAF(n)) {
plen = check_leaf(t, (struct leaf *)n, key, flp, res);
if (plen < 0)
ret = check_leaf(t, (struct leaf *)n, key, flp, res);
if (ret > 0)
goto backtrace;
ret = 0;
goto found;
}


@ -439,8 +439,8 @@ static unsigned char asn1_oid_decode(struct asn1_ctx *ctx,
unsigned int *len)
{
unsigned long subid;
unsigned int size;
unsigned long *optr;
size_t size;
size = eoc - ctx->pointer + 1;


@ -255,6 +255,7 @@
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>
#include <linux/splice.h>
#include <linux/net.h>
#include <linux/socket.h>
@ -1208,7 +1209,8 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
return -ENOTCONN;
while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
if (offset < skb->len) {
size_t used, len;
int used;
size_t len;
len = skb->len - offset;
/* Stop reading if we hit a patch of urgent data */


@ -224,7 +224,7 @@ static __init int tcpprobe_init(void)
if (bufsize < 0)
return -EINVAL;
tcp_probe.log = kcalloc(sizeof(struct tcp_log), bufsize, GFP_KERNEL);
tcp_probe.log = kcalloc(bufsize, sizeof(struct tcp_log), GFP_KERNEL);
if (!tcp_probe.log)
goto err0;
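
The one-line fix above restores kcalloc()'s expected argument order: element count first, element size second, then the GFP flags, with the result zero-initialized and the count * size product checked for overflow. A minimal sketch of the convention; the alloc_log() wrapper is illustrative and assumes tcp_probe.c's private struct tcp_log is in scope:

#include <linux/slab.h>

static struct tcp_log *alloc_log(unsigned int nentries)
{
	/* kcalloc(count, size, flags) returns zeroed memory or NULL */
	return kcalloc(nentries, sizeof(struct tcp_log), GFP_KERNEL);
}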


@ -749,12 +749,12 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
}
write_unlock_bh(&idev->lock);
addrconf_del_timer(ifp);
ipv6_ifa_notify(RTM_DELADDR, ifp);
atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifp);
addrconf_del_timer(ifp);
/*
* Purge or update corresponding prefix
*


@ -445,7 +445,7 @@ looped_back:
kfree_skb(skb);
return -1;
}
if (!ipv6_chk_home_addr(&init_net, addr)) {
if (!ipv6_chk_home_addr(dev_net(skb->dst->dev), addr)) {
IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
IPSTATS_MIB_INADDRERRORS);
kfree_skb(skb);


@ -101,8 +101,8 @@ static int irda_nl_get_mode(struct sk_buff *skb, struct genl_info *info)
hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
&irda_nl_family, 0, IRDA_NL_CMD_GET_MODE);
if (IS_ERR(hdr)) {
ret = PTR_ERR(hdr);
if (hdr == NULL) {
ret = -EMSGSIZE;
goto err_out;
}


@ -530,8 +530,6 @@ static int ieee80211_stop(struct net_device *dev)
local->sta_hw_scanning = 0;
}
flush_workqueue(local->hw.workqueue);
sdata->u.sta.flags &= ~IEEE80211_STA_PRIVACY_INVOKED;
kfree(sdata->u.sta.extra_ie);
sdata->u.sta.extra_ie = NULL;
@ -555,6 +553,8 @@ static int ieee80211_stop(struct net_device *dev)
ieee80211_led_radio(local, 0);
flush_workqueue(local->hw.workqueue);
tasklet_disable(&local->tx_pending_tasklet);
tasklet_disable(&local->tasklet);
}


@ -547,15 +547,14 @@ static void ieee80211_set_associated(struct net_device *dev,
sdata->bss_conf.ht_bss_conf = &conf->ht_bss_conf;
}
netif_carrier_on(dev);
ifsta->flags |= IEEE80211_STA_PREV_BSSID_SET;
memcpy(ifsta->prev_bssid, sdata->u.sta.bssid, ETH_ALEN);
memcpy(wrqu.ap_addr.sa_data, sdata->u.sta.bssid, ETH_ALEN);
ieee80211_sta_send_associnfo(dev, ifsta);
} else {
netif_carrier_off(dev);
ieee80211_sta_tear_down_BA_sessions(dev, ifsta->bssid);
ifsta->flags &= ~IEEE80211_STA_ASSOCIATED;
netif_carrier_off(dev);
ieee80211_reset_erp_info(dev);
sdata->bss_conf.assoc_ht = 0;
@ -569,6 +568,10 @@ static void ieee80211_set_associated(struct net_device *dev,
sdata->bss_conf.assoc = assoc;
ieee80211_bss_info_change_notify(sdata, changed);
if (assoc)
netif_carrier_on(dev);
wrqu.ap_addr.sa_family = ARPHRD_ETHER;
wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
}
@ -3611,8 +3614,10 @@ static int ieee80211_sta_find_ibss(struct net_device *dev,
spin_unlock_bh(&local->sta_bss_lock);
#ifdef CONFIG_MAC80211_IBSS_DEBUG
printk(KERN_DEBUG " sta_find_ibss: selected %s current "
"%s\n", print_mac(mac, bssid), print_mac(mac2, ifsta->bssid));
if (found)
printk(KERN_DEBUG " sta_find_ibss: selected %s current "
"%s\n", print_mac(mac, bssid),
print_mac(mac2, ifsta->bssid));
#endif /* CONFIG_MAC80211_IBSS_DEBUG */
if (found && memcmp(ifsta->bssid, bssid, ETH_ALEN) != 0 &&
(bss = ieee80211_rx_bss_get(dev, bssid,


@ -141,7 +141,6 @@ struct rc_pid_events_file_info {
* rate behaviour values (lower means we should trust more what we learnt
* about behaviour of rates, higher means we should trust more the natural
* ordering of rates)
* @fast_start: if Y, push high rates right after initialization
*/
struct rc_pid_debugfs_entries {
struct dentry *dir;
@ -154,7 +153,6 @@ struct rc_pid_debugfs_entries {
struct dentry *sharpen_factor;
struct dentry *sharpen_duration;
struct dentry *norm_offset;
struct dentry *fast_start;
};
void rate_control_pid_event_tx_status(struct rc_pid_event_buffer *buf,
@ -267,9 +265,6 @@ struct rc_pid_info {
/* Normalization offset. */
unsigned int norm_offset;
/* Fast start parameter. */
unsigned int fast_start;
/* Rates information. */
struct rc_pid_rateinfo *rinfo;

Some files were not shown because too many files have changed in this diff Show More