Merge branch 'sched/urgent' into sched/core

Merge reason: Merge dependent fix, update to latest -rc.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Ingo Molnar
2010-02-08 08:55:43 +01:00
1629 changed files with 41043 additions and 16909 deletions


@@ -80,7 +80,7 @@ char * __acpi_map_table (unsigned long phys_addr, unsigned long size);
void __acpi_unmap_table(char *map, unsigned long size);
int early_acpi_boot_init(void);
int acpi_boot_init (void);
int acpi_boot_table_init (void);
void acpi_boot_table_init (void);
int acpi_mps_check (void);
int acpi_numa_init (void);
@@ -251,6 +251,7 @@ int acpi_check_mem_region(resource_size_t start, resource_size_t n,
void __init acpi_no_s4_hw_signature(void);
void __init acpi_old_suspend_ordering(void);
void __init acpi_s4_no_nvs(void);
void __init acpi_set_sci_en_on_resume(void);
#endif /* CONFIG_PM_SLEEP */
struct acpi_osc_context {
@@ -320,9 +321,9 @@ static inline int acpi_boot_init(void)
return 0;
}
static inline int acpi_boot_table_init(void)
static inline void acpi_boot_table_init(void)
{
return 0;
return;
}
static inline int acpi_mps_check(void)


@@ -647,9 +647,9 @@ static inline int ata_id_has_large_logical_sectors(const u16 *id)
return id[ATA_ID_SECTOR_SIZE] & (1 << 13);
}
static inline u8 ata_id_logical_per_physical_sectors(const u16 *id)
static inline u16 ata_id_logical_per_physical_sectors(const u16 *id)
{
return id[ATA_ID_SECTOR_SIZE] & 0xf;
return 1 << (id[ATA_ID_SECTOR_SIZE] & 0xf);
}
static inline int ata_id_has_lba48(const u16 *id)
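A hedged illustration of what the ata_id_logical_per_physical_sectors() fix changes: the helper now returns the sector count itself (1 << n) rather than the raw log2 exponent, so callers can use it directly as a multiplier. The wrapper below is hypothetical, not part of the patch.

	/* Sketch: derive the physical sector size from the logical one. */
	static inline u32 ata_phys_sector_size(const u16 *id, u32 logical_size)
	{
		/* word 106, bits 3:0 = log2(logical sectors per physical sector) */
		return logical_size * ata_id_logical_per_physical_sectors(id);
	}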


@@ -109,6 +109,7 @@ extern int prepare_binprm(struct linux_binprm *);
extern int __must_check remove_arg_zero(struct linux_binprm *);
extern int search_binary_handler(struct linux_binprm *,struct pt_regs *);
extern int flush_old_exec(struct linux_binprm * bprm);
extern void setup_new_exec(struct linux_binprm * bprm);
extern int suid_dumpable;
#define SUID_DUMP_DISABLE 0 /* No setuid dumping */


@@ -845,7 +845,6 @@ static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
* blk_rq_err_bytes() : bytes left till the next error boundary
* blk_rq_sectors() : sectors left in the entire request
* blk_rq_cur_sectors() : sectors left in the current segment
* blk_rq_err_sectors() : sectors left till the next error boundary
*/
static inline sector_t blk_rq_pos(const struct request *rq)
{
@@ -874,11 +873,6 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
return blk_rq_cur_bytes(rq) >> 9;
}
static inline unsigned int blk_rq_err_sectors(const struct request *rq)
{
return blk_rq_err_bytes(rq) >> 9;
}
/*
* Request issue related functions.
*/
@@ -944,6 +938,8 @@ extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_default_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
sector_t offset);
extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
sector_t offset);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
@@ -1116,11 +1112,18 @@ static inline int queue_alignment_offset(struct request_queue *q)
return q->limits.alignment_offset;
}
static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t offset)
{
unsigned int granularity = max(lim->physical_block_size, lim->io_min);
offset &= granularity - 1;
return (granularity + lim->alignment_offset - offset) & (granularity - 1);
}
static inline int queue_sector_alignment_offset(struct request_queue *q,
sector_t sector)
{
return ((sector << 9) - q->limits.alignment_offset)
& (q->limits.io_min - 1);
return queue_limit_alignment_offset(&q->limits, sector << 9);
}
static inline int bdev_alignment_offset(struct block_device *bdev)
@@ -1147,8 +1150,11 @@ static inline int queue_discard_alignment(struct request_queue *q)
static inline int queue_sector_discard_alignment(struct request_queue *q,
sector_t sector)
{
return ((sector << 9) - q->limits.discard_alignment)
& (q->limits.discard_granularity - 1);
struct queue_limits *lim = &q->limits;
unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1);
return (lim->discard_granularity + lim->discard_alignment - alignment)
& (lim->discard_granularity - 1);
}
static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
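The rewritten helpers above share one formula; a stand-alone userspace sketch of the arithmetic (function name and sample values invented for illustration) shows how the alignment offset of a request is derived:

	#include <stdio.h>

	/* Restatement of queue_limit_alignment_offset() for experimentation. */
	static unsigned int alignment_offset(unsigned int granularity,
					     unsigned int align,
					     unsigned long long offset)
	{
		offset &= granularity - 1;
		return (granularity + align - offset) & (granularity - 1);
	}

	int main(void)
	{
		/* 4 KiB granularity with a 3584-byte alignment offset */
		printf("%u\n", alignment_offset(4096, 3584, 0));	/* 3584 */
		printf("%u\n", alignment_offset(4096, 3584, 3584));	/* 0: aligned */
		return 0;
	}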


@@ -15,6 +15,7 @@
# define __acquire(x) __context__(x,1)
# define __release(x) __context__(x,-1)
# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
# define __percpu __attribute__((noderef, address_space(3)))
extern void __chk_user_ptr(const volatile void __user *);
extern void __chk_io_ptr(const volatile void __iomem *);
#else
@@ -32,6 +33,7 @@ extern void __chk_io_ptr(const volatile void __iomem *);
# define __acquire(x) (void)0
# define __release(x) (void)0
# define __cond_lock(x,c) (c)
# define __percpu
#endif
#ifdef __KERNEL__
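A sketch of what the new annotation buys under sparse (the caller is hypothetical): per-CPU pointers now live in their own address space, so direct dereferences are flagged and the accessors must be used.

	int __percpu *ctr = alloc_percpu(int);

	if (ctr) {
		int cpu;

		for_each_possible_cpu(cpu)
			*per_cpu_ptr(ctr, cpu) = 0;	/* accessor casts away __percpu */
		free_percpu(ctr);
	}
	/* A bare *ctr dereference would now draw a sparse address-space warning. */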


@@ -24,9 +24,6 @@
#include <linux/types.h>
#define CN_IDX_CONNECTOR 0xffffffff
#define CN_VAL_CONNECTOR 0xffffffff
/*
* Process Events connector unique ids -- used for message routing
*/
@@ -75,30 +72,6 @@ struct cn_msg {
__u8 data[0];
};
/*
* Notify structure - requests notification about
* registering/unregistering idx/val in range [first, first+range].
*/
struct cn_notify_req {
__u32 first;
__u32 range;
};
/*
* Main notification control message
* *_notify_num - number of appropriate cn_notify_req structures after
* this struct.
* group - notification receiver's idx.
* len - total length of the attached data.
*/
struct cn_ctl_msg {
__u32 idx_notify_num;
__u32 val_notify_num;
__u32 group;
__u32 len;
__u8 data[0];
};
#ifdef __KERNEL__
#include <asm/atomic.h>
@@ -151,11 +124,6 @@ struct cn_callback_entry {
u32 seq, group;
};
struct cn_ctl_entry {
struct list_head notify_entry;
struct cn_ctl_msg *msg;
};
struct cn_dev {
struct cb_id id;


@@ -0,0 +1,10 @@
#ifndef DECOMPRESS_UNLZO_H
#define DECOMPRESS_UNLZO_H
int unlzo(unsigned char *inbuf, int len,
int(*fill)(void*, unsigned int),
int(*flush)(void*, unsigned int),
unsigned char *output,
int *pos,
void(*error)(char *x));
#endif
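A hedged sketch of calling the new decompressor on an in-memory image, assuming it follows the usual kernel decompress_fn convention (buffer names hypothetical): with fill and flush both NULL the input is taken from inbuf and the result written to output, and a nonzero return signals failure via the error callback.

	static void unlzo_error(char *msg)
	{
		pr_err("unlzo: %s\n", msg);
	}

	...
	if (unlzo(inbuf, inlen, NULL, NULL, outbuf, NULL, unlzo_error))
		return -EINVAL;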


@@ -53,7 +53,7 @@
extern const char *drbd_buildtag(void);
#define REL_VERSION "8.3.6"
#define REL_VERSION "8.3.7"
#define API_VERSION 88
#define PRO_VERSION_MIN 86
#define PRO_VERSION_MAX 91


@@ -69,6 +69,7 @@ NL_PACKET(disconnect, 6, )
NL_PACKET(resize, 7,
NL_INT64( 29, T_MAY_IGNORE, resize_size)
NL_BIT( 68, T_MAY_IGNORE, resize_force)
)
NL_PACKET(syncer_conf, 8,


@@ -10,6 +10,7 @@
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/wait.h>
/*
* CAREFUL: Check include/asm-generic/fcntl.h when defining
@@ -34,6 +35,9 @@ struct file *eventfd_fget(int fd);
struct eventfd_ctx *eventfd_ctx_fdget(int fd);
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
int eventfd_signal(struct eventfd_ctx *ctx, int n);
ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt);
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_t *wait,
__u64 *cnt);
#else /* CONFIG_EVENTFD */
@@ -61,6 +65,18 @@ static inline void eventfd_ctx_put(struct eventfd_ctx *ctx)
}
static inline ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait,
__u64 *cnt)
{
return -ENOSYS;
}
static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx,
wait_queue_t *wait, __u64 *cnt)
{
return -ENOSYS;
}
#endif
#endif /* _LINUX_EVENTFD_H */
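A hedged in-kernel usage fragment for the new eventfd_ctx_read() (the caller and fd are hypothetical): it returns 0 on success, or a negative errno such as -EAGAIN when no_wait is set and the counter is zero.

	struct eventfd_ctx *ctx;
	__u64 cnt;

	ctx = eventfd_ctx_fdget(fd);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (!eventfd_ctx_read(ctx, 1 /* no_wait */, &cnt))
		pr_info("eventfd count was %llu\n", cnt);
	eventfd_ctx_put(ctx);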


@@ -340,6 +340,9 @@ struct fw_cdev_send_response {
* The @closure field is passed back to userspace in the response event.
* The @handle field is an out parameter, returning a handle to the allocated
* range to be used for later deallocation of the range.
*
* The address range is allocated on all local nodes. The address allocation
* is exclusive except for the FCP command and response registers.
*/
struct fw_cdev_allocate {
__u64 offset;
@@ -377,7 +380,7 @@ struct fw_cdev_initiate_bus_reset {
* @immediate: If non-zero, immediate key to insert before pointer
* @key: Upper 8 bits of root directory pointer
* @data: Userspace pointer to contents of descriptor block
* @length: Length of descriptor block data, in bytes
* @length: Length of descriptor block data, in quadlets
* @handle: Handle to the descriptor, written by the kernel
*
* Add a descriptor block and optionally a preceding immediate key to the local
@@ -391,6 +394,8 @@ struct fw_cdev_initiate_bus_reset {
* If not 0, the @immediate field specifies an immediate key which will be
* inserted before the root directory pointer.
*
* @immediate, @key, and @data array elements are CPU-endian quadlets.
*
* If successful, the kernel adds the descriptor and writes back a handle to the
* kernel-side object to be used for later removal of the descriptor block and
* immediate key.


@@ -248,8 +248,8 @@ typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
void *data, size_t length,
void *callback_data);
/*
* Important note: The callback must guarantee that either fw_send_response()
* or kfree() is called on the @request.
* Important note: Except for the FCP registers, the callback must guarantee
* that either fw_send_response() or kfree() is called on the @request.
*/
typedef void (*fw_address_callback_t)(struct fw_card *card,
struct fw_request *request,


@@ -2463,7 +2463,7 @@ int proc_nr_files(struct ctl_table *table, int write,
int __init get_filesystem_list(char *buf);
#define ACC_MODE(x) ("\000\004\002\006"[(x)&O_ACCMODE])
#define ACC_MODE(x) ("\004\002\006\006"[(x)&O_ACCMODE])
#define OPEN_FMODE(flag) ((__force fmode_t)((flag + 1) & O_ACCMODE))
#endif /* __KERNEL__ */
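Worked through, the corrected ACC_MODE() table maps the O_ACCMODE bits straight to MAY_* permission masks:

	ACC_MODE(O_RDONLY)	/* index 0 -> 004 = MAY_READ */
	ACC_MODE(O_WRONLY)	/* index 1 -> 002 = MAY_WRITE */
	ACC_MODE(O_RDWR)	/* index 2 -> 006 = MAY_READ | MAY_WRITE */
	/* index 3 (the otherwise-invalid flag combination) also yields 006 */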


@@ -256,9 +256,9 @@ extern struct hd_struct *disk_map_sector_rcu(struct gendisk *disk,
#define part_stat_read(part, field) \
({ \
typeof((part)->dkstats->field) res = 0; \
int i; \
for_each_possible_cpu(i) \
res += per_cpu_ptr((part)->dkstats, i)->field; \
unsigned int _cpu; \
for_each_possible_cpu(_cpu) \
res += per_cpu_ptr((part)->dkstats, _cpu)->field; \
res; \
})
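The rename from i to _cpu is not cosmetic: part_stat_read() is a statement-expression macro, so a field argument that itself referenced a caller variable named i would silently pick up the macro's internal loop counter. A hypothetical caller the old macro would have broken:

	int i = READ;
	/* With 'int i' inside the macro, ios[i] would index by CPU number. */
	unsigned long reads = part_stat_read(part, ios[i]);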


@@ -46,7 +46,7 @@ void kmap_flush_unused(void);
static inline unsigned int nr_free_highpages(void) { return 0; }
#define totalhigh_pages 0
#define totalhigh_pages 0UL
#ifndef ARCH_HAS_KMAP
static inline void *kmap(struct page *page)


@@ -75,6 +75,8 @@ extern int __register_perf_hw_breakpoint(struct perf_event *bp);
extern void unregister_hw_breakpoint(struct perf_event *bp);
extern void unregister_wide_hw_breakpoint(struct perf_event **cpu_events);
extern int dbg_reserve_bp_slot(struct perf_event *bp);
extern int dbg_release_bp_slot(struct perf_event *bp);
extern int reserve_bp_slot(struct perf_event *bp);
extern void release_bp_slot(struct perf_event *bp);


@@ -89,4 +89,16 @@ struct adp5588_kpad_platform_data {
unsigned short unlock_key2; /* Unlock Key 2 */
};
struct adp5588_gpio_platform_data {
unsigned gpio_start; /* GPIO Chip base # */
unsigned pullup_dis_mask; /* Pull-Up Disable Mask */
int (*setup)(struct i2c_client *client,
int gpio, unsigned ngpio,
void *context);
int (*teardown)(struct i2c_client *client,
int gpio, unsigned ngpio,
void *context);
void *context;
};
#endif
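A hypothetical board-file sketch for the new adp5588 GPIO platform data (values invented for illustration; the setup/teardown callbacks may stay NULL):

	static struct adp5588_gpio_platform_data adp5588_gpio_pdata = {
		.gpio_start	 = 64,	/* first GPIO number for this chip */
		.pullup_dis_mask = 0,	/* leave every pull-up enabled */
	};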


@@ -832,7 +832,7 @@ struct ieee80211_ht_cap {
#define IEEE80211_HT_CAP_DELAY_BA 0x0400
#define IEEE80211_HT_CAP_MAX_AMSDU 0x0800
#define IEEE80211_HT_CAP_DSSSCCK40 0x1000
#define IEEE80211_HT_CAP_PSMP_SUPPORT 0x2000
#define IEEE80211_HT_CAP_RESERVED 0x2000
#define IEEE80211_HT_CAP_40MHZ_INTOLERANT 0x4000
#define IEEE80211_HT_CAP_LSIG_TXOP_PROT 0x8000


@@ -81,6 +81,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
#define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING)
#define IN_DEV_MFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), MC_FORWARDING)
#define IN_DEV_RPFILTER(in_dev) IN_DEV_MAXCONF((in_dev), RP_FILTER)
#define IN_DEV_SRC_VMARK(in_dev) IN_DEV_ORCONF((in_dev), SRC_VMARK)
#define IN_DEV_SOURCE_ROUTE(in_dev) IN_DEV_ANDCONF((in_dev), \
ACCEPT_SOURCE_ROUTE)
#define IN_DEV_ACCEPT_LOCAL(in_dev) IN_DEV_ORCONF((in_dev), ACCEPT_LOCAL)


@@ -660,6 +660,7 @@ struct input_absinfo {
#define ABS_MT_TOOL_TYPE 0x37 /* Type of touching device */
#define ABS_MT_BLOB_ID 0x38 /* Group a set of packets as a blob */
#define ABS_MT_TRACKING_ID 0x39 /* Unique ID of initiated contact */
#define ABS_MT_PRESSURE 0x3a /* Pressure on contact area */
#define ABS_MAX 0x3f
#define ABS_CNT (ABS_MAX+1)


@@ -4,32 +4,6 @@
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
/*
* This is the per-process anticipatory I/O scheduler state.
*/
struct as_io_context {
spinlock_t lock;
void (*dtor)(struct as_io_context *aic); /* destructor */
void (*exit)(struct as_io_context *aic); /* called on task exit */
unsigned long state;
atomic_t nr_queued; /* queued reads & sync writes */
atomic_t nr_dispatched; /* number of requests gone to the drivers */
/* IO History tracking */
/* Thinktime */
unsigned long last_end_request;
unsigned long ttime_total;
unsigned long ttime_samples;
unsigned long ttime_mean;
/* Layout pattern */
unsigned int seek_samples;
sector_t last_request_pos;
u64 seek_total;
sector_t seek_mean;
};
struct cfq_queue;
struct cfq_io_context {
void *key;
@@ -78,7 +52,6 @@ struct io_context {
unsigned long last_waited; /* Time last woken after wait for request */
int nr_batch_requests; /* Number of requests left in the batch */
struct as_io_context *aic;
struct radix_tree_root radix_root;
struct hlist_head cic_list;
void *ioc_data;


@@ -653,6 +653,7 @@ struct transaction_s
* waiting for it to finish.
*/
unsigned int t_synchronous_commit:1;
unsigned int t_flushed_data_blocks:1;
/*
* For use by the filesystem to store fs-specific data


@@ -735,6 +735,10 @@ struct sysinfo {
/* Force a compilation error if condition is constant and true */
#define MAYBE_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))
/* Force a compilation error if a constant expression is not a power of 2 */
#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \
BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0))
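A usage sketch for the new assertion (the RING_SIZE macro is hypothetical); like BUILD_BUG_ON() it must appear in function scope:

	#define RING_SIZE 64

	static int ring_init(void)
	{
		BUILD_BUG_ON_NOT_POWER_OF_2(RING_SIZE);	/* 64 & 63 == 0: compiles */
		/* For 48 it would fail: 48 & 47 == 32, a non-zero result. */
		return 0;
	}
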
/* Force a compilation error if condition is true, but also produce a
result (of value 0 and type size_t), so the expression can be used
e.g. in a structure initializer (or where-ever else comma expressions


@@ -67,7 +67,7 @@ struct kfifo {
/**
* DECLARE_KFIFO - macro to declare a kfifo and the associated buffer
* @name: name of the declared kfifo datatype
* @size: size of the fifo buffer
* @size: size of the fifo buffer. Must be a power of two.
*
* Note1: the macro can be used inside struct or union declaration
* Note2: the macro creates two objects:
@@ -81,7 +81,7 @@ union { \
}
/**
* INIT_KFIFO - Initialize a kfifo declared by DECLARED_KFIFO
* INIT_KFIFO - Initialize a kfifo declared by DECLARE_KFIFO
* @name: name of the declared kfifo datatype
*/
#define INIT_KFIFO(name) \
@@ -91,7 +91,7 @@ union { \
/**
* DEFINE_KFIFO - macro to define and initialize a kfifo
* @name: name of the declared kfifo datatype
* @size: size of the fifo buffer
* @size: size of the fifo buffer. Must be a power of two.
*
* Note1: the macro can be used for global and local kfifo data type variables
* Note2: the macro creates two objects:
@@ -104,15 +104,28 @@ union { \
#undef __kfifo_initializer
extern void kfifo_init(struct kfifo *fifo, unsigned char *buffer,
extern void kfifo_init(struct kfifo *fifo, void *buffer,
unsigned int size);
extern __must_check int kfifo_alloc(struct kfifo *fifo, unsigned int size,
gfp_t gfp_mask);
extern void kfifo_free(struct kfifo *fifo);
extern unsigned int kfifo_in(struct kfifo *fifo,
const unsigned char *from, unsigned int len);
const void *from, unsigned int len);
extern __must_check unsigned int kfifo_out(struct kfifo *fifo,
unsigned char *to, unsigned int len);
void *to, unsigned int len);
extern __must_check unsigned int kfifo_out_peek(struct kfifo *fifo,
void *to, unsigned int len, unsigned offset);
/**
* kfifo_initialized - Check if kfifo is initialized.
* @fifo: fifo to check
* Return %true if FIFO is initialized, otherwise %false.
* Assumes the fifo was 0 before.
*/
static inline bool kfifo_initialized(struct kfifo *fifo)
{
return fifo->buffer != 0;
}
/**
* kfifo_reset - removes the entire FIFO contents
@@ -194,7 +207,7 @@ static inline __must_check unsigned int kfifo_avail(struct kfifo *fifo)
* bytes copied.
*/
static inline unsigned int kfifo_in_locked(struct kfifo *fifo,
const unsigned char *from, unsigned int n, spinlock_t *lock)
const void *from, unsigned int n, spinlock_t *lock)
{
unsigned long flags;
unsigned int ret;
@@ -219,7 +232,7 @@ static inline unsigned int kfifo_in_locked(struct kfifo *fifo,
* @to buffer and returns the number of copied bytes.
*/
static inline __must_check unsigned int kfifo_out_locked(struct kfifo *fifo,
unsigned char *to, unsigned int n, spinlock_t *lock)
void *to, unsigned int n, spinlock_t *lock)
{
unsigned long flags;
unsigned int ret;
@@ -228,13 +241,6 @@ static inline __must_check unsigned int kfifo_out_locked(struct kfifo *fifo,
ret = kfifo_out(fifo, to, n);
/*
* optimization: if the FIFO is empty, set the indices to 0
* so we don't wrap the next time
*/
if (kfifo_is_empty(fifo))
kfifo_reset(fifo);
spin_unlock_irqrestore(lock, flags);
return ret;
@@ -242,11 +248,11 @@ static inline __must_check unsigned int kfifo_out_locked(struct kfifo *fifo,
extern void kfifo_skip(struct kfifo *fifo, unsigned int len);
extern __must_check unsigned int kfifo_from_user(struct kfifo *fifo,
const void __user *from, unsigned int n);
extern __must_check int kfifo_from_user(struct kfifo *fifo,
const void __user *from, unsigned int n, unsigned *lenout);
extern __must_check unsigned int kfifo_to_user(struct kfifo *fifo,
void __user *to, unsigned int n);
extern __must_check int kfifo_to_user(struct kfifo *fifo,
void __user *to, unsigned int n, unsigned *lenout);
/*
* __kfifo_add_out internal helper function for updating the out offset
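A minimal usage sketch of the reworked API above (error handling trimmed, names hypothetical). The size passed to kfifo_alloc() must be a power of two, and since kfifo_in()/kfifo_out() now take void *, callers no longer cast:

	#include <linux/errno.h>
	#include <linux/kfifo.h>
	#include <linux/slab.h>

	static struct kfifo fifo;

	static int fifo_demo(void)
	{
		unsigned char out[4];
		unsigned int copied;
		int ret;

		ret = kfifo_alloc(&fifo, 16, GFP_KERNEL);	/* 16 = 2^4 */
		if (ret)
			return ret;

		kfifo_in(&fifo, "abcd", 4);
		copied = kfifo_out(&fifo, out, sizeof(out));	/* -> 4 */
		kfifo_free(&fifo);
		return copied == 4 ? 0 : -EIO;
	}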


@@ -29,8 +29,7 @@ struct pt_regs;
*
* On some architectures it is required to skip a breakpoint
* exception when it occurs after a breakpoint has been removed.
* This can be implemented in the architecture specific portion of
* for kgdb.
* This can be implemented in the architecture specific portion of kgdb.
*/
extern int kgdb_skipexception(int exception, struct pt_regs *regs);
@@ -65,7 +64,7 @@ struct uart_port;
/**
* kgdb_breakpoint - compiled in breakpoint
*
* This will be impelmented a static inline per architecture. This
* This will be implemented as a static inline per architecture. This
* function is called by the kgdb core to execute an architecture
* specific trap to cause kgdb to enter the exception processing.
*
@@ -190,7 +189,7 @@ kgdb_arch_handle_exception(int vector, int signo, int err_code,
* @flags: Current IRQ state
*
* On SMP systems, we need to get the attention of the other CPUs
* and get them be in a known state. This should do what is needed
* and get them into a known state. This should do what is needed
* to get the other CPUs to call kgdb_wait(). Note that on some arches,
* the NMI approach is not used for rounding up all the CPUs. For example,
* in case of MIPS, smp_call_function() is used to roundup CPUs. In


@@ -36,6 +36,56 @@ int kmemcheck_hide_addr(unsigned long address);
bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size);
/*
* Bitfield annotations
*
* How to use: If you have a struct using bitfields, for example
*
* struct a {
* int x:8, y:8;
* };
*
* then this should be rewritten as
*
* struct a {
* kmemcheck_bitfield_begin(flags);
* int x:8, y:8;
* kmemcheck_bitfield_end(flags);
* };
*
* Now the "flags_begin" and "flags_end" members may be used to refer to the
* beginning and end, respectively, of the bitfield (and things like
* &x.flags_begin is allowed). As soon as the struct is allocated, the bit-
* fields should be annotated:
*
* struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
* kmemcheck_annotate_bitfield(a, flags);
*/
#define kmemcheck_bitfield_begin(name) \
int name##_begin[0];
#define kmemcheck_bitfield_end(name) \
int name##_end[0];
#define kmemcheck_annotate_bitfield(ptr, name) \
do { \
int _n; \
\
if (!ptr) \
break; \
\
_n = (long) &((ptr)->name##_end) \
- (long) &((ptr)->name##_begin); \
MAYBE_BUILD_BUG_ON(_n < 0); \
\
kmemcheck_mark_initialized(&((ptr)->name##_begin), _n); \
} while (0)
#define kmemcheck_annotate_variable(var) \
do { \
kmemcheck_mark_initialized(&(var), sizeof(var)); \
} while (0) \
#else
#define kmemcheck_enabled 0
@@ -106,60 +156,16 @@ static inline bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
return true;
}
#endif /* CONFIG_KMEMCHECK */
/*
* Bitfield annotations
*
* How to use: If you have a struct using bitfields, for example
*
* struct a {
* int x:8, y:8;
* };
*
* then this should be rewritten as
*
* struct a {
* kmemcheck_bitfield_begin(flags);
* int x:8, y:8;
* kmemcheck_bitfield_end(flags);
* };
*
* Now the "flags_begin" and "flags_end" members may be used to refer to the
* beginning and end, respectively, of the bitfield (and things like
* &x.flags_begin is allowed). As soon as the struct is allocated, the bit-
* fields should be annotated:
*
* struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
* kmemcheck_annotate_bitfield(a, flags);
*
* Note: We provide the same definitions for both kmemcheck and non-
* kmemcheck kernels. This makes it harder to introduce accidental errors. It
* is also allowed to pass NULL pointers to kmemcheck_annotate_bitfield().
*/
#define kmemcheck_bitfield_begin(name) \
int name##_begin[0];
#define kmemcheck_bitfield_end(name) \
int name##_end[0];
#define kmemcheck_annotate_bitfield(ptr, name) \
do { \
int _n; \
\
if (!ptr) \
break; \
\
_n = (long) &((ptr)->name##_end) \
- (long) &((ptr)->name##_begin); \
MAYBE_BUILD_BUG_ON(_n < 0); \
\
kmemcheck_mark_initialized(&((ptr)->name##_begin), _n); \
#define kmemcheck_bitfield_begin(name)
#define kmemcheck_bitfield_end(name)
#define kmemcheck_annotate_bitfield(ptr, name) \
do { \
} while (0)
#define kmemcheck_annotate_variable(var) \
do { \
kmemcheck_mark_initialized(&(var), sizeof(var)); \
} while (0) \
#define kmemcheck_annotate_variable(var) \
do { \
} while (0)
#endif /* CONFIG_KMEMCHECK */
#endif /* LINUX_KMEMCHECK_H */


@@ -17,6 +17,7 @@
enum kmsg_dump_reason {
KMSG_DUMP_OOPS,
KMSG_DUMP_PANIC,
KMSG_DUMP_KEXEC,
};
/**


@@ -354,6 +354,9 @@ enum {
/* max tries if error condition is still set after ->error_handler */
ATA_EH_MAX_TRIES = 5,
/* sometimes resuming a link requires several retries */
ATA_LINK_RESUME_TRIES = 5,
/* how hard are we gonna try to probe/recover devices */
ATA_PROBE_MAX_TRIES = 3,
ATA_EH_DEV_TRIES = 3,

include/linux/list_sort.h (new file, 11 lines)

@@ -0,0 +1,11 @@
#ifndef _LINUX_LIST_SORT_H
#define _LINUX_LIST_SORT_H
#include <linux/types.h>
struct list_head;
void list_sort(void *priv, struct list_head *head,
int (*cmp)(void *priv, struct list_head *a,
struct list_head *b));
#endif
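A hedged usage sketch for the new interface (the struct and comparison function are hypothetical): list_sort() orders a standard list_head list, passing priv through to the comparator, which returns a memcmp-style result.

	#include <linux/list.h>
	#include <linux/list_sort.h>

	struct item {
		struct list_head node;
		int key;
	};

	static int item_cmp(void *priv, struct list_head *a, struct list_head *b)
	{
		struct item *ia = list_entry(a, struct item, node);
		struct item *ib = list_entry(b, struct item, node);

		return ia->key - ib->key;	/* ascending by key */
	}

	/* list_sort(NULL, &item_list, item_cmp); */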


@@ -2,6 +2,8 @@
#define MFD_TMIO_H
#include <linux/fb.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#define tmio_ioread8(addr) readb(addr)
#define tmio_ioread16(addr) readw(addr)
@@ -18,11 +20,48 @@
writew((val) >> 16, (addr) + 2); \
} while (0)
#define CNF_CMD 0x04
#define CNF_CTL_BASE 0x10
#define CNF_INT_PIN 0x3d
#define CNF_STOP_CLK_CTL 0x40
#define CNF_GCLK_CTL 0x41
#define CNF_SD_CLK_MODE 0x42
#define CNF_PIN_STATUS 0x44
#define CNF_PWR_CTL_1 0x48
#define CNF_PWR_CTL_2 0x49
#define CNF_PWR_CTL_3 0x4a
#define CNF_CARD_DETECT_MODE 0x4c
#define CNF_SD_SLOT 0x50
#define CNF_EXT_GCLK_CTL_1 0xf0
#define CNF_EXT_GCLK_CTL_2 0xf1
#define CNF_EXT_GCLK_CTL_3 0xf9
#define CNF_SD_LED_EN_1 0xfa
#define CNF_SD_LED_EN_2 0xfe
#define SDCREN 0x2 /* Enable access to MMC CTL regs. (flag in COMMAND_REG)*/
#define sd_config_write8(base, shift, reg, val) \
tmio_iowrite8((val), (base) + ((reg) << (shift)))
#define sd_config_write16(base, shift, reg, val) \
tmio_iowrite16((val), (base) + ((reg) << (shift)))
#define sd_config_write32(base, shift, reg, val) \
do { \
tmio_iowrite16((val), (base) + ((reg) << (shift))); \
tmio_iowrite16((val) >> 16, (base) + ((reg + 2) << (shift))); \
} while (0)
int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base);
int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base);
void tmio_core_mmc_pwr(void __iomem *cnf, int shift, int state);
void tmio_core_mmc_clk_div(void __iomem *cnf, int shift, int state);
/*
* data for the MMC controller
*/
struct tmio_mmc_data {
const unsigned int hclk;
void (*set_pwr)(struct platform_device *host, int state);
void (*set_clk_div)(struct platform_device *host, int state);
};
/*


@@ -666,20 +666,20 @@
#define WM8350_ISINK_FLASH_DUR_64MS (1 << 8)
#define WM8350_ISINK_FLASH_DUR_96MS (2 << 8)
#define WM8350_ISINK_FLASH_DUR_1024MS (3 << 8)
#define WM8350_ISINK_FLASH_ON_INSTANT (0 << 4)
#define WM8350_ISINK_FLASH_ON_0_25S (1 << 4)
#define WM8350_ISINK_FLASH_ON_0_50S (2 << 4)
#define WM8350_ISINK_FLASH_ON_1_00S (3 << 4)
#define WM8350_ISINK_FLASH_ON_1_95S (1 << 4)
#define WM8350_ISINK_FLASH_ON_3_91S (2 << 4)
#define WM8350_ISINK_FLASH_ON_7_80S (3 << 4)
#define WM8350_ISINK_FLASH_OFF_INSTANT (0 << 0)
#define WM8350_ISINK_FLASH_OFF_0_25S (1 << 0)
#define WM8350_ISINK_FLASH_OFF_0_50S (2 << 0)
#define WM8350_ISINK_FLASH_OFF_1_00S (3 << 0)
#define WM8350_ISINK_FLASH_OFF_1_95S (1 << 0)
#define WM8350_ISINK_FLASH_OFF_3_91S (2 << 0)
#define WM8350_ISINK_FLASH_OFF_7_80S (3 << 0)
#define WM8350_ISINK_FLASH_ON_INSTANT (0 << 0)
#define WM8350_ISINK_FLASH_ON_0_25S (1 << 0)
#define WM8350_ISINK_FLASH_ON_0_50S (2 << 0)
#define WM8350_ISINK_FLASH_ON_1_00S (3 << 0)
#define WM8350_ISINK_FLASH_ON_1_95S (1 << 0)
#define WM8350_ISINK_FLASH_ON_3_91S (2 << 0)
#define WM8350_ISINK_FLASH_ON_7_80S (3 << 0)
#define WM8350_ISINK_FLASH_OFF_INSTANT (0 << 4)
#define WM8350_ISINK_FLASH_OFF_0_25S (1 << 4)
#define WM8350_ISINK_FLASH_OFF_0_50S (2 << 4)
#define WM8350_ISINK_FLASH_OFF_1_00S (3 << 4)
#define WM8350_ISINK_FLASH_OFF_1_95S (1 << 4)
#define WM8350_ISINK_FLASH_OFF_3_91S (2 << 4)
#define WM8350_ISINK_FLASH_OFF_7_80S (3 << 4)
/*
* Regulator Interrupts.


@@ -1089,6 +1089,7 @@ extern void zone_pcp_update(struct zone *zone);
/* nommu.c */
extern atomic_long_t mmap_pages_allocated;
extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
/* prio_tree.c */
void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old);


@@ -122,7 +122,9 @@ struct vm_region {
unsigned long vm_pgoff; /* the offset in vm_file corresponding to vm_start */
struct file *vm_file; /* the backing file or NULL */
atomic_t vm_usage; /* region usage count */
int vm_usage; /* region usage count (access under nommu_region_sem) */
bool vm_icache_flushed : 1; /* true if the icache has been flushed for
* this region */
};
/*
@@ -203,10 +205,12 @@ struct mm_struct {
struct vm_area_struct * mmap; /* list of VMAs */
struct rb_root mm_rb;
struct vm_area_struct * mmap_cache; /* last find_vma result */
#ifdef CONFIG_MMU
unsigned long (*get_unmapped_area) (struct file *filp,
unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags);
void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
#endif
unsigned long mmap_base; /* base of mmap area */
unsigned long task_size; /* size of task vm space */
unsigned long cached_hole_size; /* if non-zero, the largest hole below free_area_cache */

include/linux/mtd/pismo.h (new file, 17 lines)

@@ -0,0 +1,17 @@
/*
* PISMO memory driver - http://www.pismoworld.org/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License.
*/
#ifndef __LINUX_MTD_PISMO_H
#define __LINUX_MTD_PISMO_H
struct pismo_pdata {
void (*set_vpp)(void *, int);
void *vpp_data;
phys_addr_t cs_addrs[5];
};
#endif


@@ -253,6 +253,8 @@ extern struct page * read_cache_page_async(struct address_space *mapping,
extern struct page * read_cache_page(struct address_space *mapping,
pgoff_t index, filler_t *filler,
void *data);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
struct list_head *pages, filler_t *filler, void *data);


@@ -243,6 +243,7 @@ struct pci_dev {
unsigned int d2_support:1; /* Low power state D2 is supported */
unsigned int no_d1d2:1; /* Only allow D0 and D3 */
unsigned int wakeup_prepared:1;
unsigned int d3_delay; /* D3->D0 transition time in ms */
#ifdef CONFIG_PCIEASPM
struct pcie_link_state *link_state; /* ASPM link state. */
@@ -566,6 +567,9 @@ void pcibios_align_resource(void *, struct resource *, resource_size_t,
resource_size_t);
void pcibios_update_irq(struct pci_dev *, int irq);
/* Weak but can be overriden by arch */
void pci_fixup_cardbus(struct pci_bus *);
/* Generic PCI functions used internally */
extern struct pci_bus *pci_find_bus(int domain, int busnr);
@@ -752,6 +756,10 @@ pci_power_t pci_target_state(struct pci_dev *dev);
int pci_prepare_to_sleep(struct pci_dev *dev);
int pci_back_from_sleep(struct pci_dev *dev);
/* For use by arch with custom probe code */
void set_pcie_port_type(struct pci_dev *pdev);
void set_pcie_hotplug_bridge(struct pci_dev *pdev);
/* Functions for PCI Hotplug drivers to use */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
#ifdef CONFIG_HOTPLUG


@@ -814,9 +814,14 @@ extern int perf_event_overflow(struct perf_event *event, int nmi,
*/
static inline int is_software_event(struct perf_event *event)
{
return (event->attr.type != PERF_TYPE_RAW) &&
(event->attr.type != PERF_TYPE_HARDWARE) &&
(event->attr.type != PERF_TYPE_HW_CACHE);
switch (event->attr.type) {
case PERF_TYPE_SOFTWARE:
case PERF_TYPE_TRACEPOINT:
/* for now the breakpoint stuff also works as software event */
case PERF_TYPE_BREAKPOINT:
return 1;
}
return 0;
}
extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];


@@ -447,6 +447,7 @@ struct phy_device* get_phy_device(struct mii_bus *bus, int addr);
int phy_device_register(struct phy_device *phy);
int phy_clear_interrupt(struct phy_device *phydev);
int phy_config_interrupt(struct phy_device *phydev, u32 interrupts);
int phy_init_hw(struct phy_device *phydev);
int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
u32 flags, phy_interface_t interface);
struct phy_device * phy_attach(struct net_device *dev,
@@ -484,6 +485,7 @@ void phy_driver_unregister(struct phy_driver *drv);
int phy_driver_register(struct phy_driver *new_driver);
void phy_prepare_link(struct phy_device *phydev,
void (*adjust_link)(struct net_device *));
void phy_state_machine(struct work_struct *work);
void phy_start_machine(struct phy_device *phydev,
void (*handler)(struct net_device *));
void phy_stop_machine(struct phy_device *phydev);


@@ -2,13 +2,25 @@
#define _LINUX_POISON_H
/********** include/linux/list.h **********/
/*
* Architectures might want to move the poison pointer offset
* into some well-recognized area such as 0xdead000000000000,
* that is also not mappable by user-space exploits:
*/
#ifdef CONFIG_ILLEGAL_POINTER_VALUE
# define POISON_POINTER_DELTA _AC(CONFIG_ILLEGAL_POINTER_VALUE, UL)
#else
# define POISON_POINTER_DELTA 0
#endif
/*
* These are non-NULL pointers that will result in page faults
* under normal circumstances, used to verify that nobody uses
* non-initialized list entries.
*/
#define LIST_POISON1 ((void *) 0x00100100)
#define LIST_POISON2 ((void *) 0x00200200)
#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
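Worked example: x86-64 defines CONFIG_ILLEGAL_POINTER_VALUE as 0xdead000000000000, so the poison pointers work out to

	LIST_POISON1 == (void *) 0xdead000000100100	/* non-canonical on x86-64 */
	LIST_POISON2 == (void *) 0xdead000000200200

addresses that user space cannot map.
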
/********** include/linux/timer.h **********/
/*


@@ -62,6 +62,12 @@ void reiserfs_write_unlock(struct super_block *s);
int reiserfs_write_lock_once(struct super_block *s);
void reiserfs_write_unlock_once(struct super_block *s, int lock_depth);
#ifdef CONFIG_REISERFS_CHECK
void reiserfs_lock_check_recursive(struct super_block *s);
#else
static inline void reiserfs_lock_check_recursive(struct super_block *s) { }
#endif
/*
* Several mutexes depend on the write lock.
* However sometimes we want to relax the write lock while we hold
@@ -92,11 +98,31 @@ void reiserfs_write_unlock_once(struct super_block *s, int lock_depth);
static inline void reiserfs_mutex_lock_safe(struct mutex *m,
struct super_block *s)
{
reiserfs_lock_check_recursive(s);
reiserfs_write_unlock(s);
mutex_lock(m);
reiserfs_write_lock(s);
}
static inline void
reiserfs_mutex_lock_nested_safe(struct mutex *m, unsigned int subclass,
struct super_block *s)
{
reiserfs_lock_check_recursive(s);
reiserfs_write_unlock(s);
mutex_lock_nested(m, subclass);
reiserfs_write_lock(s);
}
static inline void
reiserfs_down_read_safe(struct rw_semaphore *sem, struct super_block *s)
{
reiserfs_lock_check_recursive(s);
reiserfs_write_unlock(s);
down_read(sem);
reiserfs_write_lock(s);
}
/*
* When we schedule, we usually want to also release the write lock,
* according to the previous bkl based locking scheme of reiserfs.


@@ -3,8 +3,6 @@
#include <linux/time.h>
struct task_struct;
/*
* Resource control/accounting header file for linux
*/
@@ -70,6 +68,12 @@ struct rlimit {
*/
#include <asm/resource.h>
#ifdef __KERNEL__
struct task_struct;
int getrusage(struct task_struct *p, int who, struct rusage __user *ru);
#endif /* __KERNEL__ */
#endif


@@ -310,6 +310,7 @@ extern void sched_show_task(struct task_struct *p);
#ifdef CONFIG_DETECT_SOFTLOCKUP
extern void softlockup_tick(void);
extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
void __user *buffer,
@@ -323,6 +324,9 @@ static inline void softlockup_tick(void)
static inline void touch_softlockup_watchdog(void)
{
}
static inline void touch_softlockup_watchdog_sync(void)
{
}
static inline void touch_all_softlockup_watchdogs(void)
{
}
@@ -377,6 +381,8 @@ extern int sysctl_max_map_count;
#include <linux/aio.h>
#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
unsigned long, unsigned long);
@@ -386,6 +392,9 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
unsigned long flags);
extern void arch_unmap_area(struct mm_struct *, unsigned long);
extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif
#if USE_SPLIT_PTLOCKS
/*
@@ -1349,7 +1358,7 @@ struct task_struct {
char comm[TASK_COMM_LEN]; /* executable name excluding path
- access with [gs]et_task_comm (which lock
it with task_lock())
- initialized normally by flush_old_exec */
- initialized normally by setup_new_exec */
/* file system info */
int link_count, total_link_count;
#ifdef CONFIG_SYSVIPC
@@ -2476,8 +2485,6 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
#endif /* CONFIG_SMP */
extern void arch_pick_mmap_layout(struct mm_struct *mm);
#ifdef CONFIG_TRACING
extern void
__trace_special(void *__tr, void *__data,
@@ -2582,6 +2589,28 @@ static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
}
#endif /* CONFIG_MM_OWNER */
static inline unsigned long task_rlimit(const struct task_struct *tsk,
unsigned int limit)
{
return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
}
static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
unsigned int limit)
{
return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
}
static inline unsigned long rlimit(unsigned int limit)
{
return task_rlimit(current, limit);
}
static inline unsigned long rlimit_max(unsigned int limit)
{
return task_rlimit_max(current, limit);
}
#endif /* __KERNEL__ */
#endif
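A hedged example of the new accessors replacing open-coded signal->rlim dereferences (the check itself is hypothetical):

	if (locked_bytes > rlimit(RLIMIT_MEMLOCK))	/* current's soft limit */
		return -EPERM;

	max_files = rlimit_max(RLIMIT_NOFILE);		/* current's hard limit */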


@@ -136,25 +136,6 @@ static inline void serio_continue_rx(struct serio *serio)
spin_unlock_irq(&serio->lock);
}
/*
* Use the following functions to pin serio's driver in process context
*/
static inline int serio_pin_driver(struct serio *serio)
{
return mutex_lock_interruptible(&serio->drv_mutex);
}
static inline void serio_pin_driver_uninterruptible(struct serio *serio)
{
mutex_lock(&serio->drv_mutex);
}
static inline void serio_unpin_driver(struct serio *serio)
{
mutex_unlock(&serio->drv_mutex);
}
#endif
/*


@@ -28,8 +28,12 @@ struct ad7879_platform_data {
* 1 = 4, 2 = 8, 3 = 16 (median > averaging)
*/
u8 median;
/* 1 = AUX/VBAT/GPIO set to GPIO Output */
u8 gpio_output;
/* Initial GPIO pin state (valid if gpio_output = 1) */
u8 gpio_default;
/* 1 = AUX/VBAT/GPIO export GPIO to gpiolib
* requires CONFIG_GPIOLIB
*/
bool gpio_export;
/* identifies the first GPIO number handled by this chip;
* or, if negative, requests dynamic ID allocation.
*/
s32 gpio_base;
};


@@ -72,7 +72,10 @@ static inline __must_check char *strstrip(char *str)
}
#ifndef __HAVE_ARCH_STRSTR
extern char * strstr(const char *,const char *);
extern char * strstr(const char *, const char *);
#endif
#ifndef __HAVE_ARCH_STRNSTR
extern char * strnstr(const char *, const char *, size_t);
#endif
#ifndef __HAVE_ARCH_STRLEN
extern __kernel_size_t strlen(const char *);
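A hedged usage sketch of the newly declared strnstr(), which bounds the haystack search where strstr() does not (the helper is hypothetical):

	static bool frame_has_magic(const char *buf, size_t buflen)
	{
		/* only the first buflen bytes of buf are examined */
		return strnstr(buf, "MAGIC", buflen) != NULL;
	}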


@@ -195,7 +195,7 @@ struct perf_event_attr;
static const struct syscall_metadata __used \
__attribute__((__aligned__(4))) \
__attribute__((section("__syscalls_metadata"))) \
__syscall_meta_##sname = { \
__syscall_meta__##sname = { \
.name = "sys_"#sname, \
.nb_args = 0, \
.enter_event = &event_enter__##sname, \


@@ -482,6 +482,7 @@ enum
NET_IPV4_CONF_ARP_ACCEPT=21,
NET_IPV4_CONF_ARP_NOTIFY=22,
NET_IPV4_CONF_ACCEPT_LOCAL=23,
NET_IPV4_CONF_SRC_VMARK=24,
__NET_IPV4_CONF_MAX
};


@@ -99,7 +99,7 @@ int arch_update_cpu_topology(void);
| 1*SD_WAKE_AFFINE \
| 1*SD_SHARE_CPUPOWER \
| 0*SD_POWERSAVINGS_BALANCE \
| 0*SD_SHARE_PKG_RESOURCES \
| 1*SD_SHARE_PKG_RESOURCES \
| 0*SD_SERIALIZE \
| 0*SD_PREFER_SIBLING \
, \


@@ -464,7 +464,7 @@ extern int tty_port_alloc_xmit_buf(struct tty_port *port);
extern void tty_port_free_xmit_buf(struct tty_port *port);
extern void tty_port_put(struct tty_port *port);
extern inline struct tty_port *tty_port_get(struct tty_port *port)
static inline struct tty_port *tty_port_get(struct tty_port *port)
{
if (port)
kref_get(&port->kref);
@@ -486,7 +486,7 @@ extern void tty_port_close(struct tty_port *port,
struct tty_struct *tty, struct file *filp);
extern int tty_port_open(struct tty_port *port,
struct tty_struct *tty, struct file *filp);
extern inline int tty_port_users(struct tty_port *port)
static inline int tty_port_users(struct tty_port *port)
{
return port->count + port->blocked_open;
}


@@ -94,6 +94,7 @@ static inline unsigned long __copy_from_user_nocache(void *to,
* happens, handle that and return -EFAULT.
*/
extern long probe_kernel_read(void *dst, void *src, size_t size);
extern long __probe_kernel_read(void *dst, void *src, size_t size);
/*
* probe_kernel_write(): safely attempt to write to a location
@@ -104,6 +105,7 @@ extern long probe_kernel_read(void *dst, void *src, size_t size);
* Safely write to address @dst from the buffer at @src. If a kernel fault
* happens, handle that and return -EFAULT.
*/
extern long probe_kernel_write(void *dst, void *src, size_t size);
extern long notrace probe_kernel_write(void *dst, void *src, size_t size);
extern long notrace __probe_kernel_write(void *dst, void *src, size_t size);
#endif /* __LINUX_UACCESS_H__ */
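A usage fragment (addr is hypothetical): probe_kernel_read() lets code such as debuggers peek at memory that may be unmapped, returning -EFAULT instead of oopsing.

	unsigned long word;

	if (probe_kernel_read(&word, (void *)addr, sizeof(word)))
		return -EFAULT;	/* the fault was caught and handled */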


@@ -192,6 +192,7 @@ struct usb_interface {
unsigned needs_altsetting0:1; /* switch to altsetting 0 is pending */
unsigned needs_binding:1; /* needs delayed unbind/rebind */
unsigned reset_running:1;
unsigned resetting_device:1; /* true: bandwidth alloc after reset */
struct device dev; /* interface specific device info */
struct device *usb_dev;


@@ -70,6 +70,7 @@ struct writeback_control {
struct bdi_writeback;
int inode_wait(void *);
void writeback_inodes_sb(struct super_block *);
int writeback_inodes_sb_if_idle(struct super_block *);
void sync_inodes_sb(struct super_block *);
void writeback_inodes_wbc(struct writeback_control *wbc);
long wb_do_writeback(struct bdi_writeback *wb, int force_wait);