Linux 3.12-rc4

-----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1.4.14 (GNU/Linux)
 
 iQEcBAABAgAGBQJSUc9zAAoJEHm+PkMAQRiG9DMH/AtpuAF6LlMRPjrCeuJQ1pyh
 T0IUO+CsLKO6qtM5IyweP8V6zaasNjIuW1+B6IwVIl8aOrM+M7CwRiKvpey26ldM
 I8G2ron7hqSOSQqSQs20jN2yGAqQGpYIbTmpdGLAjQ350NNNvEKthbP5SZR5PAmE
 UuIx5OGEkaOyZXvCZJXU9AZkCxbihlMSt2zFVxybq2pwnGezRUYgCigE81aeyE0I
 QLwzzMVdkCxtZEpkdJMpLILAz22jN4RoVDbXRa2XC7dA9I2PEEXI9CcLzqCsx2Ii
 8eYS+no2K5N2rrpER7JFUB2B/2X8FaVDE+aJBCkfbtwaYTV9UYLq3a/sKVpo1Cs=
 =xSFJ
 -----END PGP SIGNATURE-----

Merge tag 'v3.12-rc4' into sched/core

Merge Linux v3.12-rc4 to fix a conflict and also to refresh the tree
before applying more scheduler patches.

Conflicts:
	arch/avr32/include/asm/Kbuild

Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 37bf06375c
Author: Ingo Molnar <mingo@kernel.org>
Date:   2013-10-09 12:36:13 +02:00

893 changed files with 8766 additions and 4841 deletions


@@ -158,6 +158,26 @@ static inline bool balloon_page_movable(struct page *page)
 	return false;
 }
 
+/*
+ * isolated_balloon_page - identify an isolated balloon page on private
+ *			   compaction/migration page lists.
+ *
+ * After a compaction thread isolates a balloon page for migration, it raises
+ * the page refcount to prevent concurrent compaction threads from re-isolating
+ * the same page. For that reason putback_movable_pages(), or other routines
+ * that need to identify isolated balloon pages on private pagelists, cannot
+ * rely on balloon_page_movable() to accomplish the task.
+ */
+static inline bool isolated_balloon_page(struct page *page)
+{
+	/* Already isolated balloon pages, by default, have a raised refcount */
+	if (page_flags_cleared(page) && !page_mapped(page) &&
+	    page_count(page) >= 2)
+		return __is_movable_balloon_page(page);
+
+	return false;
+}
+
 /*
  * balloon_page_insert - insert a page into the balloon's page list and make
  *			 the page->mapping assignment accordingly.
@@ -243,6 +263,11 @@ static inline bool balloon_page_movable(struct page *page)
 	return false;
 }
 
+static inline bool isolated_balloon_page(struct page *page)
+{
+	return false;
+}
+
 static inline bool balloon_page_isolate(struct page *page)
 {
 	return false;

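Illustrative aside (not part of the patch): a putback-style walker over a private isolation list could combine the helpers above. balloon_page_putback() is the existing API for returning an isolated page to its balloon; the rest is a sketch.

	static void putback_isolated_balloon_pages(struct list_head *pagelist)
	{
		struct page *page, *next;

		list_for_each_entry_safe(page, next, pagelist, lru) {
			/* Isolated balloon pages carry an extra refcount,
			 * so balloon_page_movable() would wrongly say no. */
			if (isolated_balloon_page(page)) {
				balloon_page_putback(page);
				continue;
			}
			/* ... normal LRU putback for other pages ... */
		}
	}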

@@ -242,6 +242,7 @@ extern int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc,
 			  struct bcma_device *core, bool enable);
 extern void bcma_core_pci_up(struct bcma_bus *bus);
 extern void bcma_core_pci_down(struct bcma_bus *bus);
+extern void bcma_core_pci_power_save(struct bcma_bus *bus, bool up);
 
 extern int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev);
 extern int bcma_core_pci_plat_dev_init(struct pci_dev *dev);


@@ -862,6 +862,17 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
 	return blk_queue_get_max_sectors(q, rq->cmd_flags);
 }
 
+static inline unsigned int blk_rq_count_bios(struct request *rq)
+{
+	unsigned int nr_bios = 0;
+	struct bio *bio;
+
+	__rq_for_each_bio(bio, rq)
+		nr_bios++;
+
+	return nr_bios;
+}
+
 /*
  * Request issue related functions.
  */

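A hypothetical caller, to show the intended use of the new helper (the driver and its single-bio policy are invented for illustration):

	static int my_queue_rq(struct request *rq)
	{
		/* blk_rq_count_bios() walks the bios merged into this request */
		if (blk_rq_count_bios(rq) > 1)
			return -EIO;	/* this sketch handles one bio per request */

		/* ... map and submit the single bio ... */
		return 0;
	}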

@@ -335,6 +335,8 @@ extern int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
 				  struct ceph_osd_request *req);
 extern void ceph_osdc_sync(struct ceph_osd_client *osdc);
 
+extern void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc);
+
 extern int ceph_osdc_readpages(struct ceph_osd_client *osdc,
 			       struct ceph_vino vino,
 			       struct ceph_file_layout *layout,


@@ -406,13 +406,14 @@ int dm_noflush_suspending(struct dm_target *ti);
 union map_info *dm_get_mapinfo(struct bio *bio);
 union map_info *dm_get_rq_mapinfo(struct request *rq);
+struct queue_limits *dm_get_queue_limits(struct mapped_device *md);
 
 /*
  * Geometry functions.
  */
 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);
 
 /*-----------------------------------------------------------------
  * Functions for manipulating device-mapper tables.
  *---------------------------------------------------------------*/


@@ -30,10 +30,13 @@
 /*
  * Framework version for util services.
  */
+#define UTIL_FW_MINOR  0
+
+#define UTIL_WS2K8_FW_MAJOR  1
+#define UTIL_WS2K8_FW_VERSION     (UTIL_WS2K8_FW_MAJOR << 16 | UTIL_FW_MINOR)
 
 #define UTIL_FW_MAJOR  3
-#define UTIL_FW_MINOR  0
-#define UTIL_FW_MAJOR_MINOR     (UTIL_FW_MAJOR << 16 | UTIL_FW_MINOR)
+#define UTIL_FW_VERSION     (UTIL_FW_MAJOR << 16 | UTIL_FW_MINOR)
 
 /*

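For reference, the packed words work out as follows (the values follow directly from the macros above; the host-generation labels are inferred from the WS2K8 naming):

	/*
	 * Major version in the high 16 bits, minor in the low 16:
	 *   UTIL_FW_VERSION       = (3 << 16) | 0 = 0x00030000
	 *   UTIL_WS2K8_FW_VERSION = (1 << 16) | 0 = 0x00010000  (Windows Server 2008 era)
	 */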

@@ -55,7 +55,7 @@
 #define	DMAR_IQT_REG	0x88	/* Invalidation queue tail register */
 #define	DMAR_IQ_SHIFT	4	/* Invalidation queue head/tail shift */
 #define	DMAR_IQA_REG	0x90	/* Invalidation queue addr register */
-#define	DMAR_ICS_REG	0x98	/* Invalidation complete status register */
+#define	DMAR_ICS_REG	0x9c	/* Invalidation complete status register */
 #define	DMAR_IRTA_REG	0xb8	/* Interrupt remapping table addr register */
 
 #define OFFSET_STRIDE		(9)


@@ -439,6 +439,17 @@ static inline char *hex_byte_pack(char *buf, u8 byte)
 	return buf;
 }
 
+extern const char hex_asc_upper[];
+#define hex_asc_upper_lo(x)	hex_asc_upper[((x) & 0x0f)]
+#define hex_asc_upper_hi(x)	hex_asc_upper[((x) & 0xf0) >> 4]
+
+static inline char *hex_byte_pack_upper(char *buf, u8 byte)
+{
+	*buf++ = hex_asc_upper_hi(byte);
+	*buf++ = hex_asc_upper_lo(byte);
+	return buf;
+}
+
 static inline char * __deprecated pack_hex_byte(char *buf, u8 byte)
 {
 	return hex_byte_pack(buf, byte);

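Usage sketch (illustrative only):

	static void format_byte(u8 byte, char out[3])
	{
		char *p = hex_byte_pack_upper(out, byte);

		*p = '\0';	/* 0x5a yields "5A"; hex_byte_pack() would give "5a" */
	}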

@@ -53,23 +53,6 @@ struct mem_cgroup_reclaim_cookie {
 	unsigned int generation;
 };
 
-enum mem_cgroup_filter_t {
-	VISIT,		/* visit current node */
-	SKIP,		/* skip the current node and continue traversal */
-	SKIP_TREE,	/* skip the whole subtree and continue traversal */
-};
-
-/*
- * mem_cgroup_filter_t predicate might instruct mem_cgroup_iter_cond how to
- * iterate through the hierarchy tree. Each tree element is checked by the
- * predicate before it is returned by the iterator. If a filter returns
- * SKIP or SKIP_TREE then the iterator code continues traversal (with the
- * next node down the hierarchy or the next node that doesn't belong under the
- * memcg's subtree).
- */
-typedef enum mem_cgroup_filter_t
-(*mem_cgroup_iter_filter)(struct mem_cgroup *memcg, struct mem_cgroup *root);
-
 #ifdef CONFIG_MEMCG
 /*
  * All "charge" functions with gfp_mask should use GFP_KERNEL or
@@ -137,18 +120,9 @@ mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
 extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
 	struct page *oldpage, struct page *newpage, bool migration_ok);
 
-struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
-				   struct mem_cgroup *prev,
-				   struct mem_cgroup_reclaim_cookie *reclaim,
-				   mem_cgroup_iter_filter cond);
-
-static inline struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
-				   struct mem_cgroup *prev,
-				   struct mem_cgroup_reclaim_cookie *reclaim)
-{
-	return mem_cgroup_iter_cond(root, prev, reclaim, NULL);
-}
-
+struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
+				   struct mem_cgroup *,
+				   struct mem_cgroup_reclaim_cookie *);
 void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
 
 /*
@@ -260,9 +234,9 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
 	mem_cgroup_update_page_stat(page, idx, -1);
 }
 
-enum mem_cgroup_filter_t
-mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
-		struct mem_cgroup *root);
+unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
+						gfp_t gfp_mask,
+						unsigned long *total_scanned);
 
 void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
 static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
@@ -376,15 +350,6 @@ static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
 		struct page *oldpage, struct page *newpage, bool migration_ok)
 {
 }
 
-static inline struct mem_cgroup *
-mem_cgroup_iter_cond(struct mem_cgroup *root,
-		struct mem_cgroup *prev,
-		struct mem_cgroup_reclaim_cookie *reclaim,
-		mem_cgroup_iter_filter cond)
-{
-	/* first call must return non-NULL, second return NULL */
-	return (struct mem_cgroup *)(unsigned long)!prev;
-}
-
 static inline struct mem_cgroup *
 mem_cgroup_iter(struct mem_cgroup *root,
@@ -471,11 +436,11 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
 }
 
 static inline
-enum mem_cgroup_filter_t
-mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
-		struct mem_cgroup *root)
+unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
+					    gfp_t gfp_mask,
+					    unsigned long *total_scanned)
 {
-	return VISIT;
+	return 0;
 }
 
 static inline void mem_cgroup_split_huge_fixup(struct page *head)

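The restored declaration brings back the plain hierarchy walk; callers use the usual iterator idiom, sketched below (stop_here() is a hypothetical predicate, not a kernel function):

	struct mem_cgroup *iter = NULL;

	while ((iter = mem_cgroup_iter(root, iter, NULL)) != NULL) {
		if (stop_here(iter)) {
			/* drop the iterator's reference before bailing out */
			mem_cgroup_iter_break(root, iter);
			break;
		}
		/* ... work on iter ... */
	}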

@@ -15,8 +15,8 @@
 #include <linux/spinlock_types.h>
 #include <linux/linkage.h>
 #include <linux/lockdep.h>
-
 #include <linux/atomic.h>
+#include <asm/processor.h>
 
 /*
  * Simple, straightforward mutexes with strict semantics:
@@ -175,8 +175,8 @@ extern void mutex_unlock(struct mutex *lock);
 extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
 
-#ifndef CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX
-#define arch_mutex_cpu_relax()	cpu_relax()
+#ifndef arch_mutex_cpu_relax
+# define arch_mutex_cpu_relax()	cpu_relax()
 #endif
 
 #endif

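With the guard keyed on the macro itself rather than a Kconfig symbol, an architecture opts in simply by defining the macro in its own headers before this point; everyone else falls through to cpu_relax(). A sketch of such an override (modeled on the s390 approach; the barrier() body is illustrative):

	/* in <asm/mutex.h> of an architecture with a cheaper backoff */
	#define arch_mutex_cpu_relax()	barrier()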

@@ -950,14 +950,14 @@ struct netdev_phys_port_id {
  *	multiple net devices on single physical port.
  *
  * void (*ndo_add_vxlan_port)(struct net_device *dev,
- *			      sa_family_t sa_family, __u16 port);
+ *			      sa_family_t sa_family, __be16 port);
  *	Called by vxlan to notiy a driver about the UDP port and socket
  *	address family that vxlan is listnening to. It is called only when
  *	a new port starts listening. The operation is protected by the
  *	vxlan_net->sock_lock.
  *
  * void (*ndo_del_vxlan_port)(struct net_device *dev,
- *			      sa_family_t sa_family, __u16 port);
+ *			      sa_family_t sa_family, __be16 port);
  *	Called by vxlan to notify the driver about a UDP port and socket
  *	address family that vxlan is not listening to anymore. The operation
  *	is protected by the vxlan_net->sock_lock.
@@ -1093,10 +1093,10 @@ struct net_device_ops {
 						      struct netdev_phys_port_id *ppid);
 	void			(*ndo_add_vxlan_port)(struct net_device *dev,
 						      sa_family_t sa_family,
-						      __u16 port);
+						      __be16 port);
 	void			(*ndo_del_vxlan_port)(struct net_device *dev,
 						      sa_family_t sa_family,
-						      __u16 port);
+						      __be16 port);
 };
 
 /*

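The __be16 annotation lets sparse check that the port stays in network byte order end to end; a driver comparing it with a host-order constant must convert. Hypothetical callback, not from the patch:

	static void my_add_vxlan_port(struct net_device *dev,
				      sa_family_t sa_family, __be16 port)
	{
		if (port == htons(4789))	/* IANA-assigned VXLAN port */
			/* ... program the NIC's tunnel parser ... */;
	}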

@@ -296,10 +296,12 @@ ip_set_eexist(int ret, u32 flags)
 
 /* Match elements marked with nomatch */
 static inline bool
-ip_set_enomatch(int ret, u32 flags, enum ipset_adt adt)
+ip_set_enomatch(int ret, u32 flags, enum ipset_adt adt, struct ip_set *set)
 {
 	return adt == IPSET_TEST &&
-	       ret == -ENOTEMPTY && ((flags >> 16) & IPSET_FLAG_NOMATCH);
+	       (set->type->features & IPSET_TYPE_NOMATCH) &&
+	       ((flags >> 16) & IPSET_FLAG_NOMATCH) &&
+	       (ret > 0 || ret == -ENOTEMPTY);
 }
 
 /* Check the NLA_F_NET_BYTEORDER flag */


@@ -1455,7 +1455,8 @@ struct nfs_rpc_ops {
 	struct inode * (*open_context) (struct inode *dir,
 				struct nfs_open_context *ctx,
 				int open_flags,
-				struct iattr *iattr);
+				struct iattr *iattr,
+				int *);
 	int (*have_delegation)(struct inode *, fmode_t);
 	int (*return_delegation)(struct inode *);
 	struct nfs_client *(*alloc_client) (const struct nfs_client_initdata *);


@@ -1,8 +1,6 @@
 #ifndef __OF_IRQ_H
 #define __OF_IRQ_H
 
-#if defined(CONFIG_OF)
-struct of_irq;
 #include <linux/types.h>
 #include <linux/errno.h>
 #include <linux/irq.h>
@@ -10,14 +8,6 @@ struct of_irq;
 #include <linux/ioport.h>
 #include <linux/of.h>
-
-/*
- * irq_of_parse_and_map() is used by all OF enabled platforms; but SPARC
- * implements it differently. However, the prototype is the same for all,
- * so declare it here regardless of the CONFIG_OF_IRQ setting.
- */
-extern unsigned int irq_of_parse_and_map(struct device_node *node, int index);
-
 #if defined(CONFIG_OF_IRQ)
 /**
  * of_irq - container for device_node/irq_specifier pair for an irq controller
  * @controller: pointer to interrupt controller device tree node
@@ -71,11 +61,17 @@ extern int of_irq_to_resource(struct device_node *dev, int index,
 extern int of_irq_count(struct device_node *dev);
 extern int of_irq_to_resource_table(struct device_node *dev,
 		struct resource *res, int nr_irqs);
-extern struct device_node *of_irq_find_parent(struct device_node *child);
-
 extern void of_irq_init(const struct of_device_id *matches);
 
 #endif /* CONFIG_OF_IRQ */
 
+#if defined(CONFIG_OF)
+/*
+ * irq_of_parse_and_map() is used by all OF enabled platforms; but SPARC
+ * implements it differently. However, the prototype is the same for all,
+ * so declare it here regardless of the CONFIG_OF_IRQ setting.
+ */
+extern unsigned int irq_of_parse_and_map(struct device_node *node, int index);
+extern struct device_node *of_irq_find_parent(struct device_node *child);
 #else /* !CONFIG_OF */
 static inline unsigned int irq_of_parse_and_map(struct device_node *dev,

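The declarations only move between #ifdef blocks; consumers are unchanged. Typical use, for reference (np is assumed to be a valid struct device_node pointer):

	unsigned int virq = irq_of_parse_and_map(np, 0);	/* first interrupts entry */

	if (!virq)
		return -ENODEV;	/* no, or unmappable, interrupt */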

@@ -40,6 +40,8 @@ enum regulator_status {
 };
 
 /**
  * struct regulator_linear_range - specify linear voltage ranges
  *
+ * Specify a range of voltages for regulator_map_linar_range() and
+ * regulator_list_linear_range().
  *


@@ -498,7 +498,7 @@ struct sk_buff {
 	 * headers if needed
 	 */
 	__u8			encapsulation:1;
-	/* 7/9 bit hole (depending on ndisc_nodetype presence) */
+	/* 6/8 bit hole (depending on ndisc_nodetype presence) */
 	kmemcheck_bitfield_end(flags2);
 
 #if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL


@@ -155,6 +155,12 @@ smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
 
 static inline void kick_all_cpus_sync(void) { }
 
+static inline void __smp_call_function_single(int cpuid,
+				struct call_single_data *data, int wait)
+{
+	on_each_cpu(data->func, data->info, wait);
+}
+
 #endif /* !SMP */
 
 /*

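The new !SMP stub lets callers avoid an #ifdef: on a uniprocessor build on_each_cpu() simply runs the callback on the local CPU. Sketch (my_func is a hypothetical smp_call_func_t):

	struct call_single_data csd = { .func = my_func, .info = NULL };

	/* on UP this executes my_func(NULL) right here */
	__smp_call_function_single(0, &csd, 1);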

@@ -141,6 +141,7 @@ extern int do_adjtimex(struct timex *);
 extern void hardpps(const struct timespec *, const struct timespec *);
 
 int read_current_timer(unsigned long *timer_val);
+void ntp_notify_cmos_timer(void);
 
 /* The clock frequency of the i8253/i8254 PIT */
 #define PIT_TICK_RATE 1193182ul


@@ -42,6 +42,7 @@ struct usbnet {
 	struct usb_host_endpoint *status;
 	unsigned		maxpacket;
 	struct timer_list	delay;
+	const char		*padding_pkt;
 
 	/* protocol/interface state */
 	struct net_device	*net;