Linux 3.12-rc4

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.14 (GNU/Linux)

iQEcBAABAgAGBQJSUc9zAAoJEHm+PkMAQRiG9DMH/AtpuAF6LlMRPjrCeuJQ1pyh
T0IUO+CsLKO6qtM5IyweP8V6zaasNjIuW1+B6IwVIl8aOrM+M7CwRiKvpey26ldM
I8G2ron7hqSOSQqSQs20jN2yGAqQGpYIbTmpdGLAjQ350NNNvEKthbP5SZR5PAmE
UuIx5OGEkaOyZXvCZJXU9AZkCxbihlMSt2zFVxybq2pwnGezRUYgCigE81aeyE0I
QLwzzMVdkCxtZEpkdJMpLILAz22jN4RoVDbXRa2XC7dA9I2PEEXI9CcLzqCsx2Ii
8eYS+no2K5N2rrpER7JFUB2B/2X8FaVDE+aJBCkfbtwaYTV9UYLq3a/sKVpo1Cs=
=xSFJ
-----END PGP SIGNATURE-----

Merge tag 'v3.12-rc4' into sched/core

Merge Linux v3.12-rc4 to fix a conflict and also to refresh the tree
before applying more scheduler patches.

Conflicts:
	arch/avr32/include/asm/Kbuild

Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 37bf06375c
893 changed files with 8766 additions and 4841 deletions

@@ -6,12 +6,12 @@ static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
 	return mk_pte(page, pgprot);
 }
 
-static inline int huge_pte_write(pte_t pte)
+static inline unsigned long huge_pte_write(pte_t pte)
 {
 	return pte_write(pte);
 }
 
-static inline int huge_pte_dirty(pte_t pte)
+static inline unsigned long huge_pte_dirty(pte_t pte)
 {
 	return pte_dirty(pte);
 }

@@ -0,0 +1 @@
+/* no content, but patch(1) dislikes empty files */

@@ -1322,10 +1322,9 @@ extern int drm_newctx(struct drm_device *dev, void *data,
 extern int drm_rmctx(struct drm_device *dev, void *data,
 		     struct drm_file *file_priv);
 
-extern void drm_legacy_ctxbitmap_init(struct drm_device *dev);
-extern void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev);
-extern void drm_legacy_ctxbitmap_release(struct drm_device *dev,
-					 struct drm_file *file_priv);
+extern int drm_ctxbitmap_init(struct drm_device *dev);
+extern void drm_ctxbitmap_cleanup(struct drm_device *dev);
+extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
 
 extern int drm_setsareactx(struct drm_device *dev, void *data,
 			   struct drm_file *file_priv);

@@ -12,11 +12,14 @@
 	{0x1002, 0x130F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x1311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x1312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x1313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
 	{0x1002, 0x3151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \

@@ -158,6 +158,26 @@ static inline bool balloon_page_movable(struct page *page)
 	return false;
 }
 
+/*
+ * isolated_balloon_page - identify an isolated balloon page on private
+ *			   compaction/migration page lists.
+ *
+ * After a compaction thread isolates a balloon page for migration, it raises
+ * the page refcount to prevent concurrent compaction threads from re-isolating
+ * the same page. For that reason putback_movable_pages(), or other routines
+ * that need to identify isolated balloon pages on private pagelists, cannot
+ * rely on balloon_page_movable() to accomplish the task.
+ */
+static inline bool isolated_balloon_page(struct page *page)
+{
+	/* Already isolated balloon pages, by default, have a raised refcount */
+	if (page_flags_cleared(page) && !page_mapped(page) &&
+	    page_count(page) >= 2)
+		return __is_movable_balloon_page(page);
+
+	return false;
+}
+
 /*
  * balloon_page_insert - insert a page into the balloon's page list and make
  *			 the page->mapping assignment accordingly.
@@ -243,6 +263,11 @@ static inline bool balloon_page_movable(struct page *page)
 	return false;
 }
 
+static inline bool isolated_balloon_page(struct page *page)
+{
+	return false;
+}
+
 static inline bool balloon_page_isolate(struct page *page)
 {
 	return false;

@@ -242,6 +242,7 @@ extern int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc,
 				 struct bcma_device *core, bool enable);
 extern void bcma_core_pci_up(struct bcma_bus *bus);
 extern void bcma_core_pci_down(struct bcma_bus *bus);
+extern void bcma_core_pci_power_save(struct bcma_bus *bus, bool up);
 
 extern int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev);
 extern int bcma_core_pci_plat_dev_init(struct pci_dev *dev);

@@ -862,6 +862,17 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
 	return blk_queue_get_max_sectors(q, rq->cmd_flags);
 }
 
+static inline unsigned int blk_rq_count_bios(struct request *rq)
+{
+	unsigned int nr_bios = 0;
+	struct bio *bio;
+
+	__rq_for_each_bio(bio, rq)
+		nr_bios++;
+
+	return nr_bios;
+}
+
 /*
  * Request issue related functions.
 */
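
The new blk_rq_count_bios() helper is just a walk over the request's chained
bios. A minimal userspace sketch of the same walk follows; the two structs and
the macro are stand-ins for the kernel's struct bio/struct request, not the
real types:

	#include <stdio.h>

	struct bio { struct bio *bi_next; };
	struct request { struct bio *bio; };

	/* mirrors __rq_for_each_bio(): follow the bi_next chain off rq->bio */
	#define rq_for_each_bio(_bio, rq) \
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

	static unsigned int count_bios(struct request *rq)
	{
		unsigned int nr_bios = 0;
		struct bio *bio;

		rq_for_each_bio(bio, rq)
			nr_bios++;

		return nr_bios;
	}

	int main(void)
	{
		struct bio b3 = { NULL }, b2 = { &b3 }, b1 = { &b2 };
		struct request rq = { &b1 };

		printf("%u\n", count_bios(&rq));	/* prints 3 */
		return 0;
	}
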
@@ -335,6 +335,8 @@ extern int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
 				  struct ceph_osd_request *req);
 extern void ceph_osdc_sync(struct ceph_osd_client *osdc);
 
+extern void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc);
+
 extern int ceph_osdc_readpages(struct ceph_osd_client *osdc,
 			       struct ceph_vino vino,
 			       struct ceph_file_layout *layout,

@@ -406,13 +406,14 @@ int dm_noflush_suspending(struct dm_target *ti);
 union map_info *dm_get_mapinfo(struct bio *bio);
 union map_info *dm_get_rq_mapinfo(struct request *rq);
 
+struct queue_limits *dm_get_queue_limits(struct mapped_device *md);
+
 /*
  * Geometry functions.
  */
 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);
 
 
 /*-----------------------------------------------------------------
  * Functions for manipulating device-mapper tables.
  *---------------------------------------------------------------*/

@@ -30,10 +30,13 @@
 /*
  * Framework version for util services.
  */
+#define UTIL_FW_MINOR  0
+
+#define UTIL_WS2K8_FW_MAJOR  1
+#define UTIL_WS2K8_FW_VERSION     (UTIL_WS2K8_FW_MAJOR << 16 | UTIL_FW_MINOR)
+
 #define UTIL_FW_MAJOR  3
-#define UTIL_FW_MINOR  0
-#define UTIL_FW_MAJOR_MINOR     (UTIL_FW_MAJOR << 16 | UTIL_FW_MINOR)
+#define UTIL_FW_VERSION     (UTIL_FW_MAJOR << 16 | UTIL_FW_MINOR)
 
 
 /*
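
These framework versions are packed with the major number in the high 16 bits
and the minor in the low 16 bits. A standalone sketch of the same pack/unpack
arithmetic (plain C, not the Hyper-V code):

	#include <stdio.h>
	#include <stdint.h>

	#define UTIL_FW_MAJOR	3
	#define UTIL_FW_MINOR	0
	#define UTIL_FW_VERSION	(UTIL_FW_MAJOR << 16 | UTIL_FW_MINOR)

	int main(void)
	{
		uint32_t ver = UTIL_FW_VERSION;

		/* unpack: high halfword is the major, low halfword the minor */
		printf("0x%08x -> %u.%u\n", ver, ver >> 16, ver & 0xffff);
		return 0;	/* prints "0x00030000 -> 3.0" */
	}
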
@@ -55,7 +55,7 @@
 #define	DMAR_IQT_REG	0x88	/* Invalidation queue tail register */
 #define	DMAR_IQ_SHIFT	4	/* Invalidation queue head/tail shift */
 #define	DMAR_IQA_REG	0x90	/* Invalidation queue addr register */
-#define	DMAR_ICS_REG	0x98	/* Invalidation complete status register */
+#define	DMAR_ICS_REG	0x9c	/* Invalidation complete status register */
 #define	DMAR_IRTA_REG	0xb8	/* Interrupt remapping table addr register */
 
 #define OFFSET_STRIDE		(9)

@@ -439,6 +439,17 @@ static inline char *hex_byte_pack(char *buf, u8 byte)
 	return buf;
 }
 
+extern const char hex_asc_upper[];
+#define hex_asc_upper_lo(x)	hex_asc_upper[((x) & 0x0f)]
+#define hex_asc_upper_hi(x)	hex_asc_upper[((x) & 0xf0) >> 4]
+
+static inline char *hex_byte_pack_upper(char *buf, u8 byte)
+{
+	*buf++ = hex_asc_upper_hi(byte);
+	*buf++ = hex_asc_upper_lo(byte);
+	return buf;
+}
+
 static inline char * __deprecated pack_hex_byte(char *buf, u8 byte)
 {
	return hex_byte_pack(buf, byte);
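
The upper-case packer can be exercised stand-alone. A sketch, assuming
hex_asc_upper carries the table "0123456789ABCDEF" (as the matching
lib/hexdump.c change defines it) and with the kernel's u8 swapped for
unsigned char:

	#include <stdio.h>

	static const char hex_asc_upper[] = "0123456789ABCDEF";
	#define hex_asc_upper_lo(x)	hex_asc_upper[((x) & 0x0f)]
	#define hex_asc_upper_hi(x)	hex_asc_upper[((x) & 0xf0) >> 4]

	/* same body as the new helper above: high nibble first, then low */
	static char *hex_byte_pack_upper(char *buf, unsigned char byte)
	{
		*buf++ = hex_asc_upper_hi(byte);
		*buf++ = hex_asc_upper_lo(byte);
		return buf;
	}

	int main(void)
	{
		char buf[3] = { 0 };

		hex_byte_pack_upper(buf, 0xAB);
		printf("%s\n", buf);	/* prints "AB" */
		return 0;
	}
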
@@ -53,23 +53,6 @@ struct mem_cgroup_reclaim_cookie {
 	unsigned int generation;
 };
 
-enum mem_cgroup_filter_t {
-	VISIT,		/* visit current node */
-	SKIP,		/* skip the current node and continue traversal */
-	SKIP_TREE,	/* skip the whole subtree and continue traversal */
-};
-
-/*
- * mem_cgroup_filter_t predicate might instruct mem_cgroup_iter_cond how to
- * iterate through the hierarchy tree. Each tree element is checked by the
- * predicate before it is returned by the iterator. If a filter returns
- * SKIP or SKIP_TREE then the iterator code continues traversal (with the
- * next node down the hierarchy or the next node that doesn't belong under the
- * memcg's subtree).
- */
-typedef enum mem_cgroup_filter_t
-(*mem_cgroup_iter_filter)(struct mem_cgroup *memcg, struct mem_cgroup *root);
-
 #ifdef CONFIG_MEMCG
 /*
  * All "charge" functions with gfp_mask should use GFP_KERNEL or
@@ -137,18 +120,9 @@ mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
 extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
 	struct page *oldpage, struct page *newpage, bool migration_ok);
 
-struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
-				   struct mem_cgroup *prev,
-				   struct mem_cgroup_reclaim_cookie *reclaim,
-				   mem_cgroup_iter_filter cond);
-
-static inline struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
-				   struct mem_cgroup *prev,
-				   struct mem_cgroup_reclaim_cookie *reclaim)
-{
-	return mem_cgroup_iter_cond(root, prev, reclaim, NULL);
-}
-
+struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
+				   struct mem_cgroup *,
+				   struct mem_cgroup_reclaim_cookie *);
 void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
 
 /*
@@ -260,9 +234,9 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
 	mem_cgroup_update_page_stat(page, idx, -1);
 }
 
-enum mem_cgroup_filter_t
-mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
-		struct mem_cgroup *root);
+unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
+						gfp_t gfp_mask,
+						unsigned long *total_scanned);
 
 void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
 static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
@@ -376,15 +350,6 @@ static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
 		struct page *oldpage, struct page *newpage, bool migration_ok)
 {
 }
-static inline struct mem_cgroup *
-mem_cgroup_iter_cond(struct mem_cgroup *root,
-		struct mem_cgroup *prev,
-		struct mem_cgroup_reclaim_cookie *reclaim,
-		mem_cgroup_iter_filter cond)
-{
-	/* first call must return non-NULL, second return NULL */
-	return (struct mem_cgroup *)(unsigned long)!prev;
-}
 
 static inline struct mem_cgroup *
 mem_cgroup_iter(struct mem_cgroup *root,
@@ -471,11 +436,11 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
 }
 
 static inline
-enum mem_cgroup_filter_t
-mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
-		struct mem_cgroup *root)
+unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
+					    gfp_t gfp_mask,
+					    unsigned long *total_scanned)
 {
-	return VISIT;
+	return 0;
 }
 
 static inline void mem_cgroup_split_huge_fixup(struct page *head)

@@ -15,8 +15,8 @@
 #include <linux/spinlock_types.h>
 #include <linux/linkage.h>
 #include <linux/lockdep.h>
-
 #include <linux/atomic.h>
+#include <asm/processor.h>
 
 /*
  * Simple, straightforward mutexes with strict semantics:
@@ -175,8 +175,8 @@ extern void mutex_unlock(struct mutex *lock);
 
 extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
 
-#ifndef CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX
-#define arch_mutex_cpu_relax()	cpu_relax()
+#ifndef arch_mutex_cpu_relax
+# define arch_mutex_cpu_relax()	cpu_relax()
 #endif
 
#endif
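
The second hunk swaps a Kconfig-symbol test for a plain macro test: an
architecture that wants its own relax primitive now just defines
arch_mutex_cpu_relax() in <asm/processor.h> (included above) before this
default is seen. A minimal sketch of the override pattern, using a
hypothetical my_cpu_relax() rather than the kernel macro:

	#include <stdio.h>

	/* an arch header would provide its own version first, e.g.:
	 * #define my_cpu_relax() __asm__ volatile("" ::: "memory")
	 */

	#ifndef my_cpu_relax
	# define my_cpu_relax() ((void)0)	/* generic fallback */
	#endif

	int main(void)
	{
		my_cpu_relax();
		puts("fell back to the default definition");
		return 0;
	}
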
@@ -950,14 +950,14 @@ struct netdev_phys_port_id {
  *	multiple net devices on single physical port.
  *
  * void (*ndo_add_vxlan_port)(struct net_device *dev,
- *			      sa_family_t sa_family, __u16 port);
+ *			      sa_family_t sa_family, __be16 port);
  *	Called by vxlan to notiy a driver about the UDP port and socket
  *	address family that vxlan is listnening to. It is called only when
  *	a new port starts listening. The operation is protected by the
  *	vxlan_net->sock_lock.
  *
  * void (*ndo_del_vxlan_port)(struct net_device *dev,
- *			      sa_family_t sa_family, __u16 port);
+ *			      sa_family_t sa_family, __be16 port);
  *	Called by vxlan to notify the driver about a UDP port and socket
  *	address family that vxlan is not listening to anymore. The operation
  *	is protected by the vxlan_net->sock_lock.
@@ -1093,10 +1093,10 @@ struct net_device_ops {
 						      struct netdev_phys_port_id *ppid);
 	void			(*ndo_add_vxlan_port)(struct net_device *dev,
 						      sa_family_t sa_family,
-						      __u16 port);
+						      __be16 port);
 	void			(*ndo_del_vxlan_port)(struct net_device *dev,
 						      sa_family_t sa_family,
-						      __u16 port);
+						      __be16 port);
 };
 
/*
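
The __u16 -> __be16 change is an annotation fix: the UDP port vxlan hands to
these driver callbacks is already in network (big-endian) byte order. A
standalone illustration with htons()/ntohs(), using the IANA vxlan port 4789
(the raw value printed differs on little-endian hosts):

	#include <stdio.h>
	#include <stdint.h>
	#include <arpa/inet.h>

	int main(void)
	{
		uint16_t host_port = 4789;		/* host byte order */
		uint16_t be_port = htons(host_port);	/* what the callback sees */

		printf("host: %u  raw big-endian: %u  ntohs: %u\n",
		       host_port, be_port, ntohs(be_port));
		return 0;
	}
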
@@ -296,10 +296,12 @@ ip_set_eexist(int ret, u32 flags)
 
 /* Match elements marked with nomatch */
 static inline bool
-ip_set_enomatch(int ret, u32 flags, enum ipset_adt adt)
+ip_set_enomatch(int ret, u32 flags, enum ipset_adt adt, struct ip_set *set)
 {
 	return adt == IPSET_TEST &&
-	       ret == -ENOTEMPTY && ((flags >> 16) & IPSET_FLAG_NOMATCH);
+	       (set->type->features & IPSET_TYPE_NOMATCH) &&
+	       ((flags >> 16) & IPSET_FLAG_NOMATCH) &&
+	       (ret > 0 || ret == -ENOTEMPTY);
 }
 
 /* Check the NLA_F_NET_BYTEORDER flag */

@@ -1455,7 +1455,8 @@ struct nfs_rpc_ops {
 	struct inode * (*open_context) (struct inode *dir,
 				struct nfs_open_context *ctx,
 				int open_flags,
-				struct iattr *iattr);
+				struct iattr *iattr,
+				int *);
 	int (*have_delegation)(struct inode *, fmode_t);
 	int (*return_delegation)(struct inode *);
 	struct nfs_client *(*alloc_client) (const struct nfs_client_initdata *);

@@ -1,8 +1,6 @@
 #ifndef __OF_IRQ_H
 #define __OF_IRQ_H
 
-#if defined(CONFIG_OF)
-struct of_irq;
 #include <linux/types.h>
 #include <linux/errno.h>
 #include <linux/irq.h>
@@ -10,14 +8,6 @@ struct of_irq;
 #include <linux/ioport.h>
 #include <linux/of.h>
 
-/*
- * irq_of_parse_and_map() is used by all OF enabled platforms; but SPARC
- * implements it differently. However, the prototype is the same for all,
- * so declare it here regardless of the CONFIG_OF_IRQ setting.
- */
-extern unsigned int irq_of_parse_and_map(struct device_node *node, int index);
-
-#if defined(CONFIG_OF_IRQ)
 /**
  * of_irq - container for device_node/irq_specifier pair for an irq controller
  * @controller: pointer to interrupt controller device tree node
@@ -71,11 +61,17 @@ extern int of_irq_to_resource(struct device_node *dev, int index,
 extern int of_irq_count(struct device_node *dev);
 extern int of_irq_to_resource_table(struct device_node *dev,
 		struct resource *res, int nr_irqs);
-extern struct device_node *of_irq_find_parent(struct device_node *child);
 
 extern void of_irq_init(const struct of_device_id *matches);
 
 #endif /* CONFIG_OF_IRQ */
+
+#if defined(CONFIG_OF)
+/*
+ * irq_of_parse_and_map() is used by all OF enabled platforms; but SPARC
+ * implements it differently. However, the prototype is the same for all,
+ * so declare it here regardless of the CONFIG_OF_IRQ setting.
+ */
+extern unsigned int irq_of_parse_and_map(struct device_node *node, int index);
+extern struct device_node *of_irq_find_parent(struct device_node *child);
 
 #else /* !CONFIG_OF */
 static inline unsigned int irq_of_parse_and_map(struct device_node *dev,

@@ -40,6 +40,8 @@ enum regulator_status {
 };
 
 /**
  * struct regulator_linear_range - specify linear voltage ranges
  *
+ * Specify a range of voltages for regulator_map_linar_range() and
+ * regulator_list_linear_range().
  *

@@ -498,7 +498,7 @@ struct sk_buff {
 	 * headers if needed
 	 */
 	__u8			encapsulation:1;
-	/* 7/9 bit hole (depending on ndisc_nodetype presence) */
+	/* 6/8 bit hole (depending on ndisc_nodetype presence) */
 	kmemcheck_bitfield_end(flags2);
 
 #if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL

@@ -155,6 +155,12 @@ smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
 
 static inline void kick_all_cpus_sync(void) {  }
 
+static inline void __smp_call_function_single(int cpuid,
+					      struct call_single_data *data, int wait)
+{
+	on_each_cpu(data->func, data->info, wait);
+}
+
 #endif /* !SMP */
 
 /*

@@ -141,6 +141,7 @@ extern int do_adjtimex(struct timex *);
 extern void hardpps(const struct timespec *, const struct timespec *);
 
 int read_current_timer(unsigned long *timer_val);
+void ntp_notify_cmos_timer(void);
 
 /* The clock frequency of the i8253/i8254 PIT */
 #define PIT_TICK_RATE 1193182ul

@@ -42,6 +42,7 @@ struct usbnet {
 	struct usb_host_endpoint *status;
 	unsigned		maxpacket;
 	struct timer_list	delay;
+	const char		*padding_pkt;
 
 	/* protocol/interface state */
 	struct net_device	*net;

@@ -67,6 +67,10 @@ int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
 int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr);
 #endif
 
+bool ipv6_chk_custom_prefix(const struct in6_addr *addr,
+			    const unsigned int prefix_len,
+			    struct net_device *dev);
+
 int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev);
 
 struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net,

@@ -104,6 +104,7 @@ enum {
 enum {
 	HCI_SETUP,
 	HCI_AUTO_OFF,
+	HCI_RFKILLED,
 	HCI_MGMT,
 	HCI_PAIRABLE,
 	HCI_SERVICE_CACHE,

@@ -264,9 +264,11 @@ int ip_dont_fragment(struct sock *sk, struct dst_entry *dst)
 
 extern void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more);
 
-static inline void ip_select_ident(struct iphdr *iph, struct dst_entry *dst, struct sock *sk)
+static inline void ip_select_ident(struct sk_buff *skb, struct dst_entry *dst, struct sock *sk)
 {
-	if (iph->frag_off & htons(IP_DF)) {
+	struct iphdr *iph = ip_hdr(skb);
+
+	if ((iph->frag_off & htons(IP_DF)) && !skb->local_df) {
 		/* This is only to work around buggy Windows95/2000
 		 * VJ compression implementations.  If the ID field
 		 * does not change, they drop every other packet in
@@ -278,9 +280,11 @@ static inline void ip_select_ident(struct iphdr *iph, struct dst_entry *dst, str
 		__ip_select_ident(iph, dst, 0);
 }
 
-static inline void ip_select_ident_more(struct iphdr *iph, struct dst_entry *dst, struct sock *sk, int more)
+static inline void ip_select_ident_more(struct sk_buff *skb, struct dst_entry *dst, struct sock *sk, int more)
 {
-	if (iph->frag_off & htons(IP_DF)) {
+	struct iphdr *iph = ip_hdr(skb);
+
+	if ((iph->frag_off & htons(IP_DF)) && !skb->local_df) {
 		if (sk && inet_sk(sk)->inet_daddr) {
 			iph->id = htons(inet_sk(sk)->inet_id);
 			inet_sk(sk)->inet_id += 1 + more;

@@ -723,8 +723,6 @@ struct ip_vs_dest_dst {
 	struct rcu_head		rcu_head;
 };
 
-/* In grace period after removing */
-#define IP_VS_DEST_STATE_REMOVING	0x01
 /*
  *	The real server destination forwarding entry
  *	with ip address, port number, and so on.
@@ -742,7 +740,7 @@ struct ip_vs_dest {
 
 	atomic_t		refcnt;		/* reference counter */
 	struct ip_vs_stats      stats;          /* statistics */
-	unsigned long		state;		/* state flags */
+	unsigned long		idle_start;	/* start time, jiffies */
 
 	/* connection counters and thresholds */
 	atomic_t		activeconns;	/* active connections */
@@ -756,14 +754,13 @@ struct ip_vs_dest {
 	struct ip_vs_dest_dst __rcu *dest_dst;	/* cached dst info */
 
 	/* for virtual service */
-	struct ip_vs_service	*svc;		/* service it belongs to */
+	struct ip_vs_service __rcu *svc;	/* service it belongs to */
 	__u16			protocol;	/* which protocol (TCP/UDP) */
 	__be16			vport;		/* virtual port number */
 	union nf_inet_addr	vaddr;		/* virtual IP address */
 	__u32			vfwmark;	/* firewall mark of service */
 
 	struct list_head	t_list;		/* in dest_trash */
-	struct rcu_head		rcu_head;
 	unsigned int		in_rs_table:1;	/* we are in rs_table */
 };
 
@@ -1649,7 +1646,7 @@ static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
 /* CONFIG_IP_VS_NFCT */
 #endif
 
-static inline unsigned int
+static inline int
 ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
 {
 	/*

@@ -112,6 +112,7 @@ struct mrp_applicant {
 	struct mrp_application	*app;
 	struct net_device	*dev;
 	struct timer_list	join_timer;
+	struct timer_list	periodic_timer;
 
 	spinlock_t		lock;
 	struct sk_buff_head	queue;

@@ -74,6 +74,7 @@ struct net {
 	struct hlist_head 	*dev_index_head;
 	unsigned int		dev_base_seq;	/* protected by rtnl_mutex */
 	int			ifindex;
+	unsigned int		dev_unreg_count;
 
 	/* core fib_rules */
 	struct list_head	rules_ops;

@@ -86,7 +86,7 @@ static inline void nf_ct_ext_destroy(struct nf_conn *ct)
 static inline void nf_ct_ext_free(struct nf_conn *ct)
 {
 	if (ct->ext)
-		kfree(ct->ext);
+		kfree_rcu(ct->ext, rcu);
 }
 
 /* Add this type, returns pointer to data or NULL. */

@@ -56,7 +56,7 @@ struct synproxy_options {
 
 struct tcphdr;
 struct xt_synproxy_info;
-extern void synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
+extern bool synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
 				   const struct tcphdr *th,
 				   struct synproxy_options *opts);
 extern unsigned int synproxy_options_size(const struct synproxy_options *opts);

@@ -3,7 +3,6 @@
 
 #include <linux/types.h>
 
-extern void net_secret_init(void);
 extern __u32 secure_ip_id(__be32 daddr);
 extern __u32 secure_ipv6_id(const __be32 daddr[4]);
 extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);

@@ -409,6 +409,11 @@ struct sock {
 	void			(*sk_destruct)(struct sock *sk);
 };
 
+#define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data)))
+
+#define rcu_dereference_sk_user_data(sk)	rcu_dereference(__sk_user_data((sk)))
+#define rcu_assign_sk_user_data(sk, ptr)	rcu_assign_pointer(__sk_user_data((sk)), ptr)
+
 /*
  * SK_CAN_REUSE and SK_NO_REUSE on a socket mean that the socket is OK
  * or not whether his port will be reused by someone else. SK_FORCE_REUSE

@@ -618,6 +618,7 @@ TRACE_EVENT(block_rq_remap,
 		__field( unsigned int,	nr_sector	)
 		__field( dev_t,		old_dev		)
 		__field( sector_t,	old_sector	)
+		__field( unsigned int,	nr_bios		)
 		__array( char,		rwbs,	RWBS_LEN)
 	),
 
@@ -627,15 +628,16 @@ TRACE_EVENT(block_rq_remap,
 		__entry->nr_sector	= blk_rq_sectors(rq);
 		__entry->old_dev	= dev;
 		__entry->old_sector	= from;
+		__entry->nr_bios	= blk_rq_count_bios(rq);
 		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
 	),
 
-	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
+	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu %u",
 		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
 		  (unsigned long long)__entry->sector,
 		  __entry->nr_sector,
 		  MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
-		  (unsigned long long)__entry->old_sector)
+		  (unsigned long long)__entry->old_sector, __entry->nr_bios)
 );
 
 #endif /* _TRACE_BLOCK_H */

@@ -42,6 +42,7 @@ struct extent_buffer;
 		{ BTRFS_TREE_LOG_OBJECTID,	"TREE_LOG"	},	\
 		{ BTRFS_QUOTA_TREE_OBJECTID,	"QUOTA_TREE"	},	\
 		{ BTRFS_TREE_RELOC_OBJECTID,	"TREE_RELOC"	},	\
+		{ BTRFS_UUID_TREE_OBJECTID,	"UUID_RELOC"	},	\
 		{ BTRFS_DATA_RELOC_TREE_OBJECTID, "DATA_RELOC_TREE" })
 
 #define show_root_type(obj)						\

@@ -1007,4 +1007,6 @@ struct drm_radeon_info {
 #define SI_TILE_MODE_DEPTH_STENCIL_2D_4AA	3
 #define SI_TILE_MODE_DEPTH_STENCIL_2D_8AA	2
 
+#define CIK_TILE_MODE_DEPTH_STENCIL_1D		5
+
 #endif

@@ -380,10 +380,13 @@ struct perf_event_mmap_page {
 	union {
 		__u64	capabilities;
 		struct {
-			__u64	cap_usr_time		: 1,
-				cap_usr_rdpmc		: 1,
-				cap_usr_time_zero	: 1,
-				cap_____res		: 61;
+			__u64	cap_bit0		: 1, /* Always 0, deprecated, see commit 860f085b74e9 */
+				cap_bit0_is_deprecated	: 1, /* Always 1, signals that bit 0 is zero */
+
+				cap_user_rdpmc		: 1, /* The RDPMC instruction can be used to read counts */
+				cap_user_time		: 1, /* The time_* fields are used */
+				cap_user_time_zero	: 1, /* The time_zero field is used */
+				cap_____res		: 59;
 		};
 	};
 
@@ -442,12 +445,13 @@ struct perf_event_mmap_page {
 	 *               ((rem * time_mult) >> time_shift);
 	 */
 	__u64	time_zero;
+	__u32	size;			/* Header size up to __reserved[] fields. */
 
 	/*
 	 * Hole for extension of the self monitor capabilities
 	 */
 
-	__u64	__reserved[119];	/* align to 1k */
+	__u8	__reserved[118*8+4];	/* align to 1k. */
 
 	/*
 	 * Control data for the mmap() data buffer.
@@ -528,6 +532,7 @@ enum perf_event_type {
 	 *	u64				len;
 	 *	u64				pgoff;
 	 *	char				filename[];
+	 *	struct sample_id		sample_id;
 	 *	};
 	 */
	PERF_RECORD_MMAP		= 1,
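
The first hunk is an ABI compatibility fix: bit 0 of the capabilities word is
pinned to 0 and bit 1 (cap_bit0_is_deprecated) is pinned to 1, so userspace
can tell the old and new layouts apart without a version field. A sketch of
the detection logic (plain C masks, not the kernel's bitfield structure):

	#include <stdio.h>
	#include <stdint.h>

	#define CAP_BIT0		(1ULL << 0)	/* always 0 on fixed kernels */
	#define CAP_BIT0_IS_DEPRECATED	(1ULL << 1)	/* always 1 on fixed kernels */

	static void probe(uint64_t capabilities)
	{
		if (capabilities & CAP_BIT0_IS_DEPRECATED)
			puts("new layout: trust the cap_user_* bits");
		else if (capabilities & CAP_BIT0)
			puts("old layout: bit 0 meant cap_usr_time");
		else
			puts("old layout: no capabilities advertised");
	}

	int main(void)
	{
		probe(1ULL << 1);	/* fixed kernel */
		probe(1ULL << 0);	/* pre-fix kernel with cap_usr_time set */
		return 0;
	}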