Merge branch 'linus' into core/locking

Reason: Pull in the semaphore related changes

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

commit 37eca0d64a
1433 changed files with 20337 additions and 16359 deletions

@@ -304,8 +304,8 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
 				OSC_PCI_EXPRESS_PME_CONTROL |		\
 				OSC_PCI_EXPRESS_AER_CONTROL |		\
 				OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL)

-extern acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 flags);
+extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
+					     u32 *mask, u32 req);
 extern void acpi_early_init(void);

 #else	/* !CONFIG_ACPI */

@@ -150,6 +150,7 @@ struct clcd_fb {
 	u16			off_cntl;
+	u32			clcd_cntl;
 	u32			cmap[16];
 	bool			clk_enabled;
 };

 static inline void clcdfb_decode(struct clcd_fb *fb, struct clcd_regs *regs)

@@ -50,8 +50,8 @@ struct linux_binprm{
 	int unsafe;		/* how unsafe this exec is (mask of LSM_UNSAFE_*) */
 	unsigned int per_clear;	/* bits to clear in current->personality */
 	int argc, envc;
-	char * filename;	/* Name of binary as seen by procps */
-	char * interp;		/* Name of the binary really executed. Most
+	const char * filename;	/* Name of binary as seen by procps */
+	const char * interp;	/* Name of the binary really executed. Most
 				   of the time same as filename, but could be
 				   different for binfmt_{misc,script} */
 	unsigned interp_flags;

@@ -126,7 +126,8 @@ extern int setup_arg_pages(struct linux_binprm * bprm,
 			   unsigned long stack_top,
 			   int executable_stack);
 extern int bprm_mm_init(struct linux_binprm *bprm);
-extern int copy_strings_kernel(int argc,char ** argv,struct linux_binprm *bprm);
+extern int copy_strings_kernel(int argc, const char *const *argv,
+			       struct linux_binprm *bprm);
 extern int prepare_bprm_creds(struct linux_binprm *bprm);
 extern void install_exec_creds(struct linux_binprm *bprm);
 extern void do_coredump(long signr, int exit_code, struct pt_regs *regs);

@@ -32,7 +32,6 @@ enum bh_state_bits {
 	BH_Delay,	/* Buffer is not yet allocated on disk */
 	BH_Boundary,	/* Block is followed by a discontiguity */
 	BH_Write_EIO,	/* I/O error on write */
-	BH_Ordered,	/* ordered write */
 	BH_Eopnotsupp,	/* operation not supported (barrier) */
 	BH_Unwritten,	/* Buffer is allocated on disk but not written */
 	BH_Quiet,	/* Buffer Error Prinks to be quiet */

@@ -125,7 +124,6 @@ BUFFER_FNS(Async_Write, async_write)
 BUFFER_FNS(Delay, delay)
 BUFFER_FNS(Boundary, boundary)
 BUFFER_FNS(Write_EIO, write_io_error)
-BUFFER_FNS(Ordered, ordered)
 BUFFER_FNS(Eopnotsupp, eopnotsupp)
 BUFFER_FNS(Unwritten, unwritten)

@@ -183,6 +181,8 @@ void unlock_buffer(struct buffer_head *bh);
 void __lock_buffer(struct buffer_head *bh);
 void ll_rw_block(int, int, struct buffer_head * bh[]);
 int sync_dirty_buffer(struct buffer_head *bh);
+int __sync_dirty_buffer(struct buffer_head *bh, int rw);
+void write_dirty_buffer(struct buffer_head *bh, int rw);
 int submit_bh(int, struct buffer_head *);
 void write_boundary_block(struct block_device *bdev,
 			sector_t bblock, unsigned blocksize);

@@ -578,7 +578,12 @@ struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
 void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
 int cgroup_scan_tasks(struct cgroup_scanner *scan);
 int cgroup_attach_task(struct cgroup *, struct task_struct *);
-int cgroup_attach_task_current_cg(struct task_struct *);
+int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
+
+static inline int cgroup_attach_task_current_cg(struct task_struct *tsk)
+{
+	return cgroup_attach_task_all(current, tsk);
+}

 /*
  * CSS ID is ID for cgroup_subsys_state structs under subsys. This only works

@@ -636,6 +641,11 @@ static inline int cgroupstats_build(struct cgroupstats *stats,
 }

 /* No cgroups - nothing to do */
+static inline int cgroup_attach_task_all(struct task_struct *from,
+					 struct task_struct *t)
+{
+	return 0;
+}
 static inline int cgroup_attach_task_current_cg(struct task_struct *t)
 {
 	return 0;

@@ -360,5 +360,8 @@ extern ssize_t compat_rw_copy_check_uvector(int type,
 		const struct compat_iovec __user *uvector, unsigned long nr_segs,
 		unsigned long fast_segs, struct iovec *fast_pointer,
 		struct iovec **ret_pointer);
+
+extern void __user *compat_alloc_user_space(unsigned long len);
+
 #endif /* CONFIG_COMPAT */
 #endif /* _LINUX_COMPAT_H */

@@ -53,6 +53,7 @@ struct cpuidle_state {
 #define CPUIDLE_FLAG_BALANCED	(0x40) /* medium latency, moderate savings */
 #define CPUIDLE_FLAG_DEEP	(0x80) /* high latency, large savings */
 #define CPUIDLE_FLAG_IGNORE	(0x100) /* ignore during this idle period */
+#define CPUIDLE_FLAG_TLB_FLUSHED (0x200) /* tlb will be flushed */

 #define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000)

@@ -102,6 +102,9 @@ static inline u64 dma_get_mask(struct device *dev)
 	return DMA_BIT_MASK(32);
 }

+#ifdef ARCH_HAS_DMA_SET_COHERENT_MASK
+int dma_set_coherent_mask(struct device *dev, u64 mask);
+#else
 static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
 {
 	if (!dma_supported(dev, mask))

@@ -109,6 +112,7 @@ static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
 	dev->coherent_dma_mask = mask;
 	return 0;
 }
+#endif

 extern u64 dma_get_required_mask(struct device *dev);

@@ -548,7 +548,7 @@ static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
 	return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
 }

-static unsigned short dma_dev_to_maxpq(struct dma_device *dma)
+static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma)
 {
 	return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
 }

@@ -136,6 +136,7 @@ extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);

 extern int elevator_init(struct request_queue *, char *);
 extern void elevator_exit(struct elevator_queue *);
+extern int elevator_change(struct request_queue *, const char *);
 extern int elv_rq_merge_ok(struct request *, struct bio *);

 /*

@@ -65,14 +65,14 @@
 				 FAN_ALL_PERM_EVENTS |\
 				 FAN_Q_OVERFLOW)

-#define FANOTIFY_METADATA_VERSION	1
+#define FANOTIFY_METADATA_VERSION	2

 struct fanotify_event_metadata {
 	__u32 event_len;
 	__u32 vers;
-	__s32 fd;
 	__u64 mask;
-	__s64 pid;
+	__s32 fd;
+	__s32 pid;
 } __attribute__ ((packed));

 struct fanotify_response {

@@ -95,11 +95,4 @@ struct fanotify_response {
 		 (long)(meta)->event_len >= (long)FAN_EVENT_METADATA_LEN && \
 		 (long)(meta)->event_len <= (long)(len))

-#ifdef __KERNEL__
-
-struct fanotify_wait {
-	struct fsnotify_event *event;
-	__s32 fd;
-};
-#endif /* __KERNEL__ */
 #endif /* _LINUX_FANOTIFY_H */

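Both fanotify hunks change the user-visible event ABI (field order and width, plus the metadata version bump to 2), which is why userspace is expected to check vers on every read. A minimal userspace read loop, sketched under the assumption that the descriptor was set up elsewhere with fanotify_init()/fanotify_mark():

#include <stdio.h>
#include <unistd.h>
#include <linux/fanotify.h>

static void drain_events(int fanotify_fd)
{
	char buf[4096];
	ssize_t len = read(fanotify_fd, buf, sizeof(buf));
	struct fanotify_event_metadata *meta =
		(struct fanotify_event_metadata *)buf;

	while (FAN_EVENT_OK(meta, len)) {
		if (meta->vers != FANOTIFY_METADATA_VERSION)
			return;	/* kernel/userspace header mismatch */
		printf("mask=%llx pid=%d fd=%d\n",
		       (unsigned long long)meta->mask, (int)meta->pid, meta->fd);
		if (meta->fd >= 0)
			close(meta->fd);	/* each event carries an open fd */
		meta = FAN_EVENT_NEXT(meta, len);
	}
}
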
@@ -125,9 +125,6 @@ struct inodes_stat_t {
  *			block layer could (in theory) choose to ignore this
  *			request if it runs into resource problems.
  * WRITE		A normal async write. Device will be plugged.
- * SWRITE		Like WRITE, but a special case for ll_rw_block() that
- *			tells it to lock the buffer first. Normally a buffer
- *			must be locked before doing IO.
  * WRITE_SYNC_PLUG	Synchronous write. Identical to WRITE, but passes down
  *			the hint that someone will be waiting on this IO
  *			shortly. The device must still be unplugged explicitly,

@@ -138,9 +135,6 @@ struct inodes_stat_t {
  *			immediately after submission. The write equivalent
  *			of READ_SYNC.
  * WRITE_ODIRECT_PLUG	Special case write for O_DIRECT only.
- * SWRITE_SYNC
- * SWRITE_SYNC_PLUG	Like WRITE_SYNC/WRITE_SYNC_PLUG, but locks the buffer.
- *			See SWRITE.
  * WRITE_BARRIER	Like WRITE_SYNC, but tells the block layer that all
  *			previously submitted writes must be safely on storage
  *			before this one is started. Also guarantees that when

@@ -155,7 +149,6 @@ struct inodes_stat_t {
 #define READ			0
 #define WRITE			RW_MASK
 #define READA			RWA_MASK
-#define SWRITE			(WRITE | READA)

 #define READ_SYNC		(READ | REQ_SYNC | REQ_UNPLUG)
 #define READ_META		(READ | REQ_META)

@@ -165,8 +158,6 @@ struct inodes_stat_t {
 #define WRITE_META		(WRITE | REQ_META)
 #define WRITE_BARRIER		(WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \
 				 REQ_HARDBARRIER)
-#define SWRITE_SYNC_PLUG	(SWRITE | REQ_SYNC | REQ_NOIDLE)
-#define SWRITE_SYNC		(SWRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG)

 /*
  * These aren't really reads or writes, they pass down information about

@@ -929,6 +920,9 @@ struct file {
 #define f_vfsmnt	f_path.mnt
 	const struct file_operations	*f_op;
 	spinlock_t		f_lock;  /* f_ep_links, f_flags, no IRQ */
+#ifdef CONFIG_SMP
+	int			f_sb_list_cpu;
+#endif
 	atomic_long_t		f_count;
 	unsigned int		f_flags;
 	fmode_t			f_mode;

@@ -953,9 +947,6 @@ struct file {
 	unsigned long f_mnt_write_state;
 #endif
 };
-extern spinlock_t files_lock;
-#define file_list_lock() spin_lock(&files_lock);
-#define file_list_unlock() spin_unlock(&files_lock);

 #define get_file(x)	atomic_long_inc(&(x)->f_count)
 #define fput_atomic(x)	atomic_long_add_unless(&(x)->f_count, -1, 1)

@@ -1102,6 +1093,10 @@ struct file_lock {

 #include <linux/fcntl.h>

+/* temporary stubs for BKL removal */
+#define lock_flocks() lock_kernel()
+#define unlock_flocks() unlock_kernel()
+
 extern void send_sigio(struct fown_struct *fown, int fd, int band);

 #ifdef CONFIG_FILE_LOCKING

@@ -1346,7 +1341,11 @@ struct super_block {

 	struct list_head	s_inodes;	/* all inodes */
 	struct hlist_head	s_anon;		/* anonymous dentries for (nfs) exporting */
+#ifdef CONFIG_SMP
+	struct list_head __percpu *s_files;
+#else
 	struct list_head	s_files;
+#endif
 	/* s_dentry_lru and s_nr_dentry_unused are protected by dcache_lock */
 	struct list_head	s_dentry_lru;	/* unused dentry lru */
 	int			s_nr_dentry_unused;	/* # of dentry on lru */

@@ -2197,8 +2196,6 @@ static inline void insert_inode_hash(struct inode *inode) {
 	__insert_inode_hash(inode, inode->i_ino);
 }

-extern void file_move(struct file *f, struct list_head *list);
-extern void file_kill(struct file *f);
 #ifdef CONFIG_BLOCK
 extern void submit_bio(int, struct bio *);
 extern int bdev_read_only(struct block_device *);

@@ -5,7 +5,7 @@

 struct fs_struct {
 	int users;
-	rwlock_t lock;
+	spinlock_t lock;
 	int umask;
 	int in_exec;
 	struct path root, pwd;

@@ -23,29 +23,29 @@ extern int unshare_fs_struct(void);

 static inline void get_fs_root(struct fs_struct *fs, struct path *root)
 {
-	read_lock(&fs->lock);
+	spin_lock(&fs->lock);
 	*root = fs->root;
 	path_get(root);
-	read_unlock(&fs->lock);
+	spin_unlock(&fs->lock);
 }

 static inline void get_fs_pwd(struct fs_struct *fs, struct path *pwd)
 {
-	read_lock(&fs->lock);
+	spin_lock(&fs->lock);
 	*pwd = fs->pwd;
 	path_get(pwd);
-	read_unlock(&fs->lock);
+	spin_unlock(&fs->lock);
 }

 static inline void get_fs_root_and_pwd(struct fs_struct *fs, struct path *root,
 				       struct path *pwd)
 {
-	read_lock(&fs->lock);
+	spin_lock(&fs->lock);
 	*root = fs->root;
 	path_get(root);
 	*pwd = fs->pwd;
 	path_get(pwd);
-	read_unlock(&fs->lock);
+	spin_unlock(&fs->lock);
 }

 #endif /* _LINUX_FS_STRUCT_H */

@@ -156,6 +156,7 @@ struct fsnotify_group {
 			struct mutex access_mutex;
 			struct list_head access_list;
 			wait_queue_head_t access_waitq;
+			bool bypass_perm; /* protected by access_mutex */
 #endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */
 			int f_flags;
 		} fanotify_data;

@@ -13,6 +13,7 @@
 #include <linux/errno.h>

 struct device;
+struct gpio_chip;

 /*
  * Some platforms don't support the GPIO programming interface.

@@ -63,6 +63,9 @@
  *           IRQ lines will appear.  Similarly to gpio_base, the expander
  *           will create a block of irqs beginning at this number.
  *           This value is ignored if irq_summary is < 0.
+ * @reset_during_probe: If set to true, the driver will trigger a full
+ *                      reset of the chip at the beginning of the probe
+ *                      in order to place it in a known state.
  */
 struct sx150x_platform_data {
 	unsigned gpio_base;

@@ -73,6 +76,7 @@ struct sx150x_platform_data {
 	u16 io_polarity;
 	int irq_summary;
 	unsigned irq_base;
+	bool reset_during_probe;
 };

 #endif /* __LINUX_I2C_SX150X_H */

@@ -119,7 +119,7 @@ struct ethhdr {
 	unsigned char	h_dest[ETH_ALEN];	/* destination eth addr	*/
 	unsigned char	h_source[ETH_ALEN];	/* source ether addr	*/
 	__be16		h_proto;		/* packet type ID field	*/
-} __packed;
+} __attribute__((packed));

 #ifdef __KERNEL__
 #include <linux/skbuff.h>

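This hunk, and the matching ones in the fddi/hippi/pppox/nbd/ncp/phonet/rfkill headers below, spell the GCC attribute out because these headers are exported to userspace, where the kernel-internal __packed shorthand from <linux/compiler.h> is not defined. A small self-contained illustration of what the attribute buys (the struct is hypothetical, not from the header):

#include <stdio.h>

struct on_wire {
	unsigned char  type;
	unsigned short len;	/* would be padded to offset 2 without the attribute */
} __attribute__((packed));

int main(void)
{
	/* 3 bytes packed; typically 4 if the attribute is dropped */
	printf("sizeof(struct on_wire) = %zu\n", sizeof(struct on_wire));
	return 0;
}
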
@@ -67,7 +67,7 @@ struct fddi_8022_1_hdr {
 	__u8	dsap;					/* destination service access point */
 	__u8	ssap;					/* source service access point */
 	__u8	ctrl;					/* control byte #1 */
-} __packed;
+} __attribute__((packed));

 /* Define 802.2 Type 2 header */
 struct fddi_8022_2_hdr {

@@ -75,7 +75,7 @@ struct fddi_8022_2_hdr {
 	__u8	ssap;					/* source service access point */
 	__u8	ctrl_1;					/* control byte #1 */
 	__u8	ctrl_2;					/* control byte #2 */
-} __packed;
+} __attribute__((packed));

 /* Define 802.2 SNAP header */
 #define FDDI_K_OUI_LEN	3

@@ -85,7 +85,7 @@ struct fddi_snap_hdr {
 	__u8	ctrl;					/* always 0x03 */
 	__u8	oui[FDDI_K_OUI_LEN];			/* organizational universal id */
 	__be16	ethertype;				/* packet type ID field */
-} __packed;
+} __attribute__((packed));

 /* Define FDDI LLC frame header */
 struct fddihdr {

@@ -98,7 +98,7 @@ struct fddihdr {
 		struct fddi_8022_2_hdr	llc_8022_2;
 		struct fddi_snap_hdr	llc_snap;
 	} hdr;
-} __packed;
+} __attribute__((packed));

 #ifdef __KERNEL__
 #include <linux/netdevice.h>

@@ -104,7 +104,7 @@ struct hippi_fp_hdr {
 	__be32		fixed;
 #endif
 	__be32		d2_size;
-} __packed;
+} __attribute__((packed));

 struct hippi_le_hdr {
 #if defined (__BIG_ENDIAN_BITFIELD)

@@ -129,7 +129,7 @@ struct hippi_le_hdr {
 	__u8		daddr[HIPPI_ALEN];
 	__u16		locally_administered;
 	__u8		saddr[HIPPI_ALEN];
-} __packed;
+} __attribute__((packed));

 #define HIPPI_OUI_LEN	3
 /*

@@ -142,12 +142,12 @@ struct hippi_snap_hdr {
 	__u8	ctrl;			/* always 0x03 */
 	__u8	oui[HIPPI_OUI_LEN];	/* organizational universal id (zero)*/
 	__be16	ethertype;		/* packet type ID field */
-} __packed;
+} __attribute__((packed));

 struct hippi_hdr {
 	struct hippi_fp_hdr	fp;
 	struct hippi_le_hdr	le;
 	struct hippi_snap_hdr	snap;
-} __packed;
+} __attribute__((packed));

 #endif	/* _LINUX_IF_HIPPI_H */

@@ -59,7 +59,7 @@ struct sockaddr_pppox {
 	union{
 		struct pppoe_addr	pppoe;
 	}sa_addr;
-} __packed;
+} __attribute__((packed));

 /* The use of the above union isn't viable because the size of this
  * struct must stay fixed over time -- applications use sizeof(struct

@@ -70,7 +70,7 @@ struct sockaddr_pppol2tp {
 	sa_family_t	sa_family;	/* address family, AF_PPPOX */
 	unsigned int	sa_protocol;	/* protocol identifier */
 	struct pppol2tp_addr pppol2tp;
-} __packed;
+} __attribute__((packed));

 /* The L2TPv3 protocol changes tunnel and session ids from 16 to 32
  * bits. So we need a different sockaddr structure.

@@ -79,7 +79,7 @@ struct sockaddr_pppol2tpv3 {
 	sa_family_t	sa_family;	/* address family, AF_PPPOX */
 	unsigned int	sa_protocol;	/* protocol identifier */
 	struct pppol2tpv3_addr pppol2tp;
-} __packed;
+} __attribute__((packed));

 /*********************************************************************
 *

@@ -101,7 +101,7 @@ struct pppoe_tag {
 	__be16 tag_type;
 	__be16 tag_len;
 	char tag_data[0];
-} __attribute ((packed));
+} __attribute__ ((packed));

 /* Tag identifiers */
 #define PTT_EOL		__cpu_to_be16(0x0000)

@@ -129,7 +129,7 @@ struct pppoe_hdr {
 	__be16 sid;
 	__be16 length;
 	struct pppoe_tag tag[0];
-} __packed;
+} __attribute__((packed));

 /* Length of entire PPPoE + PPP header */
 #define PPPOE_SES_HLEN	8

include/linux/intel-gtt.h (new file, 20 lines)

@@ -0,0 +1,20 @@
+/*
+ * Common Intel AGPGART and GTT definitions.
+ */
+#ifndef _INTEL_GTT_H
+#define _INTEL_GTT_H
+
+#include <linux/agp_backend.h>
+
+/* This is for Intel only GTT controls.
+ *
+ * Sandybridge: AGP_USER_CACHED_MEMORY default to LLC only
+ */
+
+#define AGP_USER_CACHED_MEMORY_LLC_MLC (AGP_USER_TYPES + 2)
+#define AGP_USER_UNCACHED_MEMORY (AGP_USER_TYPES + 4)
+
+/* flag for GFDT type */
+#define AGP_USER_CACHED_MEMORY_GFDT (1 << 3)
+
+#endif

@@ -79,7 +79,7 @@ io_mapping_free(struct io_mapping *mapping)
 }

 /* Atomic map/unmap */
-static inline void *
+static inline void __iomem *
 io_mapping_map_atomic_wc(struct io_mapping *mapping,
 			 unsigned long offset,
 			 int slot)

@@ -94,12 +94,12 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping,
 }

 static inline void
-io_mapping_unmap_atomic(void *vaddr, int slot)
+io_mapping_unmap_atomic(void __iomem *vaddr, int slot)
 {
 	iounmap_atomic(vaddr, slot);
 }

-static inline void *
+static inline void __iomem *
 io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
 {
 	resource_size_t phys_addr;

@@ -111,7 +111,7 @@ io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
 }

 static inline void
-io_mapping_unmap(void *vaddr)
+io_mapping_unmap(void __iomem *vaddr)
 {
 	iounmap(vaddr);
 }

@@ -125,38 +125,38 @@ struct io_mapping;
 static inline struct io_mapping *
 io_mapping_create_wc(resource_size_t base, unsigned long size)
 {
-	return (struct io_mapping *) ioremap_wc(base, size);
+	return (struct io_mapping __force *) ioremap_wc(base, size);
 }

 static inline void
 io_mapping_free(struct io_mapping *mapping)
 {
-	iounmap(mapping);
+	iounmap((void __force __iomem *) mapping);
 }

 /* Atomic map/unmap */
-static inline void *
+static inline void __iomem *
 io_mapping_map_atomic_wc(struct io_mapping *mapping,
 			 unsigned long offset,
 			 int slot)
 {
-	return ((char *) mapping) + offset;
+	return ((char __force __iomem *) mapping) + offset;
 }

 static inline void
-io_mapping_unmap_atomic(void *vaddr, int slot)
+io_mapping_unmap_atomic(void __iomem *vaddr, int slot)
 {
 }

 /* Non-atomic map/unmap */
-static inline void *
+static inline void __iomem *
 io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
 {
-	return ((char *) mapping) + offset;
+	return ((char __force __iomem *) mapping) + offset;
 }

 static inline void
-io_mapping_unmap(void *vaddr)
+io_mapping_unmap(void __iomem *vaddr)
 {
 }

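The io-mapping changes are pure sparse annotations: __iomem marks pointers into device memory, compiles away under GCC, and lets "make C=1" flag code that mixes such pointers with ordinary ones. A hedged driver-style sketch of the discipline the annotation enforces (the register offset and names are made up):

#include <linux/io.h>

#define MY_STATUS_REG 0x04	/* hypothetical register offset */

static u32 read_status(void __iomem *regs)
{
	/* readl() is the sanctioned way to dereference an __iomem
	 * pointer; a plain *regs would be flagged by sparse. */
	return readl(regs + MY_STATUS_REG);
}
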
@@ -58,7 +58,7 @@ struct ipv6_opt_hdr {
 	/*
 	 * TLV encoded option data follows.
 	 */
-} __packed;	/* required for some archs */
+} __attribute__((packed));	/* required for some archs */

 #define ipv6_destopt_hdr ipv6_opt_hdr
 #define ipv6_hopopt_hdr  ipv6_opt_hdr

@@ -99,7 +99,7 @@ struct ipv6_destopt_hao {
 	__u8			type;
 	__u8			length;
 	struct in6_addr		addr;
-} __packed;
+} __attribute__((packed));

 /*
  * IPv6 fixed header

@@ -214,7 +214,7 @@ __kfifo_must_check_helper(unsigned int val)
  */
 #define kfifo_reset(fifo) \
 (void)({ \
-	typeof(fifo + 1) __tmp = (fifo); \
+	typeof((fifo) + 1) __tmp = (fifo); \
 	__tmp->kfifo.in = __tmp->kfifo.out = 0; \
 })

@@ -228,7 +228,7 @@ __kfifo_must_check_helper(unsigned int val)
  */
 #define kfifo_reset_out(fifo)	\
 (void)({ \
-	typeof(fifo + 1) __tmp = (fifo); \
+	typeof((fifo) + 1) __tmp = (fifo); \
 	__tmp->kfifo.out = __tmp->kfifo.in; \
 })

@@ -238,7 +238,7 @@ __kfifo_must_check_helper(unsigned int val)
  */
 #define kfifo_len(fifo) \
 ({ \
-	typeof(fifo + 1) __tmpl = (fifo); \
+	typeof((fifo) + 1) __tmpl = (fifo); \
 	__tmpl->kfifo.in - __tmpl->kfifo.out; \
 })

@@ -248,7 +248,7 @@ __kfifo_must_check_helper(unsigned int val)
  */
 #define kfifo_is_empty(fifo) \
 ({ \
-	typeof(fifo + 1) __tmpq = (fifo); \
+	typeof((fifo) + 1) __tmpq = (fifo); \
 	__tmpq->kfifo.in == __tmpq->kfifo.out; \
 })

@@ -258,7 +258,7 @@ __kfifo_must_check_helper(unsigned int val)
  */
 #define kfifo_is_full(fifo) \
 ({ \
-	typeof(fifo + 1) __tmpq = (fifo); \
+	typeof((fifo) + 1) __tmpq = (fifo); \
 	kfifo_len(__tmpq) > __tmpq->kfifo.mask; \
 })

@@ -269,7 +269,7 @@ __kfifo_must_check_helper(unsigned int val)
 #define kfifo_avail(fifo) \
 __kfifo_must_check_helper( \
 ({ \
-	typeof(fifo + 1) __tmpq = (fifo); \
+	typeof((fifo) + 1) __tmpq = (fifo); \
 	const size_t __recsize = sizeof(*__tmpq->rectype); \
 	unsigned int __avail = kfifo_size(__tmpq) - kfifo_len(__tmpq); \
 	(__recsize) ? ((__avail <= __recsize) ? 0 : \

@@ -284,7 +284,7 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_skip(fifo) \
 (void)({ \
-	typeof(fifo + 1) __tmp = (fifo); \
+	typeof((fifo) + 1) __tmp = (fifo); \
 	const size_t __recsize = sizeof(*__tmp->rectype); \
 	struct __kfifo *__kfifo = &__tmp->kfifo; \
 	if (__recsize) \

@@ -302,7 +302,7 @@ __kfifo_must_check_helper( \
 #define kfifo_peek_len(fifo) \
 __kfifo_must_check_helper( \
 ({ \
-	typeof(fifo + 1) __tmp = (fifo); \
+	typeof((fifo) + 1) __tmp = (fifo); \
 	const size_t __recsize = sizeof(*__tmp->rectype); \
 	struct __kfifo *__kfifo = &__tmp->kfifo; \
 	(!__recsize) ? kfifo_len(__tmp) * sizeof(*__tmp->type) : \

@@ -325,7 +325,7 @@ __kfifo_must_check_helper( \
 #define kfifo_alloc(fifo, size, gfp_mask) \
 __kfifo_must_check_helper( \
 ({ \
-	typeof(fifo + 1) __tmp = (fifo); \
+	typeof((fifo) + 1) __tmp = (fifo); \
 	struct __kfifo *__kfifo = &__tmp->kfifo; \
 	__is_kfifo_ptr(__tmp) ? \
 	__kfifo_alloc(__kfifo, size, sizeof(*__tmp->type), gfp_mask) : \

@@ -339,7 +339,7 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_free(fifo) \
 ({ \
-	typeof(fifo + 1) __tmp = (fifo); \
+	typeof((fifo) + 1) __tmp = (fifo); \
 	struct __kfifo *__kfifo = &__tmp->kfifo; \
 	if (__is_kfifo_ptr(__tmp)) \
 		__kfifo_free(__kfifo); \

@@ -358,7 +358,7 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_init(fifo, buffer, size) \
 ({ \
-	typeof(fifo + 1) __tmp = (fifo); \
+	typeof((fifo) + 1) __tmp = (fifo); \
 	struct __kfifo *__kfifo = &__tmp->kfifo; \
 	__is_kfifo_ptr(__tmp) ? \
 	__kfifo_init(__kfifo, buffer, size, sizeof(*__tmp->type)) : \

@@ -379,8 +379,8 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_put(fifo, val) \
 ({ \
-	typeof(fifo + 1) __tmp = (fifo); \
-	typeof(val + 1) __val = (val); \
+	typeof((fifo) + 1) __tmp = (fifo); \
+	typeof((val) + 1) __val = (val); \
 	unsigned int __ret; \
 	const size_t __recsize = sizeof(*__tmp->rectype); \
 	struct __kfifo *__kfifo = &__tmp->kfifo; \

@@ -421,8 +421,8 @@ __kfifo_must_check_helper( \
 #define kfifo_get(fifo, val) \
 __kfifo_must_check_helper( \
 ({ \
-	typeof(fifo + 1) __tmp = (fifo); \
-	typeof(val + 1) __val = (val); \
+	typeof((fifo) + 1) __tmp = (fifo); \
+	typeof((val) + 1) __val = (val); \
 	unsigned int __ret; \
 	const size_t __recsize = sizeof(*__tmp->rectype); \
 	struct __kfifo *__kfifo = &__tmp->kfifo; \

@@ -462,8 +462,8 @@ __kfifo_must_check_helper( \
 #define kfifo_peek(fifo, val) \
 __kfifo_must_check_helper( \
 ({ \
-	typeof(fifo + 1) __tmp = (fifo); \
-	typeof(val + 1) __val = (val); \
+	typeof((fifo) + 1) __tmp = (fifo); \
+	typeof((val) + 1) __val = (val); \
 	unsigned int __ret; \
 	const size_t __recsize = sizeof(*__tmp->rectype); \
 	struct __kfifo *__kfifo = &__tmp->kfifo; \

@@ -501,8 +501,8 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_in(fifo, buf, n) \
 ({ \
-	typeof(fifo + 1) __tmp = (fifo); \
-	typeof(buf + 1) __buf = (buf); \
+	typeof((fifo) + 1) __tmp = (fifo); \
+	typeof((buf) + 1) __buf = (buf); \
 	unsigned long __n = (n); \
 	const size_t __recsize = sizeof(*__tmp->rectype); \
 	struct __kfifo *__kfifo = &__tmp->kfifo; \

@@ -554,8 +554,8 @@ __kfifo_must_check_helper( \
 #define kfifo_out(fifo, buf, n) \
 __kfifo_must_check_helper( \
 ({ \
-	typeof(fifo + 1) __tmp = (fifo); \
-	typeof(buf + 1) __buf = (buf); \
+	typeof((fifo) + 1) __tmp = (fifo); \
+	typeof((buf) + 1) __buf = (buf); \
 	unsigned long __n = (n); \
 	const size_t __recsize = sizeof(*__tmp->rectype); \
 	struct __kfifo *__kfifo = &__tmp->kfifo; \

@@ -611,7 +611,7 @@ __kfifo_must_check_helper( \
 #define kfifo_from_user(fifo, from, len, copied) \
 __kfifo_must_check_helper( \
 ({ \
-	typeof(fifo + 1) __tmp = (fifo); \
+	typeof((fifo) + 1) __tmp = (fifo); \
 	const void __user *__from = (from); \
 	unsigned int __len = (len); \
 	unsigned int *__copied = (copied); \

@@ -639,7 +639,7 @@ __kfifo_must_check_helper( \
 #define kfifo_to_user(fifo, to, len, copied) \
 __kfifo_must_check_helper( \
 ({ \
-	typeof(fifo + 1) __tmp = (fifo); \
+	typeof((fifo) + 1) __tmp = (fifo); \
 	void __user *__to = (to); \
 	unsigned int __len = (len); \
 	unsigned int *__copied = (copied); \

@@ -666,7 +666,7 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_dma_in_prepare(fifo, sgl, nents, len) \
 ({ \
-	typeof(fifo + 1) __tmp = (fifo); \
+	typeof((fifo) + 1) __tmp = (fifo); \
 	struct scatterlist *__sgl = (sgl); \
 	int __nents = (nents); \
 	unsigned int __len = (len); \

@@ -690,7 +690,7 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_dma_in_finish(fifo, len) \
 (void)({ \
-	typeof(fifo + 1) __tmp = (fifo); \
+	typeof((fifo) + 1) __tmp = (fifo); \
 	unsigned int __len = (len); \
 	const size_t __recsize = sizeof(*__tmp->rectype); \
 	struct __kfifo *__kfifo = &__tmp->kfifo; \

@@ -717,7 +717,7 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_dma_out_prepare(fifo, sgl, nents, len) \
 ({ \
-	typeof(fifo + 1) __tmp = (fifo); \
+	typeof((fifo) + 1) __tmp = (fifo); \
 	struct scatterlist *__sgl = (sgl); \
 	int __nents = (nents); \
 	unsigned int __len = (len); \

@@ -741,7 +741,7 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_dma_out_finish(fifo, len) \
 (void)({ \
-	typeof(fifo + 1) __tmp = (fifo); \
+	typeof((fifo) + 1) __tmp = (fifo); \
 	unsigned int __len = (len); \
 	const size_t __recsize = sizeof(*__tmp->rectype); \
 	struct __kfifo *__kfifo = &__tmp->kfifo; \

@@ -766,8 +766,8 @@ __kfifo_must_check_helper( \
 #define kfifo_out_peek(fifo, buf, n) \
 __kfifo_must_check_helper( \
 ({ \
-	typeof(fifo + 1) __tmp = (fifo); \
-	typeof(buf + 1) __buf = (buf); \
+	typeof((fifo) + 1) __tmp = (fifo); \
+	typeof((buf) + 1) __buf = (buf); \
 	unsigned long __n = (n); \
 	const size_t __recsize = sizeof(*__tmp->rectype); \
 	struct __kfifo *__kfifo = &__tmp->kfifo; \

@@ -836,6 +836,8 @@ extern void __kfifo_dma_out_finish_r(struct __kfifo *fifo, size_t recsize);

 extern unsigned int __kfifo_len_r(struct __kfifo *fifo, size_t recsize);

+extern void __kfifo_skip_r(struct __kfifo *fifo, size_t recsize);
+
 extern unsigned int __kfifo_out_peek_r(struct __kfifo *fifo,
 	void *buf, unsigned int len, size_t recsize);

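Every kfifo hunk above is the same mechanical fix: macro parameters are parenthesized before use, so that argument expressions parse as intended inside typeof() and elsewhere. The hazard is ordinary operator precedence, illustrated here outside the kernel:

#include <stdio.h>

/* Unhygienic: the parameter is spliced in unparenthesized. */
#define DOUBLE_BAD(x)  (x * 2)
/* Hygienic, in the style the kfifo hunks adopt. */
#define DOUBLE_GOOD(x) ((x) * 2)

int main(void)
{
	/* DOUBLE_BAD expands to (1 + 2 * 2) == 5, not 6. */
	printf("%d %d\n", DOUBLE_BAD(1 + 2), DOUBLE_GOOD(1 + 2));
	return 0;
}
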
@@ -22,6 +22,7 @@
 #include <linux/compiler.h>
 #include <linux/spinlock.h>
 #include <linux/kref.h>
+#include <linux/kobject_ns.h>
 #include <linux/kernel.h>
 #include <linux/wait.h>
 #include <asm/atomic.h>

@@ -136,42 +137,8 @@ struct kobj_attribute {

 extern const struct sysfs_ops kobj_sysfs_ops;

-/*
- * Namespace types which are used to tag kobjects and sysfs entries.
- * Network namespace will likely be the first.
- */
-enum kobj_ns_type {
-	KOBJ_NS_TYPE_NONE = 0,
-	KOBJ_NS_TYPE_NET,
-	KOBJ_NS_TYPES
-};
-
-struct sock;
-
-/*
- * Callbacks so sysfs can determine namespaces
- *   @current_ns: return calling task's namespace
- *   @netlink_ns: return namespace to which a sock belongs (right?)
- *   @initial_ns: return the initial namespace (i.e. init_net_ns)
- */
-struct kobj_ns_type_operations {
-	enum kobj_ns_type type;
-	const void *(*current_ns)(void);
-	const void *(*netlink_ns)(struct sock *sk);
-	const void *(*initial_ns)(void);
-};
-
-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
-int kobj_ns_type_registered(enum kobj_ns_type type);
-const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent);
-const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj);
-
-const void *kobj_ns_current(enum kobj_ns_type type);
-const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk);
-const void *kobj_ns_initial(enum kobj_ns_type type);
-void kobj_ns_exit(enum kobj_ns_type type, const void *ns);
-
 /**
  * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
  *

include/linux/kobject_ns.h (new file, 56 lines)

@@ -0,0 +1,56 @@
+/* Kernel object name space definitions
+ *
+ * Copyright (c) 2002-2003 Patrick Mochel
+ * Copyright (c) 2002-2003 Open Source Development Labs
+ * Copyright (c) 2006-2008 Greg Kroah-Hartman <greg@kroah.com>
+ * Copyright (c) 2006-2008 Novell Inc.
+ *
+ * Split from kobject.h by David Howells (dhowells@redhat.com)
+ *
+ * This file is released under the GPLv2.
+ *
+ * Please read Documentation/kobject.txt before using the kobject
+ * interface, ESPECIALLY the parts about reference counts and object
+ * destructors.
+ */
+
+#ifndef _LINUX_KOBJECT_NS_H
+#define _LINUX_KOBJECT_NS_H
+
+struct sock;
+struct kobject;
+
+/*
+ * Namespace types which are used to tag kobjects and sysfs entries.
+ * Network namespace will likely be the first.
+ */
+enum kobj_ns_type {
+	KOBJ_NS_TYPE_NONE = 0,
+	KOBJ_NS_TYPE_NET,
+	KOBJ_NS_TYPES
+};
+
+/*
+ * Callbacks so sysfs can determine namespaces
+ *   @current_ns: return calling task's namespace
+ *   @netlink_ns: return namespace to which a sock belongs (right?)
+ *   @initial_ns: return the initial namespace (i.e. init_net_ns)
+ */
+struct kobj_ns_type_operations {
+	enum kobj_ns_type type;
+	const void *(*current_ns)(void);
+	const void *(*netlink_ns)(struct sock *sk);
+	const void *(*initial_ns)(void);
+};
+
+int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
+int kobj_ns_type_registered(enum kobj_ns_type type);
+const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent);
+const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj);
+
+const void *kobj_ns_current(enum kobj_ns_type type);
+const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk);
+const void *kobj_ns_initial(enum kobj_ns_type type);
+void kobj_ns_exit(enum kobj_ns_type type, const void *ns);
+
+#endif /* _LINUX_KOBJECT_NS_H */

@@ -16,6 +16,9 @@
 struct stable_node;
 struct mem_cgroup;

+struct page *ksm_does_need_to_copy(struct page *page,
+			struct vm_area_struct *vma, unsigned long address);
+
 #ifdef CONFIG_KSM
 int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
 		unsigned long end, int advice, unsigned long *vm_flags);

@@ -70,19 +73,14 @@ static inline void set_page_stable_node(struct page *page,
  * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
  * but what if the vma was unmerged while the page was swapped out?
  */
-struct page *ksm_does_need_to_copy(struct page *page,
-			struct vm_area_struct *vma, unsigned long address);
-static inline struct page *ksm_might_need_to_copy(struct page *page,
+static inline int ksm_might_need_to_copy(struct page *page,
 		struct vm_area_struct *vma, unsigned long address)
 {
 	struct anon_vma *anon_vma = page_anon_vma(page);

-	if (!anon_vma ||
-	    (anon_vma->root == vma->anon_vma->root &&
-	     page->index == linear_page_index(vma, address)))
-		return page;
-
-	return ksm_does_need_to_copy(page, vma, address);
+	return anon_vma &&
+		(anon_vma->root != vma->anon_vma->root ||
+		 page->index != linear_page_index(vma, address));
 }

 int page_referenced_ksm(struct page *page,

@@ -115,10 +113,10 @@ static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
 	return 0;
 }

-static inline struct page *ksm_might_need_to_copy(struct page *page,
+static inline int ksm_might_need_to_copy(struct page *page,
 		struct vm_area_struct *vma, unsigned long address)
 {
-	return page;
+	return 0;
 }

 static inline int page_referenced_ksm(struct page *page,

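The ksm_might_need_to_copy() change splits a combined check-and-copy into a cheap inline predicate plus an explicit call to ksm_does_need_to_copy(). A hedged sketch of the resulting caller pattern (the wrapper function and its name are illustrative, not lifted from mm/memory.c):

#include <linux/ksm.h>
#include <linux/mm.h>

static struct page *stabilize_for_write(struct page *page,
					struct vm_area_struct *vma,
					unsigned long address)
{
	if (ksm_might_need_to_copy(page, vma, address)) {
		struct page *copy = ksm_does_need_to_copy(page, vma, address);
		if (copy)
			page = copy;	/* use the private copy from here on */
	}
	return page;
}
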
include/linux/lglock.h (new file, 172 lines)

@@ -0,0 +1,172 @@
+/*
+ * Specialised local-global spinlock. Can only be declared as global variables
+ * to avoid overhead and keep things simple (and we don't want to start using
+ * these inside dynamically allocated structures).
+ *
+ * "local/global locks" (lglocks) can be used to:
+ *
+ * - Provide fast exclusive access to per-CPU data, with exclusive access to
+ *   another CPU's data allowed but possibly subject to contention, and to
+ *   provide very slow exclusive access to all per-CPU data.
+ * - Or to provide very fast and scalable read serialisation, and to provide
+ *   very slow exclusive serialisation of data (not necessarily per-CPU data).
+ *
+ * Brlocks are also implemented as a short-hand notation for the latter use
+ * case.
+ *
+ * Copyright 2009, 2010, Nick Piggin, Novell Inc.
+ */
+#ifndef __LINUX_LGLOCK_H
+#define __LINUX_LGLOCK_H
+
+#include <linux/spinlock.h>
+#include <linux/lockdep.h>
+#include <linux/percpu.h>
+
+/* can make br locks by using local lock for read side, global lock for write */
+#define br_lock_init(name)	name##_lock_init()
+#define br_read_lock(name)	name##_local_lock()
+#define br_read_unlock(name)	name##_local_unlock()
+#define br_write_lock(name)	name##_global_lock_online()
+#define br_write_unlock(name)	name##_global_unlock_online()
+
+#define DECLARE_BRLOCK(name)	DECLARE_LGLOCK(name)
+#define DEFINE_BRLOCK(name)	DEFINE_LGLOCK(name)
+
+
+#define lg_lock_init(name)	name##_lock_init()
+#define lg_local_lock(name)	name##_local_lock()
+#define lg_local_unlock(name)	name##_local_unlock()
+#define lg_local_lock_cpu(name, cpu)	name##_local_lock_cpu(cpu)
+#define lg_local_unlock_cpu(name, cpu)	name##_local_unlock_cpu(cpu)
+#define lg_global_lock(name)	name##_global_lock()
+#define lg_global_unlock(name)	name##_global_unlock()
+#define lg_global_lock_online(name)	name##_global_lock_online()
+#define lg_global_unlock_online(name)	name##_global_unlock_online()
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#define LOCKDEP_INIT_MAP	lockdep_init_map
+
+#define DEFINE_LGLOCK_LOCKDEP(name)					\
+ struct lock_class_key name##_lock_key;					\
+ struct lockdep_map name##_lock_dep_map;				\
+ EXPORT_SYMBOL(name##_lock_dep_map)
+
+#else
+#define LOCKDEP_INIT_MAP(a, b, c, d)
+
+#define DEFINE_LGLOCK_LOCKDEP(name)
+#endif
+
+
+#define DECLARE_LGLOCK(name)						\
+ extern void name##_lock_init(void);					\
+ extern void name##_local_lock(void);					\
+ extern void name##_local_unlock(void);					\
+ extern void name##_local_lock_cpu(int cpu);				\
+ extern void name##_local_unlock_cpu(int cpu);				\
+ extern void name##_global_lock(void);					\
+ extern void name##_global_unlock(void);				\
+ extern void name##_global_lock_online(void);				\
+ extern void name##_global_unlock_online(void);				\
+
+#define DEFINE_LGLOCK(name)						\
+									\
+ DEFINE_PER_CPU(arch_spinlock_t, name##_lock);				\
+ DEFINE_LGLOCK_LOCKDEP(name);						\
+									\
+ void name##_lock_init(void) {						\
+	int i;								\
+	LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
+	for_each_possible_cpu(i) {					\
+		arch_spinlock_t *lock;					\
+		lock = &per_cpu(name##_lock, i);			\
+		*lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;	\
+	}								\
+ }									\
+ EXPORT_SYMBOL(name##_lock_init);					\
+									\
+ void name##_local_lock(void) {						\
+	arch_spinlock_t *lock;						\
+	preempt_disable();						\
+	rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_);	\
+	lock = &__get_cpu_var(name##_lock);				\
+	arch_spin_lock(lock);						\
+ }									\
+ EXPORT_SYMBOL(name##_local_lock);					\
+									\
+ void name##_local_unlock(void) {					\
+	arch_spinlock_t *lock;						\
+	rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_);		\
+	lock = &__get_cpu_var(name##_lock);				\
+	arch_spin_unlock(lock);						\
+	preempt_enable();						\
+ }									\
+ EXPORT_SYMBOL(name##_local_unlock);					\
+									\
+ void name##_local_lock_cpu(int cpu) {					\
+	arch_spinlock_t *lock;						\
+	preempt_disable();						\
+	rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_);	\
+	lock = &per_cpu(name##_lock, cpu);				\
+	arch_spin_lock(lock);						\
+ }									\
+ EXPORT_SYMBOL(name##_local_lock_cpu);					\
+									\
+ void name##_local_unlock_cpu(int cpu) {				\
+	arch_spinlock_t *lock;						\
+	rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_);		\
+	lock = &per_cpu(name##_lock, cpu);				\
+	arch_spin_unlock(lock);						\
+	preempt_enable();						\
+ }									\
+ EXPORT_SYMBOL(name##_local_unlock_cpu);				\
+									\
+ void name##_global_lock_online(void) {					\
+	int i;								\
+	preempt_disable();						\
+	rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_);		\
+	for_each_online_cpu(i) {					\
+		arch_spinlock_t *lock;					\
+		lock = &per_cpu(name##_lock, i);			\
+		arch_spin_lock(lock);					\
+	}								\
+ }									\
+ EXPORT_SYMBOL(name##_global_lock_online);				\
+									\
+ void name##_global_unlock_online(void) {				\
+	int i;								\
+	rwlock_release(&name##_lock_dep_map, 1, _RET_IP_);		\
+	for_each_online_cpu(i) {					\
+		arch_spinlock_t *lock;					\
+		lock = &per_cpu(name##_lock, i);			\
+		arch_spin_unlock(lock);					\
+	}								\
+	preempt_enable();						\
+ }									\
+ EXPORT_SYMBOL(name##_global_unlock_online);				\
+									\
+ void name##_global_lock(void) {					\
+	int i;								\
+	preempt_disable();						\
+	rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_);		\
+	for_each_possible_cpu(i) {					\
+		arch_spinlock_t *lock;					\
+		lock = &per_cpu(name##_lock, i);			\
+		arch_spin_lock(lock);					\
+	}								\
+ }									\
+ EXPORT_SYMBOL(name##_global_lock);					\
+									\
+ void name##_global_unlock(void) {					\
+	int i;								\
+	rwlock_release(&name##_lock_dep_map, 1, _RET_IP_);		\
+	for_each_possible_cpu(i) {					\
+		arch_spinlock_t *lock;					\
+		lock = &per_cpu(name##_lock, i);			\
+		arch_spin_unlock(lock);					\
+	}								\
+	preempt_enable();						\
+ }									\
+ EXPORT_SYMBOL(name##_global_unlock);
+#endif

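A hedged usage sketch of the brlock shorthand this file defines; the lock name and the protected structure are illustrative (the in-tree users at the time were of the files_lglock / vfsmount_lock kind). br_lock_init() must run once before any use:

DECLARE_BRLOCK(example_lock);
DEFINE_BRLOCK(example_lock);

static void reader(void)
{
	br_read_lock(example_lock);	/* cheap: takes only this CPU's lock */
	/* ... walk the shared structure ... */
	br_read_unlock(example_lock);
}

static void writer(void)
{
	br_write_lock(example_lock);	/* expensive: takes every online CPU's lock */
	/* ... modify the shared structure ... */
	br_write_unlock(example_lock);
}
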
@@ -335,6 +335,7 @@ enum {
 	ATA_EHI_HOTPLUGGED	= (1 << 0),  /* could have been hotplugged */
 	ATA_EHI_NO_AUTOPSY	= (1 << 2),  /* no autopsy */
 	ATA_EHI_QUIET		= (1 << 3),  /* be quiet */
+	ATA_EHI_NO_RECOVERY	= (1 << 4),  /* no recovery */

 	ATA_EHI_DID_SOFTRESET	= (1 << 16), /* already soft-reset this port */
 	ATA_EHI_DID_HARDRESET	= (1 << 17), /* already soft-reset this port */

@@ -723,6 +724,7 @@ struct ata_port {
 	struct ata_ioports	ioaddr;	/* ATA cmd/ctl/dma register blocks */
 	u8			ctl;	/* cache of ATA control register */
 	u8			last_ctl;	/* Cache last written value */
+	struct ata_link*	sff_pio_task_link; /* link currently used */
 	struct delayed_work	sff_pio_task;
 #ifdef CONFIG_ATA_BMDMA
 	struct ata_bmdma_prd	*bmdma_prd;	/* BMDMA SG list */

@@ -1594,7 +1596,7 @@ extern void ata_sff_irq_on(struct ata_port *ap);
 extern void ata_sff_irq_clear(struct ata_port *ap);
 extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
 			u8 status, int in_wq);
-extern void ata_sff_queue_pio_task(struct ata_port *ap, unsigned long delay);
+extern void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay);
 extern unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc);
 extern bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc);
 extern unsigned int ata_sff_port_intr(struct ata_port *ap,

@@ -33,6 +33,7 @@
 #define MWAVE_MINOR		219	/* ACP/Mwave Modem */
 #define MPT_MINOR		220
 #define MPT2SAS_MINOR		221
+#define UINPUT_MINOR		223
 #define HPET_MINOR		228
 #define FUSE_MINOR		229
 #define KVM_MINOR		232

@@ -78,7 +78,11 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_MAYSHARE	0x00000080

 #define VM_GROWSDOWN	0x00000100	/* general info on the segment */
+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
 #define VM_GROWSUP	0x00000200
+#else
+#define VM_GROWSUP	0x00000000
+#endif
 #define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
 #define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */

@@ -860,6 +864,12 @@ int set_page_dirty(struct page *page);
 int set_page_dirty_lock(struct page *page);
 int clear_page_dirty_for_io(struct page *page);

+/* Is the vma a continuation of the stack vma above it? */
+static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
+{
+	return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
+}
+
 extern unsigned long move_page_tables(struct vm_area_struct *vma,
 		unsigned long old_addr, struct vm_area_struct *new_vma,
 		unsigned long new_addr, unsigned long len);

@@ -1330,8 +1340,10 @@ unsigned long ra_submit(struct file_ra_state *ra,

 /* Do stack extension */
 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
-#ifdef CONFIG_IA64
+#if VM_GROWSUP
 extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
+#else
+  #define expand_upwards(vma, address) do { } while (0)
+#endif
 extern int expand_stack_downwards(struct vm_area_struct *vma,
 				  unsigned long address);

@@ -1357,7 +1369,15 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
 	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 }

+#ifdef CONFIG_MMU
 pgprot_t vm_get_page_prot(unsigned long vm_flags);
+#else
+static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+	return __pgprot(0);
+}
+#endif
+
 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
 			unsigned long pfn, unsigned long size, pgprot_t);

@@ -134,7 +134,7 @@ struct vm_area_struct {
 					   within vm_mm. */

 	/* linked list of VM areas per task, sorted by address */
-	struct vm_area_struct *vm_next;
+	struct vm_area_struct *vm_next, *vm_prev;

 	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */
 	unsigned long vm_flags;		/* Flags, see mm.h. */

@@ -38,6 +38,8 @@
  *      [8:0] Byte/block count
  */

+#define R4_MEMORY_PRESENT (1 << 27)
+
 /*
   SDIO status in R5
   Type

@@ -283,6 +283,13 @@ struct zone {
 	/* zone watermarks, access with *_wmark_pages(zone) macros */
 	unsigned long watermark[NR_WMARK];

+	/*
+	 * When free pages are below this point, additional steps are taken
+	 * when reading the number of free pages to avoid per-cpu counter
+	 * drift allowing watermarks to be breached
+	 */
+	unsigned long percpu_drift_mark;
+
 	/*
	 * We don't know if the memory that we're going to allocate will be freeable
	 * or/and it will be released eventually, so to avoid totally wasting several

@@ -441,6 +448,12 @@ static inline int zone_is_oom_locked(const struct zone *zone)
 	return test_bit(ZONE_OOM_LOCKED, &zone->flags);
 }

+#ifdef CONFIG_SMP
+unsigned long zone_nr_free_pages(struct zone *zone);
+#else
+#define zone_nr_free_pages(zone) zone_page_state(zone, NR_FREE_PAGES)
+#endif /* CONFIG_SMP */
+
 /*
  * The "priority" of VM scanning is how much of the queues we will scan in one
  * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the

@@ -78,6 +78,14 @@ struct mutex_waiter {
 # include <linux/mutex-debug.h>
 #else
 # define __DEBUG_MUTEX_INITIALIZER(lockname)
+/**
+ * mutex_init - initialize the mutex
+ * @mutex: the mutex to be initialized
+ *
+ * Initialize the mutex to unlocked state.
+ *
+ * It is not allowed to initialize an already locked mutex.
+ */
 # define mutex_init(mutex) \
 do {							\
 	static struct lock_class_key __key;		\

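The added kernel-doc states mutex_init()'s one rule: initialize once, before first use, and never on a mutex that is already locked. A minimal sketch (the structure and names are illustrative):

#include <linux/init.h>
#include <linux/mutex.h>

struct cache {
	struct mutex lock;
	int value;
};

static struct cache c;

static int __init cache_setup(void)
{
	mutex_init(&c.lock);	/* once, before first use */
	return 0;
}

static void cache_set(int v)
{
	mutex_lock(&c.lock);
	c.value = v;
	mutex_unlock(&c.lock);
}
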
@@ -88,7 +88,7 @@ struct nbd_request {
 	char handle[8];
 	__be64 from;
 	__be32 len;
-} __packed;
+} __attribute__((packed));

 /*
  * This is the reply packet that nbd-server sends back to the client after

@@ -27,7 +27,7 @@ struct ncp_request_header {
 	__u8	conn_high;
 	__u8	function;
 	__u8	data[0];
-} __packed;
+} __attribute__((packed));

 #define NCP_REPLY		(0x3333)
 #define NCP_WATCHDOG		(0x3E3E)

@@ -42,7 +42,7 @@ struct ncp_reply_header {
 	__u8	completion_code;
 	__u8	connection_state;
 	__u8	data[0];
-} __packed;
+} __attribute__((packed));

 #define NCP_VOLNAME_LEN		(16)
 #define NCP_NUMBER_OF_VOLUMES	(256)

@@ -158,7 +158,7 @@ struct nw_info_struct {
 #ifdef __KERNEL__
 	struct nw_nfs_info nfs;
 #endif
-} __packed;
+} __attribute__((packed));

 /* modify mask - use with MODIFY_DOS_INFO structure */
 #define DM_ATTRIBUTES		(cpu_to_le32(0x02))

@@ -190,12 +190,12 @@ struct nw_modify_dos_info {
 	__u16	inheritanceGrantMask;
 	__u16	inheritanceRevokeMask;
 	__u32	maximumSpace;
-} __packed;
+} __attribute__((packed));

 struct nw_search_sequence {
 	__u8	volNumber;
 	__u32	dirBase;
 	__u32	sequence;
-} __packed;
+} __attribute__((packed));

 #endif /* _LINUX_NCP_H */

@@ -39,7 +39,7 @@ struct idletimer_tg_info {
 	char label[MAX_IDLETIMER_LABEL_SIZE];

 	/* for kernel module internal use only */
-	struct idletimer_tg *timer __attribute((aligned(8)));
+	struct idletimer_tg *timer __attribute__((aligned(8)));
 };

 #endif

@@ -1,6 +1,8 @@
 #ifndef _XT_IPVS_H
 #define _XT_IPVS_H

+#include <linux/types.h>
+
 enum {
 	XT_IPVS_IPVS_PROPERTY =	1 << 0, /* all other options imply this one */
 	XT_IPVS_PROTO =		1 << 1,

@@ -27,8 +27,6 @@

 #define MAX_LINKS 32

-struct net;
-
 struct sockaddr_nl {
 	sa_family_t	nl_family;	/* AF_NETLINK	*/
 	unsigned short	nl_pad;		/* zero		*/

@@ -151,6 +149,8 @@ struct nlattr {
 #include <linux/capability.h>
 #include <linux/skbuff.h>

+struct net;
+
 static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb)
 {
 	return (struct nlmsghdr *)skb->data;

@@ -63,20 +63,20 @@ static inline bool netpoll_rx(struct sk_buff *skb)
 	unsigned long flags;
 	bool ret = false;

-	rcu_read_lock_bh();
+	local_irq_save(flags);
 	npinfo = rcu_dereference_bh(skb->dev->npinfo);

 	if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags))
 		goto out;

-	spin_lock_irqsave(&npinfo->rx_lock, flags);
+	spin_lock(&npinfo->rx_lock);
 	/* check rx_flags again with the lock held */
 	if (npinfo->rx_flags && __netpoll_rx(skb))
 		ret = true;
-	spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+	spin_unlock(&npinfo->rx_lock);

 out:
-	rcu_read_unlock_bh();
+	local_irq_restore(flags);
 	return ret;
 }

@@ -1214,6 +1214,9 @@ static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
 						unsigned int devfn)
 { return NULL; }

+static inline int pci_domain_nr(struct pci_bus *bus)
+{ return 0; }
+
 #define dev_is_pci(d) (false)
 #define dev_is_pf(d) (false)
 #define dev_num_vf(d) (0)

@@ -393,6 +393,9 @@
 #define PCI_DEVICE_ID_VLSI_82C147	0x0105
 #define PCI_DEVICE_ID_VLSI_VAS96011	0x0702

+/* AMD RD890 Chipset */
+#define PCI_DEVICE_ID_RD890_IOMMU	0x5a23
+
 #define PCI_VENDOR_ID_ADL		0x1005
 #define PCI_DEVICE_ID_ADL_2301		0x2301

@@ -2300,6 +2303,8 @@
 #define PCI_DEVICE_ID_P2010		0x0079
 #define PCI_DEVICE_ID_P1020E		0x0100
 #define PCI_DEVICE_ID_P1020		0x0101
+#define PCI_DEVICE_ID_P1021E		0x0102
+#define PCI_DEVICE_ID_P1021		0x0103
 #define PCI_DEVICE_ID_P1011E		0x0108
 #define PCI_DEVICE_ID_P1011		0x0109
 #define PCI_DEVICE_ID_P1022E		0x0110

@@ -149,7 +149,7 @@ extern void __init percpu_init_late(void);

 #else /* CONFIG_SMP */

-#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
+#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); })

 /* can't distinguish from other static vars, always false */
 static inline bool is_kernel_percpu_address(unsigned long addr)

@@ -56,7 +56,7 @@ struct phonethdr {
 	__be16	pn_length;
 	__u8	pn_robj;
 	__u8	pn_sobj;
-} __packed;
+} __attribute__((packed));

 /* Common Phonet payload header */
 struct phonetmsg {

@@ -98,7 +98,7 @@ struct sockaddr_pn {
 	__u8 spn_dev;
 	__u8 spn_resource;
 	__u8 spn_zero[sizeof(struct sockaddr) - sizeof(sa_family_t) - 3];
-} __packed;
+} __attribute__((packed));

 /* Well known address */
 #define PN_DEV_PC	0x10

include/linux/pxa168_eth.h (new file, 30 lines)

@@ -0,0 +1,30 @@
+/*
+ *pxa168 ethernet platform device data definition file.
+ */
+#ifndef __LINUX_PXA168_ETH_H
+#define __LINUX_PXA168_ETH_H
+
+struct pxa168_eth_platform_data {
+	int	port_number;
+	int	phy_addr;
+
+	/*
+	 * If speed is 0, then speed and duplex are autonegotiated.
+	 */
+	int	speed;		/* 0, SPEED_10, SPEED_100 */
+	int	duplex;		/* DUPLEX_HALF or DUPLEX_FULL */
+
+	/*
+	 * Override default RX/TX queue sizes if nonzero.
+	 */
+	int	rx_queue_size;
+	int	tx_queue_size;
+
+	/*
+	 * init callback is used for board specific initialization
+	 * e.g on Aspenite its used to initialize the PHY transceiver.
+	 */
+	int (*init)(void);
+};
+
+#endif /* __LINUX_PXA168_ETH_H */

@@ -274,8 +274,14 @@ static inline int dquot_alloc_space(struct inode *inode, qsize_t nr)
 	int ret;

 	ret = dquot_alloc_space_nodirty(inode, nr);
-	if (!ret)
-		mark_inode_dirty_sync(inode);
+	if (!ret) {
+		/*
+		 * Mark inode fully dirty. Since we are allocating blocks, inode
+		 * would become fully dirty soon anyway and it reportedly
+		 * reduces inode_lock contention.
+		 */
+		mark_inode_dirty(inode);
+	}
 	return ret;
 }

@@ -81,7 +81,7 @@ struct rfkill_event {
 	__u8  type;
 	__u8  op;
 	__u8  soft, hard;
-} __packed;
+} __attribute__((packed));

 /*
  * We are planning to be backward and forward compatible with changes

@@ -2109,7 +2109,9 @@ extern void daemonize(const char *, ...);
 extern int allow_signal(int);
 extern int disallow_signal(int);

-extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *);
+extern int do_execve(const char *,
+		     const char __user * const __user *,
+		     const char __user * const __user *, struct pt_regs *);
 extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
 struct task_struct *fork_idle(int);

@ -26,6 +26,9 @@ struct semaphore {
	.wait_list	= LIST_HEAD_INIT((name).wait_list),		\
}

#define DEFINE_SEMAPHORE(name)	\
	struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)

#define DECLARE_MUTEX(name)	\
	struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
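This is the change the merge exists to pull in: DEFINE_SEMAPHORE() is the new spelling of a statically initialized binary semaphore, identical in expansion to the legacy DECLARE_MUTEX(). A minimal usage sketch (names illustrative):

/* Sketch: binary semaphore via the new macro; names illustrative. */
#include <linux/errno.h>
#include <linux/semaphore.h>

static DEFINE_SEMAPHORE(probe_sem);	/* count starts at 1 */

static int do_probe(void)
{
	if (down_interruptible(&probe_sem))
		return -EINTR;
	/* ... serialized probe work ... */
	up(&probe_sem);
	return 0;
}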
@ -77,8 +77,7 @@ struct serial_struct {
#define PORT_16654	11
#define PORT_16850	12
#define PORT_RSA	13	/* RSA-DV II/S card */
#define PORT_U6_16550A	14
#define PORT_MAX	14
#define PORT_MAX	13

#define SERIAL_IO_PORT	0
#define SERIAL_IO_HUB6	1
@ -44,7 +44,8 @@
#define PORT_RM9000	16	/* PMC-Sierra RM9xxx internal UART */
#define PORT_OCTEON	17	/* Cavium OCTEON internal UART */
#define PORT_AR7	18	/* Texas Instruments AR7 internal UART */
#define PORT_MAX_8250	18	/* max port ID */
#define PORT_U6_16550A	19	/* ST-Ericsson U6xxx internal UART */
#define PORT_MAX_8250	19	/* max port ID */

/*
 * ARM specific type numbers. These are not currently guaranteed
@ -465,7 +466,7 @@ uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
#ifdef SUPPORT_SYSRQ
	if (port->sysrq) {
		if (ch && time_before(jiffies, port->sysrq)) {
			handle_sysrq(ch, port->state->port.tty);
			handle_sysrq(ch);
			port->sysrq = 0;
			return 1;
		}
@ -68,7 +68,7 @@ struct kmem_cache_order_objects {
 * Slab cache management.
 */
struct kmem_cache {
	struct kmem_cache_cpu *cpu_slab;
	struct kmem_cache_cpu __percpu *cpu_slab;
	/* Used for retrieving partial slabs etc */
	unsigned long flags;
	int size;	/* The size of an object including meta data */
@ -322,7 +322,7 @@ extern int csum_partial_copy_fromiovecend(unsigned char *kdata,
					  int offset,
					  unsigned int len, __wsum *csump);

extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode);
extern long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode);
extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len);
extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
			     int offset, int len);
@ -14,7 +14,9 @@
#define SPI_MODE_OFFSET		6
#define SPI_SCPH_OFFSET		6
#define SPI_SCOL_OFFSET		7

#define SPI_TMOD_OFFSET		8
#define SPI_TMOD_MASK		(0x3 << SPI_TMOD_OFFSET)
#define SPI_TMOD_TR		0x0		/* xmit & recv */
#define SPI_TMOD_TO		0x1		/* xmit only */
#define SPI_TMOD_RO		0x2		/* recv only */
@ -213,6 +213,9 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
 * @dma_alignment: SPI controller constraint on DMA buffers alignment.
 * @mode_bits: flags understood by this controller driver
 * @flags: other constraints relevant to this driver
 * @bus_lock_spinlock: spinlock for SPI bus locking
 * @bus_lock_mutex: mutex for SPI bus locking
 * @bus_lock_flag: indicates that the SPI bus is locked for exclusive use
 * @setup: updates the device mode and clocking records used by a
 *	device's SPI controller; protocol code may call this. This
 *	must fail if an unrecognized or unsupported mode is requested.
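The three new fields back the SPI bus-lock API, which lets one client submit several messages with no other traffic in between. A sketch assuming a valid spi_device and two messages already prepared by the caller:

/* Sketch of the bus-lock API the new fields support; 'spi', 'm1'
 * and 'm2' are assumed to be set up by the caller. */
#include <linux/spi/spi.h>

static int send_pair_atomically(struct spi_device *spi,
				struct spi_message *m1,
				struct spi_message *m2)
{
	int ret;

	spi_bus_lock(spi->master);	/* takes bus_lock_mutex */
	ret = spi_sync_locked(spi, m1);
	if (!ret)
		ret = spi_sync_locked(spi, m2);
	spi_bus_unlock(spi->master);
	return ret;
}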
@ -30,7 +30,7 @@ struct rpc_inode;
 * The high-level client handle
 */
struct rpc_clnt {
	struct kref		cl_kref;	/* Number of references */
	atomic_t		cl_count;	/* Number of references */
	struct list_head	cl_clients;	/* Global list of clients */
	struct list_head	cl_tasks;	/* List of tasks */
	spinlock_t		cl_lock;	/* spinlock */
@ -19,6 +19,7 @@ struct bio;
#define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK	0x7fff
#define SWAP_FLAG_PRIO_SHIFT	0
#define SWAP_FLAG_DISCARD	0x10000 /* discard swap cluster after use */

static inline int current_is_kswapd(void)
{
@ -142,7 +143,7 @@ struct swap_extent {
enum {
	SWP_USED	= (1 << 0),	/* is slot in swap_info[] used? */
	SWP_WRITEOK	= (1 << 1),	/* ok to write to this swap? */
	SWP_DISCARDABLE = (1 << 2),	/* blkdev supports discard */
	SWP_DISCARDABLE = (1 << 2),	/* swapon+blkdev support discard */
	SWP_DISCARDING	= (1 << 3),	/* now discarding a free cluster */
	SWP_SOLIDSTATE	= (1 << 4),	/* blkdev seeks are cheap */
	SWP_CONTINUED	= (1 << 5),	/* swap_map has count continuation */
@ -315,6 +316,7 @@ extern long nr_swap_pages;
extern long total_swap_pages;
extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(void);
extern swp_entry_t get_swap_page_of_type(int);
extern int valid_swaphandles(swp_entry_t, unsigned long *);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
@ -331,13 +333,6 @@ extern int reuse_swap_page(struct page *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;

#ifdef CONFIG_HIBERNATION
void hibernation_freeze_swap(void);
void hibernation_thaw_swap(void);
swp_entry_t get_swap_for_hibernation(int type);
void swap_free_for_hibernation(swp_entry_t val);
#endif

/* linux/mm/thrash.c */
extern struct mm_struct *swap_token_mm;
extern void grab_swap_token(struct mm_struct *);
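SWAP_FLAG_DISCARD travels in the same flags word as the priority bits of swapon(2). A userspace sketch, assuming a glibc that exposes the older SWAP_FLAG_* constants (the partition path is illustrative):

/* Userspace sketch of the swapon(2) flags layout. */
#include <stdio.h>
#include <sys/swap.h>

#ifndef SWAP_FLAG_DISCARD
#define SWAP_FLAG_DISCARD 0x10000	/* mirrors the new kernel define */
#endif

int main(void)
{
	int prio = 5;
	int flags = SWAP_FLAG_PREFER |
		    ((prio << SWAP_FLAG_PRIO_SHIFT) & SWAP_FLAG_PRIO_MASK) |
		    SWAP_FLAG_DISCARD;

	if (swapon("/dev/sdb2", flags) != 0)
		perror("swapon");
	return 0;
}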
@ -820,7 +820,7 @@ asmlinkage long sys_fanotify_mark(int fanotify_fd, unsigned int flags,
				  u64 mask, int fd,
				  const char __user *pathname);

int kernel_execve(const char *filename, char *const argv[], char *const envp[]);
int kernel_execve(const char *filename, const char *const argv[], const char *const envp[]);

asmlinkage long sys_perf_event_open(
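Together with the do_execve() hunk above, in-kernel callers can now keep argv/envp fully const. A sketch; the helper name and paths are illustrative:

/* Sketch of const-qualified argv/envp accepted by the new prototype. */
#include <linux/syscalls.h>

static const char *argv_init[] = { "/sbin/init", NULL };
static const char *envp_init[] = { "HOME=/", "TERM=linux", NULL };

static int spawn_init(void)
{
	return kernel_execve("/sbin/init", argv_init, envp_init);
}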
@ -16,6 +16,7 @@
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/kobject_ns.h>
#include <asm/atomic.h>

struct kobject;
@ -15,9 +15,7 @@
#define _LINUX_SYSRQ_H

#include <linux/errno.h>

struct pt_regs;
struct tty_struct;
#include <linux/types.h>

/* Possible values of bitmask for enabling sysrq functions */
/* 0x0001 is reserved for enable everything */
@ -31,7 +29,7 @@ struct tty_struct;
#define SYSRQ_ENABLE_RTNICE	0x0100

struct sysrq_key_op {
	void (*handler)(int, struct tty_struct *);
	void (*handler)(int);
	char *help_msg;
	char *action_msg;
	int enable_mask;
@ -44,8 +42,8 @@ struct sysrq_key_op {
 * are available -- else NULL's).
 */

void handle_sysrq(int key, struct tty_struct *tty);
void __handle_sysrq(int key, struct tty_struct *tty, int check_mask);
void handle_sysrq(int key);
void __handle_sysrq(int key, bool check_mask);
int register_sysrq_key(int key, struct sysrq_key_op *op);
int unregister_sysrq_key(int key, struct sysrq_key_op *op);
struct sysrq_key_op *__sysrq_get_key_op(int key);
@ -54,7 +52,11 @@ int sysrq_toggle_support(int enable_mask);

#else

static inline void handle_sysrq(int key, struct tty_struct *tty)
static inline void handle_sysrq(int key)
{
}

static inline void __handle_sysrq(int key, bool check_mask)
{
}
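Every sysrq handler loses its tty argument with this change (the serial_core and usb-serial hunks in this merge are fallout from the same conversion). A converted handler would look like the sketch below; the key, messages and mask are illustrative:

/* Sketch of a handler against the new tty-less signature. */
#include <linux/kernel.h>
#include <linux/sysrq.h>

static void sysrq_handle_example(int key)
{
	pr_info("sysrq: got key %d\n", key);
}

static struct sysrq_key_op example_sysrq_op = {
	.handler	= sysrq_handle_example,
	.help_msg	= "example(X)",
	.action_msg	= "Example action",
	.enable_mask	= SYSRQ_ENABLE_RTNICE,
};

/* registered elsewhere with: register_sysrq_key('x', &example_sysrq_op); */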
@ -329,6 +329,13 @@ struct tty_struct {
	struct tty_port *port;
};

/* Each of a tty's open files has private_data pointing to tty_file_private */
struct tty_file_private {
	struct tty_struct *tty;
	struct file *file;
	struct list_head list;
};

/* tty magic number */
#define TTY_MAGIC		0x5401

@ -458,6 +465,7 @@ extern void proc_clear_tty(struct task_struct *p);
extern struct tty_struct *get_current_tty(void);
extern void tty_default_fops(struct file_operations *fops);
extern struct tty_struct *alloc_tty_struct(void);
extern void tty_add_file(struct tty_struct *tty, struct file *file);
extern void free_tty_struct(struct tty_struct *tty);
extern void initialize_tty_struct(struct tty_struct *tty,
				  struct tty_driver *driver, int idx);
@ -470,6 +478,7 @@ extern struct tty_struct *tty_pair_get_tty(struct tty_struct *tty);
extern struct tty_struct *tty_pair_get_pty(struct tty_struct *tty);

extern struct mutex tty_mutex;
extern spinlock_t tty_files_lock;

extern void tty_write_unlock(struct tty_struct *tty);
extern int tty_write_lock(struct tty_struct *tty, int ndelay);
@ -37,7 +37,6 @@
#define UINPUT_VERSION		3

#ifdef __KERNEL__
#define UINPUT_MINOR		223
#define UINPUT_NAME		"uinput"
#define UINPUT_BUFFER_SIZE	16
#define UINPUT_NUM_REQUESTS	16
@ -247,6 +247,7 @@ int usb_add_config(struct usb_composite_dev *,
 *	value; it should return zero on successful initialization.
 * @unbind: Reverses @bind(); called as a side effect of unregistering
 *	this driver.
 * @disconnect: optional driver disconnect method
 * @suspend: Notifies when the host stops sending USB traffic,
 *	after function notifications
 * @resume: Notifies configuration when the host restarts USB traffic,
@ -342,8 +342,7 @@ extern int usb_serial_generic_submit_read_urb(struct usb_serial_port *port,
extern void usb_serial_generic_process_read_urb(struct urb *urb);
extern int usb_serial_generic_prepare_write_buffer(struct usb_serial_port *port,
						   void *dest, size_t size);
extern int usb_serial_handle_sysrq_char(struct tty_struct *tty,
					struct usb_serial_port *port,
extern int usb_serial_handle_sysrq_char(struct usb_serial_port *port,
					unsigned int ch);
extern int usb_serial_handle_break(struct usb_serial_port *port);
@ -93,8 +93,11 @@ extern void vga_set_legacy_decoding(struct pci_dev *pdev,
 * Nested calls are supported (a per-resource counter is maintained)
 */

extern int vga_get(struct pci_dev *pdev, unsigned int rsrc,
		   int interruptible);
#if defined(CONFIG_VGA_ARB)
extern int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible);
#else
static inline int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible) { return 0; }
#endif

/**
 * vga_get_interruptible
@ -131,7 +134,11 @@ static inline int vga_get_uninterruptible(struct pci_dev *pdev,
 * are already locked by another card. It can be called in any context
 */

#if defined(CONFIG_VGA_ARB)
extern int vga_tryget(struct pci_dev *pdev, unsigned int rsrc);
#else
static inline int vga_tryget(struct pci_dev *pdev, unsigned int rsrc) { return 0; }
#endif

/**
 * vga_put - release lock on legacy VGA resources
@ -146,7 +153,11 @@ extern int vga_tryget(struct pci_dev *pdev, unsigned int rsrc);
 * released if the counter reaches 0.
 */

#if defined(CONFIG_VGA_ARB)
extern void vga_put(struct pci_dev *pdev, unsigned int rsrc);
#else
#define vga_put(pdev, rsrc)
#endif

/**
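With the new !CONFIG_VGA_ARB stubs, callers can bracket legacy VGA access unconditionally instead of carrying their own ifdefs. A sketch; the register poke is illustrative:

/* Sketch: arbiter bracketing that now compiles with or without
 * CONFIG_VGA_ARB (the stubs return 0 / expand to nothing). */
#include <linux/pci.h>
#include <linux/vgaarb.h>

static int poke_legacy_vga(struct pci_dev *pdev)
{
	int ret;

	ret = vga_get_interruptible(pdev, VGA_RSRC_LEGACY_IO);
	if (ret)
		return ret;	/* interrupted while waiting for the bus */

	/* ... access 0x3C0-0x3DF legacy registers here ... */

	vga_put(pdev, VGA_RSRC_LEGACY_IO);
	return 0;
}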
@ -170,6 +170,28 @@ static inline unsigned long zone_page_state(struct zone *zone,
	return x;
}

/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization so the result cannot be
 * exactly accurate either.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}

extern unsigned long global_reclaimable_pages(void);
extern unsigned long zone_reclaimable_pages(struct zone *zone);
@ -25,18 +25,20 @@ typedef void (*work_func_t)(struct work_struct *work);

enum {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_CWQ_BIT	= 1,	/* data points to cwq */
	WORK_STRUCT_LINKED_BIT	= 2,	/* next work is linked to this one */
	WORK_STRUCT_DELAYED_BIT	= 1,	/* work item is delayed */
	WORK_STRUCT_CWQ_BIT	= 2,	/* data points to cwq */
	WORK_STRUCT_LINKED_BIT	= 3,	/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT	= 3,	/* static initializer (debugobjects) */
	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color for workqueue flushing */
	WORK_STRUCT_STATIC_BIT	= 4,	/* static initializer (debugobjects) */
	WORK_STRUCT_COLOR_SHIFT	= 5,	/* color for workqueue flushing */
#else
	WORK_STRUCT_COLOR_SHIFT	= 3,	/* color for workqueue flushing */
	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color for workqueue flushing */
#endif

	WORK_STRUCT_COLOR_BITS	= 4,

	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_DELAYED	= 1 << WORK_STRUCT_DELAYED_BIT,
	WORK_STRUCT_CWQ		= 1 << WORK_STRUCT_CWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
@ -59,8 +61,8 @@ enum {

	/*
	 * Reserve 7 bits off of cwq pointer w/ debugobjects turned
	 * off. This makes cwqs aligned to 128 bytes which isn't too
	 * excessive while allowing 15 workqueue flush colors.
	 * off. This makes cwqs aligned to 256 bytes and allows 15
	 * workqueue flush colors.
	 */
	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
				  WORK_STRUCT_COLOR_BITS,
@ -233,6 +235,10 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
#define work_clear_pending(work) \
	clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/*
 * Workqueue flags and constants. For details, please refer to
 * Documentation/workqueue.txt.
 */
enum {
	WQ_NON_REENTRANT	= 1 << 0, /* guarantee non-reentrance */
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
@ -241,6 +247,8 @@ enum {
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */

	WQ_DYING		= 1 << 6, /* internal: workqueue is dying */

	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
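The renumbered bits make room for WORK_STRUCT_DELAYED, which marks work whose timer is still in flight; the user-visible API is unchanged. A sketch combining the documented WQ_* flags with delayed work (all names illustrative):

/* Sketch: a workqueue using the flags above plus delayed work, whose
 * pending timer is now tracked via WORK_STRUCT_DELAYED internally. */
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

static void poll_fn(struct work_struct *work)
{
	/* ... periodic work ... */
}
static DECLARE_DELAYED_WORK(poll_work, poll_fn);

static struct workqueue_struct *poll_wq;

static int start_polling(void)
{
	poll_wq = alloc_workqueue("poll_wq", WQ_UNBOUND | WQ_HIGHPRI, 0);
	if (!poll_wq)
		return -ENOMEM;
	return queue_delayed_work(poll_wq, &poll_work, HZ) ? 0 : -EBUSY;
}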