commit 0612ec4876
Merge rsync://rsync.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
2417 changed files with 59491 additions and 27087 deletions
@@ -7,7 +7,6 @@
#define LINUX_ATMDEV_H


#include <linux/device.h>
#include <linux/atmapi.h>
#include <linux/atm.h>
#include <linux/atmioc.h>
@@ -210,6 +209,7 @@ struct atm_cirange {

#ifdef __KERNEL__

#include <linux/device.h>
#include <linux/wait.h> /* wait_queue_head_t */
#include <linux/time.h> /* struct timeval */
#include <linux/net.h>
@@ -132,6 +132,10 @@
#define AUDIT_CLASS_DIR_WRITE_32 1
#define AUDIT_CLASS_CHATTR 2
#define AUDIT_CLASS_CHATTR_32 3
#define AUDIT_CLASS_READ 4
#define AUDIT_CLASS_READ_32 5
#define AUDIT_CLASS_WRITE 6
#define AUDIT_CLASS_WRITE_32 7

/* This bitmask is used to validate user input. It represents all bits that
* are currently used in an audit field constant understood by the kernel.
@@ -177,6 +181,7 @@
#define AUDIT_EXIT 103
#define AUDIT_SUCCESS 104 /* exit >= 0; value ignored */
#define AUDIT_WATCH 105
#define AUDIT_PERM 106

#define AUDIT_ARG0 200
#define AUDIT_ARG1 (AUDIT_ARG0+1)
@@ -252,6 +257,11 @@
#define AUDIT_ARCH_V850 (EM_V850|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)

#define AUDIT_PERM_EXEC 1
#define AUDIT_PERM_WRITE 2
#define AUDIT_PERM_READ 4
#define AUDIT_PERM_ATTR 8

struct audit_status {
__u32 mask; /* Bit mask for valid entries */
__u32 enabled; /* 1 = enabled, 0 = disabled */
@@ -314,6 +324,7 @@ struct mqstat;
#define AUDITSC_FAILURE 2
#define AUDITSC_RESULT(x) ( ((long)(x))<0?AUDITSC_FAILURE:AUDITSC_SUCCESS )
extern int __init audit_register_class(int class, unsigned *list);
extern int audit_classify_syscall(int abi, unsigned syscall);
#ifdef CONFIG_AUDITSYSCALL
/* These are defined in auditsc.c */
/* Public API */
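The AUDITSC_RESULT() macro above only inspects the sign of a syscall's return value. A short worked illustration (not part of the diff):

/* AUDITSC_RESULT() illustration (assumes the macro as defined above):
 *   open() failing with -EACCES (-13)  -> (long)(-13) < 0 -> AUDITSC_FAILURE
 *   open() returning file descriptor 3 -> 3 >= 0          -> AUDITSC_SUCCESS
 * Only the sign decides the success/failure classification; the numeric
 * return value itself is passed along separately. */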
@@ -327,21 +338,31 @@ extern void __audit_getname(const char *name);
extern void audit_putname(const char *name);
extern void __audit_inode(const char *name, const struct inode *inode);
extern void __audit_inode_child(const char *dname, const struct inode *inode,
unsigned long pino);
const struct inode *parent);
extern void __audit_inode_update(const struct inode *inode);
static inline int audit_dummy_context(void)
{
void *p = current->audit_context;
return !p || *(int *)p;
}
static inline void audit_getname(const char *name)
{
if (unlikely(current->audit_context))
if (unlikely(!audit_dummy_context()))
__audit_getname(name);
}
static inline void audit_inode(const char *name, const struct inode *inode) {
if (unlikely(current->audit_context))
if (unlikely(!audit_dummy_context()))
__audit_inode(name, inode);
}
static inline void audit_inode_child(const char *dname,
const struct inode *inode,
unsigned long pino) {
if (unlikely(current->audit_context))
__audit_inode_child(dname, inode, pino);
const struct inode *inode,
const struct inode *parent) {
if (unlikely(!audit_dummy_context()))
__audit_inode_child(dname, inode, parent);
}
static inline void audit_inode_update(const struct inode *inode) {
if (unlikely(!audit_dummy_context()))
__audit_inode_update(inode);
}

/* Private API (for audit.c only) */
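Every wrapper converted above follows the same fast-path idiom: test audit_dummy_context() first, and only fall through to the out-of-line __audit_*() helper when there is a real context to fill in. A minimal sketch of that idiom with a hypothetical hook (audit_example_hook and __audit_example_hook are illustrative names, not kernel API):

/* Sketch of the wrapper idiom used above; the hook name is illustrative. */
static inline void audit_example_hook(const char *name)
{
        /* Cheap test: no audit context on the task, or a context marked
         * dummy because no collection is needed for this syscall. */
        if (unlikely(!audit_dummy_context()))
                __audit_example_hook(name);     /* out-of-line slow path */
}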
@ -365,57 +386,61 @@ extern int __audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat);
|
|||
|
||||
static inline int audit_ipc_obj(struct kern_ipc_perm *ipcp)
|
||||
{
|
||||
if (unlikely(current->audit_context))
|
||||
if (unlikely(!audit_dummy_context()))
|
||||
return __audit_ipc_obj(ipcp);
|
||||
return 0;
|
||||
}
|
||||
static inline int audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode)
|
||||
{
|
||||
if (unlikely(current->audit_context))
|
||||
if (unlikely(!audit_dummy_context()))
|
||||
return __audit_ipc_set_perm(qbytes, uid, gid, mode);
|
||||
return 0;
|
||||
}
|
||||
static inline int audit_mq_open(int oflag, mode_t mode, struct mq_attr __user *u_attr)
|
||||
{
|
||||
if (unlikely(current->audit_context))
|
||||
if (unlikely(!audit_dummy_context()))
|
||||
return __audit_mq_open(oflag, mode, u_attr);
|
||||
return 0;
|
||||
}
|
||||
static inline int audit_mq_timedsend(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec __user *u_abs_timeout)
|
||||
{
|
||||
if (unlikely(current->audit_context))
|
||||
if (unlikely(!audit_dummy_context()))
|
||||
return __audit_mq_timedsend(mqdes, msg_len, msg_prio, u_abs_timeout);
|
||||
return 0;
|
||||
}
|
||||
static inline int audit_mq_timedreceive(mqd_t mqdes, size_t msg_len, unsigned int __user *u_msg_prio, const struct timespec __user *u_abs_timeout)
|
||||
{
|
||||
if (unlikely(current->audit_context))
|
||||
if (unlikely(!audit_dummy_context()))
|
||||
return __audit_mq_timedreceive(mqdes, msg_len, u_msg_prio, u_abs_timeout);
|
||||
return 0;
|
||||
}
|
||||
static inline int audit_mq_notify(mqd_t mqdes, const struct sigevent __user *u_notification)
|
||||
{
|
||||
if (unlikely(current->audit_context))
|
||||
if (unlikely(!audit_dummy_context()))
|
||||
return __audit_mq_notify(mqdes, u_notification);
|
||||
return 0;
|
||||
}
|
||||
static inline int audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat)
|
||||
{
|
||||
if (unlikely(current->audit_context))
|
||||
if (unlikely(!audit_dummy_context()))
|
||||
return __audit_mq_getsetattr(mqdes, mqstat);
|
||||
return 0;
|
||||
}
|
||||
extern int audit_n_rules;
|
||||
#else
|
||||
#define audit_alloc(t) ({ 0; })
|
||||
#define audit_free(t) do { ; } while (0)
|
||||
#define audit_syscall_entry(ta,a,b,c,d,e) do { ; } while (0)
|
||||
#define audit_syscall_exit(f,r) do { ; } while (0)
|
||||
#define audit_dummy_context() 1
|
||||
#define audit_getname(n) do { ; } while (0)
|
||||
#define audit_putname(n) do { ; } while (0)
|
||||
#define __audit_inode(n,i) do { ; } while (0)
|
||||
#define __audit_inode_child(d,i,p) do { ; } while (0)
|
||||
#define __audit_inode_update(i) do { ; } while (0)
|
||||
#define audit_inode(n,i) do { ; } while (0)
|
||||
#define audit_inode_child(d,i,p) do { ; } while (0)
|
||||
#define audit_inode_update(i) do { ; } while (0)
|
||||
#define auditsc_get_stamp(c,t,s) do { BUG(); } while (0)
|
||||
#define audit_get_loginuid(c) ({ -1; })
|
||||
#define audit_ipc_obj(i) ({ 0; })
|
||||
|
|
@ -430,6 +455,7 @@ static inline int audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat)
|
|||
#define audit_mq_timedreceive(d,l,p,t) ({ 0; })
|
||||
#define audit_mq_notify(d,n) ({ 0; })
|
||||
#define audit_mq_getsetattr(d,s) ({ 0; })
|
||||
#define audit_n_rules 0
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_AUDIT
|
||||
|
|
|
|||
|
|
@@ -11,7 +11,7 @@ enum blktrace_cat {
BLK_TC_READ = 1 << 0, /* reads */
BLK_TC_WRITE = 1 << 1, /* writes */
BLK_TC_BARRIER = 1 << 2, /* barrier */
BLK_TC_SYNC = 1 << 3, /* barrier */
BLK_TC_SYNC = 1 << 3, /* sync IO */
BLK_TC_QUEUE = 1 << 4, /* queueing/merging */
BLK_TC_REQUEUE = 1 << 5, /* requeueing */
BLK_TC_ISSUE = 1 << 6, /* issue */
@@ -19,6 +19,7 @@ enum blktrace_cat {
BLK_TC_FS = 1 << 8, /* fs requests */
BLK_TC_PC = 1 << 9, /* pc requests */
BLK_TC_NOTIFY = 1 << 10, /* special message */
BLK_TC_AHEAD = 1 << 11, /* readahead */

BLK_TC_END = 1 << 15, /* only 16-bits, reminder */
};
@@ -147,7 +148,7 @@ static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq,
u32 what)
{
struct blk_trace *bt = q->blk_trace;
int rw = rq->flags & 0x07;
int rw = rq->flags & 0x03;

if (likely(!bt))
return;
|
|||
|
|
@@ -91,7 +91,7 @@ static inline void *alloc_remap(int nid, unsigned long size)
}
#endif

extern unsigned long nr_kernel_pages;
extern unsigned long __meminitdata nr_kernel_pages;
extern unsigned long nr_all_pages;

extern void *__init alloc_large_system_hash(const char *tablename,
|
|||
|
|
@@ -57,7 +57,8 @@ struct proc_event {
PROC_EVENT_EXIT = 0x80000000
} what;
__u32 cpu;
struct timespec timestamp;
__u64 __attribute__((aligned(8))) timestamp_ns;
/* Number of nano seconds since system boot */
union { /* must be last field of proc_event struct */
struct {
__u32 err;
|
|||
|
|
@@ -216,6 +216,7 @@ COMPATIBLE_IOCTL(VT_RESIZE)
COMPATIBLE_IOCTL(VT_RESIZEX)
COMPATIBLE_IOCTL(VT_LOCKSWITCH)
COMPATIBLE_IOCTL(VT_UNLOCKSWITCH)
COMPATIBLE_IOCTL(VT_GETHIFONTMASK)
/* Little p (/dev/rtc, /dev/envctrl, etc.) */
COMPATIBLE_IOCTL(RTC_AIE_ON)
COMPATIBLE_IOCTL(RTC_AIE_OFF)
|
|||
|
|
@@ -18,6 +18,9 @@ struct completion {
#define COMPLETION_INITIALIZER(work) \
{ 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }

#define COMPLETION_INITIALIZER_ONSTACK(work) \
({ init_completion(&work); work; })

#define DECLARE_COMPLETION(work) \
struct completion work = COMPLETION_INITIALIZER(work)

@@ -28,7 +31,7 @@ struct completion {
*/
#ifdef CONFIG_LOCKDEP
# define DECLARE_COMPLETION_ONSTACK(work) \
struct completion work = ({ init_completion(&work); work; })
struct completion work = COMPLETION_INITIALIZER_ONSTACK(work)
#else
# define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work)
#endif
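COMPLETION_INITIALIZER_ONSTACK() exists so that, under CONFIG_LOCKDEP, an on-stack completion is initialised at run time and lockdep gets a valid key for it. A minimal usage sketch, assuming a hypothetical start_async_work() producer that eventually calls complete(&done) from another context:

/* Sketch only: start_async_work() is a hypothetical asynchronous producer. */
static int do_blocking_op(struct device *dev)
{
        DECLARE_COMPLETION_ONSTACK(done);       /* run-time init on the stack */

        start_async_work(dev, &done);
        wait_for_completion(&done);             /* sleeps until complete(&done) */
        return 0;
}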
|
|||
|
|
@ -9,6 +9,7 @@
|
|||
* to achieve effects such as fast scrolling by changing the origin.
|
||||
*/
|
||||
|
||||
#include <linux/wait.h>
|
||||
#include <linux/vt.h>
|
||||
|
||||
struct vt_struct;
|
||||
|
|
|
|||
|
|
@ -48,7 +48,6 @@ static inline void unregister_cpu_notifier(struct notifier_block *nb)
|
|||
{
|
||||
}
|
||||
#endif
|
||||
extern int current_in_cpu_hotplug(void);
|
||||
|
||||
int cpu_up(unsigned int cpu);
|
||||
|
||||
|
|
@ -61,10 +60,6 @@ static inline int register_cpu_notifier(struct notifier_block *nb)
|
|||
static inline void unregister_cpu_notifier(struct notifier_block *nb)
|
||||
{
|
||||
}
|
||||
static inline int current_in_cpu_hotplug(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_SMP */
|
||||
extern struct sysdev_class cpu_sysdev_class;
|
||||
|
|
@ -73,7 +68,6 @@ extern struct sysdev_class cpu_sysdev_class;
|
|||
/* Stop CPUs going up and down. */
|
||||
extern void lock_cpu_hotplug(void);
|
||||
extern void unlock_cpu_hotplug(void);
|
||||
extern int lock_cpu_hotplug_interruptible(void);
|
||||
#define hotcpu_notifier(fn, pri) { \
|
||||
static struct notifier_block fn##_nb = \
|
||||
{ .notifier_call = fn, .priority = pri }; \
|
||||
|
|
|
|||
|
|
@ -172,9 +172,6 @@ extern int __cpufreq_driver_target(struct cpufreq_policy *policy,
|
|||
unsigned int relation);
|
||||
|
||||
|
||||
/* pass an event to the cpufreq governor */
|
||||
int cpufreq_governor(unsigned int cpu, unsigned int event);
|
||||
|
||||
int cpufreq_register_governor(struct cpufreq_governor *governor);
|
||||
void cpufreq_unregister_governor(struct cpufreq_governor *governor);
|
||||
|
||||
|
|
|
|||
|
|
@ -1,6 +1,8 @@
|
|||
#ifndef __LINUX_DEBUG_LOCKING_H
|
||||
#define __LINUX_DEBUG_LOCKING_H
|
||||
|
||||
struct task_struct;
|
||||
|
||||
extern int debug_locks;
|
||||
extern int debug_locks_silent;
|
||||
|
||||
|
|
|
|||
121
include/linux/delayacct.h
Normal file
121
include/linux/delayacct.h
Normal file
|
|
@ -0,0 +1,121 @@
|
|||
/* delayacct.h - per-task delay accounting
|
||||
*
|
||||
* Copyright (C) Shailabh Nagar, IBM Corp. 2006
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
|
||||
* the GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef _LINUX_DELAYACCT_H
|
||||
#define _LINUX_DELAYACCT_H
|
||||
|
||||
#include <linux/sched.h>
|
||||
#include <linux/taskstats_kern.h>
|
||||
|
||||
/*
|
||||
* Per-task flags relevant to delay accounting
|
||||
* maintained privately to avoid exhausting similar flags in sched.h:PF_*
|
||||
* Used to set current->delays->flags
|
||||
*/
|
||||
#define DELAYACCT_PF_SWAPIN 0x00000001 /* I am doing a swapin */
|
||||
|
||||
#ifdef CONFIG_TASK_DELAY_ACCT
|
||||
|
||||
extern int delayacct_on; /* Delay accounting turned on/off */
|
||||
extern kmem_cache_t *delayacct_cache;
|
||||
extern void delayacct_init(void);
|
||||
extern void __delayacct_tsk_init(struct task_struct *);
|
||||
extern void __delayacct_tsk_exit(struct task_struct *);
|
||||
extern void __delayacct_blkio_start(void);
|
||||
extern void __delayacct_blkio_end(void);
|
||||
extern int __delayacct_add_tsk(struct taskstats *, struct task_struct *);
|
||||
extern __u64 __delayacct_blkio_ticks(struct task_struct *);
|
||||
|
||||
static inline void delayacct_set_flag(int flag)
|
||||
{
|
||||
if (current->delays)
|
||||
current->delays->flags |= flag;
|
||||
}
|
||||
|
||||
static inline void delayacct_clear_flag(int flag)
|
||||
{
|
||||
if (current->delays)
|
||||
current->delays->flags &= ~flag;
|
||||
}
|
||||
|
||||
static inline void delayacct_tsk_init(struct task_struct *tsk)
|
||||
{
|
||||
/* reinitialize in case parent's non-null pointer was dup'ed*/
|
||||
tsk->delays = NULL;
|
||||
if (delayacct_on)
|
||||
__delayacct_tsk_init(tsk);
|
||||
}
|
||||
|
||||
/* Free tsk->delays. Called from bad fork and __put_task_struct
|
||||
* where there's no risk of tsk->delays being accessed elsewhere
|
||||
*/
|
||||
static inline void delayacct_tsk_free(struct task_struct *tsk)
|
||||
{
|
||||
if (tsk->delays)
|
||||
kmem_cache_free(delayacct_cache, tsk->delays);
|
||||
tsk->delays = NULL;
|
||||
}
|
||||
|
||||
static inline void delayacct_blkio_start(void)
|
||||
{
|
||||
if (current->delays)
|
||||
__delayacct_blkio_start();
|
||||
}
|
||||
|
||||
static inline void delayacct_blkio_end(void)
|
||||
{
|
||||
if (current->delays)
|
||||
__delayacct_blkio_end();
|
||||
}
|
||||
|
||||
static inline int delayacct_add_tsk(struct taskstats *d,
|
||||
struct task_struct *tsk)
|
||||
{
|
||||
if (!delayacct_on || !tsk->delays)
|
||||
return 0;
|
||||
return __delayacct_add_tsk(d, tsk);
|
||||
}
|
||||
|
||||
static inline __u64 delayacct_blkio_ticks(struct task_struct *tsk)
|
||||
{
|
||||
if (tsk->delays)
|
||||
return __delayacct_blkio_ticks(tsk);
|
||||
return 0;
|
||||
}
|
||||
|
||||
#else
|
||||
static inline void delayacct_set_flag(int flag)
|
||||
{}
|
||||
static inline void delayacct_clear_flag(int flag)
|
||||
{}
|
||||
static inline void delayacct_init(void)
|
||||
{}
|
||||
static inline void delayacct_tsk_init(struct task_struct *tsk)
|
||||
{}
|
||||
static inline void delayacct_tsk_free(struct task_struct *tsk)
|
||||
{}
|
||||
static inline void delayacct_blkio_start(void)
|
||||
{}
|
||||
static inline void delayacct_blkio_end(void)
|
||||
{}
|
||||
static inline int delayacct_add_tsk(struct taskstats *d,
|
||||
struct task_struct *tsk)
|
||||
{ return 0; }
|
||||
static inline __u64 delayacct_blkio_ticks(struct task_struct *tsk)
|
||||
{ return 0; }
|
||||
#endif /* CONFIG_TASK_DELAY_ACCT */
|
||||
|
||||
#endif
|
||||
|
|
@ -60,6 +60,16 @@ struct elf_prstatus
|
|||
long pr_instr; /* Current instruction */
|
||||
#endif
|
||||
elf_gregset_t pr_reg; /* GP registers */
|
||||
#ifdef CONFIG_BINFMT_ELF_FDPIC
|
||||
/* When using FDPIC, the loadmap addresses need to be communicated
|
||||
* to GDB in order for GDB to do the necessary relocations. The
|
||||
* fields (below) used to communicate this information are placed
|
||||
* immediately after ``pr_reg'', so that the loadmap addresses may
|
||||
* be viewed as part of the register set if so desired.
|
||||
*/
|
||||
unsigned long pr_exec_fdpic_loadmap;
|
||||
unsigned long pr_interp_fdpic_loadmap;
|
||||
#endif
|
||||
int pr_fpvalid; /* True if math co-processor being used. */
|
||||
};
|
||||
|
||||
|
|
|
|||
|
|
@@ -492,6 +492,15 @@ static inline struct ext3_inode_info *EXT3_I(struct inode *inode)
{
return container_of(inode, struct ext3_inode_info, vfs_inode);
}

static inline int ext3_valid_inum(struct super_block *sb, unsigned long ino)
{
return ino == EXT3_ROOT_INO ||
ino == EXT3_JOURNAL_INO ||
ino == EXT3_RESIZE_INO ||
(ino >= EXT3_FIRST_INO(sb) &&
ino <= le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count));
}
#else
/* Assume that user mode programs are passing in an ext3fs superblock, not
* a kernel struct super_block. This will allow us to call the feature-test
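Restated informally, the new helper accepts an inode number only if it is one of the fixed reserved inodes or lies inside the range the superblock advertises (an interpretation, not text from the diff):

/* Informal restatement of ext3_valid_inum():
 *   valid  <=>  ino is EXT3_ROOT_INO, EXT3_JOURNAL_INO or EXT3_RESIZE_INO,
 *               or EXT3_FIRST_INO(sb) <= ino <= s_es->s_inodes_count
 * Anything else (ino 0, a number beyond the inode table, ...) is rejected,
 * giving callers a cheap sanity check on an inode number before using it. */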
|
|||
|
|
@ -1,7 +1,6 @@
|
|||
#ifndef _LINUX_FB_H
|
||||
#define _LINUX_FB_H
|
||||
|
||||
#include <linux/backlight.h>
|
||||
#include <asm/types.h>
|
||||
|
||||
/* Definitions of frame buffers */
|
||||
|
|
@ -377,11 +376,11 @@ struct fb_cursor {
|
|||
|
||||
#include <linux/fs.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/tty.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/workqueue.h>
|
||||
#include <linux/notifier.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/backlight.h>
|
||||
#include <asm/io.h>
|
||||
|
||||
struct vm_area_struct;
|
||||
|
|
@ -525,7 +524,7 @@ struct fb_event {
|
|||
|
||||
extern int fb_register_client(struct notifier_block *nb);
|
||||
extern int fb_unregister_client(struct notifier_block *nb);
|
||||
|
||||
extern int fb_notifier_call_chain(unsigned long val, void *v);
|
||||
/*
|
||||
* Pixmap structure definition
|
||||
*
|
||||
|
|
|
|||
|
|
@@ -27,6 +27,10 @@
#define BLOCK_SIZE_BITS 10
#define BLOCK_SIZE (1<<BLOCK_SIZE_BITS)

#define SEEK_SET 0 /* seek relative to beginning of file */
#define SEEK_CUR 1 /* seek relative to current file position */
#define SEEK_END 2 /* seek relative to end of file */

/* And dynamically-tunable limits and defaults: */
struct files_stat_struct {
int nr_files; /* read only */
@@ -566,13 +570,14 @@ struct inode {
* 3: quota file
*
* The locking order between these classes is
* parent -> child -> normal -> quota
* parent -> child -> normal -> xattr -> quota
*/
enum inode_i_mutex_lock_class
{
I_MUTEX_NORMAL,
I_MUTEX_PARENT,
I_MUTEX_CHILD,
I_MUTEX_XATTR,
I_MUTEX_QUOTA
};

@@ -716,6 +721,7 @@ extern spinlock_t files_lock;
#define FL_POSIX 1
#define FL_FLOCK 2
#define FL_ACCESS 8 /* not trying to lock, just looking */
#define FL_EXISTS 16 /* when unlocking, test for existence */
#define FL_LEASE 32 /* lease held on this file */
#define FL_CLOSE 64 /* unlock on close */
#define FL_SLEEP 128 /* A blocking lock */
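The new I_MUTEX_XATTR class sits between normal and quota in the documented ordering. A minimal sketch of how such a class is consumed with the generic mutex_lock_nested() annotation (the surrounding function is illustrative, not from this diff):

/* Sketch: lockdep-annotated i_mutex nesting using the new xattr class. */
static void lock_dir_and_xattr(struct inode *dir, struct inode *xattr_inode)
{
        mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
        mutex_lock_nested(&xattr_inode->i_mutex, I_MUTEX_XATTR);
        /* ... operate on both inodes ... */
        mutex_unlock(&xattr_inode->i_mutex);
        mutex_unlock(&dir->i_mutex);
}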
|
|||
|
|
@ -69,34 +69,21 @@ enum fs_ioport {
|
|||
fsiop_porte,
|
||||
};
|
||||
|
||||
struct fs_mii_bus_info {
|
||||
int method; /* mii method */
|
||||
int id; /* the id of the mii_bus */
|
||||
int disable_aneg; /* if the controller needs to negothiate speed & duplex */
|
||||
int lpa; /* the default board-specific vallues will be applied otherwise */
|
||||
|
||||
union {
|
||||
struct {
|
||||
int duplex;
|
||||
int speed;
|
||||
} fixed;
|
||||
|
||||
struct {
|
||||
/* nothing */
|
||||
} fec;
|
||||
|
||||
struct {
|
||||
/* nothing */
|
||||
} scc;
|
||||
|
||||
struct {
|
||||
int mdio_port; /* port & bit for MDIO */
|
||||
int mdio_bit;
|
||||
int mdc_port; /* port & bit for MDC */
|
||||
int mdc_bit;
|
||||
int delay; /* delay in us */
|
||||
} bitbang;
|
||||
} i;
|
||||
struct fs_mii_bit {
|
||||
u32 offset;
|
||||
u8 bit;
|
||||
u8 polarity;
|
||||
};
|
||||
struct fs_mii_bb_platform_info {
|
||||
struct fs_mii_bit mdio_dir;
|
||||
struct fs_mii_bit mdio_dat;
|
||||
struct fs_mii_bit mdc_dat;
|
||||
int mdio_port; /* port & bit for MDIO */
|
||||
int mdio_bit;
|
||||
int mdc_port; /* port & bit for MDC */
|
||||
int mdc_bit;
|
||||
int delay; /* delay in us */
|
||||
int irq[32]; /* irqs per phy's */
|
||||
};
|
||||
|
||||
struct fs_platform_info {
|
||||
|
|
@ -119,6 +106,7 @@ struct fs_platform_info {
|
|||
u32 device_flags;
|
||||
|
||||
int phy_addr; /* the phy address (-1 no phy) */
|
||||
const char* bus_id;
|
||||
int phy_irq; /* the phy irq (if it exists) */
|
||||
|
||||
const struct fs_mii_bus_info *bus_info;
|
||||
|
|
@ -130,6 +118,10 @@ struct fs_platform_info {
|
|||
int napi_weight; /* NAPI weight */
|
||||
|
||||
int use_rmii; /* use RMII mode */
|
||||
int has_phy; /* if the network is phy container as well...*/
|
||||
};
|
||||
struct fs_mii_fec_platform_info {
|
||||
u32 irq[32];
|
||||
u32 mii_speed;
|
||||
};
|
||||
|
||||
#endif
|
||||
|
|
|
|||
|
|
@@ -67,7 +67,7 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
if (source) {
inotify_inode_queue_event(source, IN_MOVE_SELF, 0, NULL, NULL);
}
audit_inode_child(new_name, source, new_dir->i_ino);
audit_inode_child(new_name, source, new_dir);
}

/*
@@ -98,7 +98,7 @@ static inline void fsnotify_create(struct inode *inode, struct dentry *dentry)
inode_dir_notify(inode, DN_CREATE);
inotify_inode_queue_event(inode, IN_CREATE, 0, dentry->d_name.name,
dentry->d_inode);
audit_inode_child(dentry->d_name.name, dentry->d_inode, inode->i_ino);
audit_inode_child(dentry->d_name.name, dentry->d_inode, inode);
}

/*
@@ -109,7 +109,7 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
inode_dir_notify(inode, DN_CREATE);
inotify_inode_queue_event(inode, IN_CREATE | IN_ISDIR, 0,
dentry->d_name.name, dentry->d_inode);
audit_inode_child(dentry->d_name.name, dentry->d_inode, inode->i_ino);
audit_inode_child(dentry->d_name.name, dentry->d_inode, inode);
}

/*
|
|||
|
|
@@ -96,7 +96,8 @@ struct robust_list_head {
long do_futex(u32 __user *uaddr, int op, u32 val, unsigned long timeout,
u32 __user *uaddr2, u32 val2, u32 val3);

extern int handle_futex_death(u32 __user *uaddr, struct task_struct *curr);
extern int
handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi);

#ifdef CONFIG_FUTEX
extern void exit_robust_list(struct task_struct *curr);
|
|||
|
|
@ -224,8 +224,6 @@ static __inline__ void debug_frame(const struct sk_buff *skb)
|
|||
int hdlc_open(struct net_device *dev);
|
||||
/* Must be called by hardware driver when HDLC device is being closed */
|
||||
void hdlc_close(struct net_device *dev);
|
||||
/* Called by hardware driver when DCD line level changes */
|
||||
void hdlc_set_carrier(int on, struct net_device *dev);
|
||||
|
||||
/* May be used by hardware driver to gain control over HDLC device */
|
||||
static __inline__ void hdlc_proto_detach(hdlc_device *hdlc)
|
||||
|
|
|
|||
|
|
@ -80,6 +80,7 @@ struct hrtimer_sleeper {
|
|||
* @get_softirq_time: function to retrieve the current time from the softirq
|
||||
* @curr_timer: the timer which is executing a callback right now
|
||||
* @softirq_time: the time when running the hrtimer queue in the softirq
|
||||
* @lock_key: the lock_class_key for use with lockdep
|
||||
*/
|
||||
struct hrtimer_base {
|
||||
clockid_t index;
|
||||
|
|
|
|||
|
|
@ -115,6 +115,7 @@
|
|||
#define I2C_DRIVERID_BT866 85 /* Conexant bt866 video encoder */
|
||||
#define I2C_DRIVERID_KS0127 86 /* Samsung ks0127 video decoder */
|
||||
#define I2C_DRIVERID_TLV320AIC23B 87 /* TI TLV320AIC23B audio codec */
|
||||
#define I2C_DRIVERID_ISL1208 88 /* Intersil ISL1208 RTC */
|
||||
|
||||
#define I2C_DRIVERID_I2CDEV 900
|
||||
#define I2C_DRIVERID_ARP 902 /* SMBus ARP Client */
|
||||
|
|
|
|||
|
|
@ -193,6 +193,8 @@ struct i2c_algorithm {
|
|||
to NULL. If an adapter algorithm can do SMBus access, set
|
||||
smbus_xfer. If set to NULL, the SMBus protocol is simulated
|
||||
using common I2C messages */
|
||||
/* master_xfer should return the number of messages successfully
|
||||
processed, or a negative value on error */
|
||||
int (*master_xfer)(struct i2c_adapter *adap,struct i2c_msg *msgs,
|
||||
int num);
|
||||
int (*smbus_xfer) (struct i2c_adapter *adap, u16 addr,
|
||||
|
|
|
|||
|
|
@ -571,6 +571,7 @@ typedef struct ide_drive_s {
|
|||
u8 waiting_for_dma; /* dma currently in progress */
|
||||
u8 unmask; /* okay to unmask other irqs */
|
||||
u8 bswap; /* byte swap data */
|
||||
u8 noflush; /* don't attempt flushes */
|
||||
u8 dsc_overlap; /* DSC overlap */
|
||||
u8 nice1; /* give potential excess bandwidth */
|
||||
|
||||
|
|
|
|||
|
|
@ -23,8 +23,8 @@ struct vlan_collection;
|
|||
struct vlan_dev_info;
|
||||
struct hlist_node;
|
||||
|
||||
#include <linux/proc_fs.h> /* for proc_dir_entry */
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/etherdevice.h>
|
||||
|
||||
#define VLAN_HLEN 4 /* The additional bytes (on top of the Ethernet header)
|
||||
* that VLAN requires.
|
||||
|
|
@ -155,6 +155,11 @@ static inline int __vlan_hwaccel_rx(struct sk_buff *skb,
|
|||
{
|
||||
struct net_device_stats *stats;
|
||||
|
||||
if (skb_bond_should_drop(skb)) {
|
||||
dev_kfree_skb_any(skb);
|
||||
return NET_RX_DROP;
|
||||
}
|
||||
|
||||
skb->dev = grp->vlan_devices[vlan_tag & VLAN_VID_MASK];
|
||||
if (skb->dev == NULL) {
|
||||
dev_kfree_skb_any(skb);
|
||||
|
|
@ -185,7 +190,8 @@ static inline int __vlan_hwaccel_rx(struct sk_buff *skb,
|
|||
* This allows the VLAN to have a different MAC than the underlying
|
||||
* device, and still route correctly.
|
||||
*/
|
||||
if (!memcmp(eth_hdr(skb)->h_dest, skb->dev->dev_addr, ETH_ALEN))
|
||||
if (!compare_ether_addr(eth_hdr(skb)->h_dest,
|
||||
skb->dev->dev_addr))
|
||||
skb->pkt_type = PACKET_HOST;
|
||||
break;
|
||||
};
|
||||
|
|
|
|||
|
|
@ -97,7 +97,7 @@ extern struct resource iomem_resource;
|
|||
extern int request_resource(struct resource *root, struct resource *new);
|
||||
extern struct resource * ____request_resource(struct resource *root, struct resource *new);
|
||||
extern int release_resource(struct resource *new);
|
||||
extern __deprecated_for_modules int insert_resource(struct resource *parent, struct resource *new);
|
||||
extern int insert_resource(struct resource *parent, struct resource *new);
|
||||
extern int allocate_resource(struct resource *root, struct resource *new,
|
||||
resource_size_t size, resource_size_t min,
|
||||
resource_size_t max, resource_size_t align,
|
||||
|
|
|
|||
|
|
@@ -59,27 +59,6 @@ static inline int task_nice_ioprio(struct task_struct *task)
/*
* For inheritance, return the highest of the two given priorities
*/
static inline int ioprio_best(unsigned short aprio, unsigned short bprio)
{
unsigned short aclass = IOPRIO_PRIO_CLASS(aprio);
unsigned short bclass = IOPRIO_PRIO_CLASS(bprio);

if (!ioprio_valid(aprio))
return bprio;
if (!ioprio_valid(bprio))
return aprio;

if (aclass == IOPRIO_CLASS_NONE)
aclass = IOPRIO_CLASS_BE;
if (bclass == IOPRIO_CLASS_NONE)
bclass = IOPRIO_CLASS_BE;

if (aclass == bclass)
return min(aprio, bprio);
if (aclass > bclass)
return bprio;
else
return aprio;
}
extern int ioprio_best(unsigned short aprio, unsigned short bprio);

#endif
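The function body is unchanged by this hunk, only moved out of line behind the new extern declaration; the comparison rules are easiest to see with two concrete inputs (illustrative values, not from the diff):

/* Two illustrative calls (class values order as RT < BE < IDLE):
 *   ioprio_best(RT prio, BE prio)
 *     -> classes differ and RT is the lower class value, so the
 *        realtime priority is returned.
 *   ioprio_best(BE prio with data 6, BE prio with data 2)
 *     -> same class, min() returns the numerically smaller value,
 *        i.e. the data-2 entry, the stronger best-effort level. */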
|
|||
|
|
@ -47,8 +47,8 @@
|
|||
#define IRQ_WAITING 0x00200000 /* IRQ not yet seen - for autodetection */
|
||||
#define IRQ_LEVEL 0x00400000 /* IRQ level triggered */
|
||||
#define IRQ_MASKED 0x00800000 /* IRQ masked - shouldn't be seen again */
|
||||
#define IRQ_PER_CPU 0x01000000 /* IRQ is per CPU */
|
||||
#ifdef CONFIG_IRQ_PER_CPU
|
||||
# define IRQ_PER_CPU 0x01000000 /* IRQ is per CPU */
|
||||
# define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
|
||||
#else
|
||||
# define CHECK_IRQ_PER_CPU(var) 0
|
||||
|
|
@ -58,6 +58,7 @@
|
|||
#define IRQ_NOREQUEST 0x04000000 /* IRQ cannot be requested */
|
||||
#define IRQ_NOAUTOEN 0x08000000 /* IRQ will not be enabled on request irq */
|
||||
#define IRQ_DELAYED_DISABLE 0x10000000 /* IRQ disable (masking) happens delayed. */
|
||||
#define IRQ_WAKEUP 0x20000000 /* IRQ triggers system wakeup */
|
||||
|
||||
struct proc_dir_entry;
|
||||
|
||||
|
|
@ -124,6 +125,7 @@ struct irq_chip {
|
|||
* @action: the irq action chain
|
||||
* @status: status information
|
||||
* @depth: disable-depth, for nested irq_disable() calls
|
||||
* @wake_depth: enable depth, for multiple set_irq_wake() callers
|
||||
* @irq_count: stats field to detect stalled irqs
|
||||
* @irqs_unhandled: stats field for spurious unhandled interrupts
|
||||
* @lock: locking for SMP
|
||||
|
|
@ -147,6 +149,7 @@ struct irq_desc {
|
|||
unsigned int status; /* IRQ status */
|
||||
|
||||
unsigned int depth; /* nested irq disables */
|
||||
unsigned int wake_depth; /* nested wake enables */
|
||||
unsigned int irq_count; /* For detecting broken IRQs */
|
||||
unsigned int irqs_unhandled;
|
||||
spinlock_t lock;
|
||||
|
|
|
|||
|
|
@ -72,6 +72,9 @@ extern int journal_enable_debug;
|
|||
#endif
|
||||
|
||||
extern void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry);
|
||||
extern void * jbd_slab_alloc(size_t size, gfp_t flags);
|
||||
extern void jbd_slab_free(void *ptr, size_t size);
|
||||
|
||||
#define jbd_kmalloc(size, flags) \
|
||||
__jbd_kmalloc(__FUNCTION__, (size), (flags), journal_oom_retry)
|
||||
#define jbd_rep_kmalloc(size, flags) \
|
||||
|
|
|
|||
|
|
@@ -47,8 +47,8 @@
* - (NOM / DEN) fits in (32 - LSH) bits.
* - (NOM % DEN) fits in (32 - LSH) bits.
*/
#define SH_DIV(NOM,DEN,LSH) ( ((NOM / DEN) << LSH) \
+ (((NOM % DEN) << LSH) + DEN / 2) / DEN)
#define SH_DIV(NOM,DEN,LSH) ( (((NOM) / (DEN)) << (LSH)) \
+ ((((NOM) % (DEN)) << (LSH)) + (DEN) / 2) / (DEN))

/* HZ is the requested value. ACTHZ is actual HZ ("<< 8" is for accuracy) */
#define ACTHZ (SH_DIV (CLOCK_TICK_RATE, LATCH, 8))
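The SH_DIV() change is pure macro hygiene: each parameter is now parenthesised so an expression argument cannot mis-bind. A small illustrative expansion (not taken from the diff):

/* With the old definition, SH_DIV(a + b, c, 3) expanded in part to
 *     ((a + b / c) << 3) ...
 * i.e. only b was divided by c, because / binds tighter than +.
 * With the parenthesised definition the same call expands to
 *     (((a + b) / (c)) << (3)) ...
 * which performs the intended rounded shift-divide on the whole sum. */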
|
|||
|
|
@@ -33,6 +33,7 @@ extern const char linux_banner[];
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1))
#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

#define KERN_EMERG "<0>" /* system is unusable */
#define KERN_ALERT "<1>" /* action must be taken immediately */
@@ -209,6 +210,7 @@ extern enum system_states {
extern void dump_stack(void);

#ifdef DEBUG
/* If you are writing a driver, please use dev_dbg instead */
#define pr_debug(fmt,arg...) \
printk(KERN_DEBUG fmt,##arg)
#else
|
|||
|
|
@ -46,8 +46,6 @@ enum kobject_action {
|
|||
KOBJ_UMOUNT = (__force kobject_action_t) 0x05, /* umount event for block devices (broken) */
|
||||
KOBJ_OFFLINE = (__force kobject_action_t) 0x06, /* device offline */
|
||||
KOBJ_ONLINE = (__force kobject_action_t) 0x07, /* device online */
|
||||
KOBJ_UNDOCK = (__force kobject_action_t) 0x08, /* undocking */
|
||||
KOBJ_DOCK = (__force kobject_action_t) 0x09, /* dock */
|
||||
};
|
||||
|
||||
struct kobject {
|
||||
|
|
|
|||
|
|
@ -28,7 +28,6 @@ struct task_struct *kthread_create(int (*threadfn)(void *data),
|
|||
|
||||
void kthread_bind(struct task_struct *k, unsigned int cpu);
|
||||
int kthread_stop(struct task_struct *k);
|
||||
int kthread_stop_sem(struct task_struct *k, struct semaphore *s);
|
||||
int kthread_should_stop(void);
|
||||
|
||||
#endif /* _LINUX_KTHREAD_H */
|
||||
|
|
|
|||
|
|
@@ -56,7 +56,8 @@ typedef union {
#endif
} ktime_t;

#define KTIME_MAX (~((u64)1 << 63))
#define KTIME_MAX ((s64)~((u64)1 << 63))
#define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)

/*
* ktime_t definitions when using the 64-bit scalar representation:
@@ -73,6 +74,10 @@ typedef union {
*/
static inline ktime_t ktime_set(const long secs, const unsigned long nsecs)
{
#if (BITS_PER_LONG == 64)
if (unlikely(secs >= KTIME_SEC_MAX))
return (ktime_t){ .tv64 = KTIME_MAX };
#endif
return (ktime_t) { .tv64 = (s64)secs * NSEC_PER_SEC + (s64)nsecs };
}

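The new guard in ktime_set() only matters on 64-bit builds, where seconds are converted by a direct s64 multiply; the arithmetic behind it (assuming NSEC_PER_SEC == 1000000000, as defined elsewhere in the kernel headers):

/* KTIME_MAX     = 2^63 - 1         = 9223372036854775807 ns
 * KTIME_SEC_MAX = KTIME_MAX / 10^9 = 9223372036 s (roughly 292 years)
 * Any secs >= KTIME_SEC_MAX would overflow secs * NSEC_PER_SEC, so
 * ktime_set() now saturates such requests to KTIME_MAX instead. */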
|
|||
|
|
@ -131,6 +131,7 @@ enum {
|
|||
ATA_DFLAG_CFG_MASK = (1 << 8) - 1,
|
||||
|
||||
ATA_DFLAG_PIO = (1 << 8), /* device currently in PIO mode */
|
||||
ATA_DFLAG_SUSPENDED = (1 << 9), /* device suspended */
|
||||
ATA_DFLAG_INIT_MASK = (1 << 16) - 1,
|
||||
|
||||
ATA_DFLAG_DETACH = (1 << 16),
|
||||
|
|
@ -160,22 +161,28 @@ enum {
|
|||
ATA_FLAG_HRST_TO_RESUME = (1 << 11), /* hardreset to resume phy */
|
||||
ATA_FLAG_SKIP_D2H_BSY = (1 << 12), /* can't wait for the first D2H
|
||||
* Register FIS clearing BSY */
|
||||
|
||||
ATA_FLAG_DEBUGMSG = (1 << 13),
|
||||
ATA_FLAG_FLUSH_PORT_TASK = (1 << 14), /* flush port task */
|
||||
|
||||
ATA_FLAG_EH_PENDING = (1 << 15), /* EH pending */
|
||||
ATA_FLAG_EH_IN_PROGRESS = (1 << 16), /* EH in progress */
|
||||
ATA_FLAG_FROZEN = (1 << 17), /* port is frozen */
|
||||
ATA_FLAG_RECOVERED = (1 << 18), /* recovery action performed */
|
||||
ATA_FLAG_LOADING = (1 << 19), /* boot/loading probe */
|
||||
ATA_FLAG_UNLOADING = (1 << 20), /* module is unloading */
|
||||
ATA_FLAG_SCSI_HOTPLUG = (1 << 21), /* SCSI hotplug scheduled */
|
||||
/* The following flag belongs to ap->pflags but is kept in
|
||||
* ap->flags because it's referenced in many LLDs and will be
|
||||
* removed in not-too-distant future.
|
||||
*/
|
||||
ATA_FLAG_DISABLED = (1 << 23), /* port is disabled, ignore it */
|
||||
|
||||
ATA_FLAG_DISABLED = (1 << 22), /* port is disabled, ignore it */
|
||||
ATA_FLAG_SUSPENDED = (1 << 23), /* port is suspended (power) */
|
||||
/* bits 24:31 of ap->flags are reserved for LLD specific flags */
|
||||
|
||||
/* bits 24:31 of ap->flags are reserved for LLDD specific flags */
|
||||
/* struct ata_port pflags */
|
||||
ATA_PFLAG_EH_PENDING = (1 << 0), /* EH pending */
|
||||
ATA_PFLAG_EH_IN_PROGRESS = (1 << 1), /* EH in progress */
|
||||
ATA_PFLAG_FROZEN = (1 << 2), /* port is frozen */
|
||||
ATA_PFLAG_RECOVERED = (1 << 3), /* recovery action performed */
|
||||
ATA_PFLAG_LOADING = (1 << 4), /* boot/loading probe */
|
||||
ATA_PFLAG_UNLOADING = (1 << 5), /* module is unloading */
|
||||
ATA_PFLAG_SCSI_HOTPLUG = (1 << 6), /* SCSI hotplug scheduled */
|
||||
|
||||
ATA_PFLAG_FLUSH_PORT_TASK = (1 << 16), /* flush port task */
|
||||
ATA_PFLAG_SUSPENDED = (1 << 17), /* port is suspended (power) */
|
||||
ATA_PFLAG_PM_PENDING = (1 << 18), /* PM operation pending */
|
||||
|
||||
/* struct ata_queued_cmd flags */
|
||||
ATA_QCFLAG_ACTIVE = (1 << 0), /* cmd not yet ack'd to scsi lyer */
|
||||
|
|
@ -248,15 +255,24 @@ enum {
|
|||
ATA_EH_REVALIDATE = (1 << 0),
|
||||
ATA_EH_SOFTRESET = (1 << 1),
|
||||
ATA_EH_HARDRESET = (1 << 2),
|
||||
ATA_EH_SUSPEND = (1 << 3),
|
||||
ATA_EH_RESUME = (1 << 4),
|
||||
ATA_EH_PM_FREEZE = (1 << 5),
|
||||
|
||||
ATA_EH_RESET_MASK = ATA_EH_SOFTRESET | ATA_EH_HARDRESET,
|
||||
ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE,
|
||||
ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE | ATA_EH_SUSPEND |
|
||||
ATA_EH_RESUME | ATA_EH_PM_FREEZE,
|
||||
|
||||
/* ata_eh_info->flags */
|
||||
ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */
|
||||
ATA_EHI_RESUME_LINK = (1 << 1), /* resume link (reset modifier) */
|
||||
ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */
|
||||
ATA_EHI_QUIET = (1 << 3), /* be quiet */
|
||||
|
||||
ATA_EHI_DID_RESET = (1 << 16), /* already reset this port */
|
||||
|
||||
ATA_EHI_RESET_MODIFIER_MASK = ATA_EHI_RESUME_LINK,
|
||||
|
||||
/* max repeat if error condition is still set after ->error_handler */
|
||||
ATA_EH_MAX_REPEAT = 5,
|
||||
|
||||
|
|
@ -486,6 +502,7 @@ struct ata_port {
|
|||
const struct ata_port_operations *ops;
|
||||
spinlock_t *lock;
|
||||
unsigned long flags; /* ATA_FLAG_xxx */
|
||||
unsigned int pflags; /* ATA_PFLAG_xxx */
|
||||
unsigned int id; /* unique id req'd by scsi midlyr */
|
||||
unsigned int port_no; /* unique port #; from zero */
|
||||
unsigned int hard_port_no; /* hardware port #; from zero */
|
||||
|
|
@ -535,6 +552,9 @@ struct ata_port {
|
|||
struct list_head eh_done_q;
|
||||
wait_queue_head_t eh_wait_q;
|
||||
|
||||
pm_message_t pm_mesg;
|
||||
int *pm_result;
|
||||
|
||||
void *private_data;
|
||||
|
||||
u8 sector_buf[ATA_SECT_SIZE]; /* owned by EH */
|
||||
|
|
@ -589,6 +609,9 @@ struct ata_port_operations {
|
|||
void (*scr_write) (struct ata_port *ap, unsigned int sc_reg,
|
||||
u32 val);
|
||||
|
||||
int (*port_suspend) (struct ata_port *ap, pm_message_t mesg);
|
||||
int (*port_resume) (struct ata_port *ap);
|
||||
|
||||
int (*port_start) (struct ata_port *ap);
|
||||
void (*port_stop) (struct ata_port *ap);
|
||||
|
||||
|
|
@ -622,9 +645,18 @@ struct ata_timing {
|
|||
|
||||
#define FIT(v,vmin,vmax) max_t(short,min_t(short,v,vmax),vmin)
|
||||
|
||||
extern const unsigned long sata_deb_timing_boot[];
|
||||
extern const unsigned long sata_deb_timing_eh[];
|
||||
extern const unsigned long sata_deb_timing_before_fsrst[];
|
||||
extern const unsigned long sata_deb_timing_normal[];
|
||||
extern const unsigned long sata_deb_timing_hotplug[];
|
||||
extern const unsigned long sata_deb_timing_long[];
|
||||
|
||||
static inline const unsigned long *
|
||||
sata_ehc_deb_timing(struct ata_eh_context *ehc)
|
||||
{
|
||||
if (ehc->i.flags & ATA_EHI_HOTPLUGGED)
|
||||
return sata_deb_timing_hotplug;
|
||||
else
|
||||
return sata_deb_timing_normal;
|
||||
}
|
||||
|
||||
extern void ata_port_probe(struct ata_port *);
|
||||
extern void __sata_phy_reset(struct ata_port *ap);
|
||||
|
|
@ -644,6 +676,8 @@ extern void ata_std_ports(struct ata_ioports *ioaddr);
|
|||
extern int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
|
||||
unsigned int n_ports);
|
||||
extern void ata_pci_remove_one (struct pci_dev *pdev);
|
||||
extern void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t state);
|
||||
extern void ata_pci_device_do_resume(struct pci_dev *pdev);
|
||||
extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state);
|
||||
extern int ata_pci_device_resume(struct pci_dev *pdev);
|
||||
extern int ata_pci_clear_simplex(struct pci_dev *pdev);
|
||||
|
|
@ -664,8 +698,9 @@ extern int ata_port_online(struct ata_port *ap);
|
|||
extern int ata_port_offline(struct ata_port *ap);
|
||||
extern int ata_scsi_device_resume(struct scsi_device *);
|
||||
extern int ata_scsi_device_suspend(struct scsi_device *, pm_message_t state);
|
||||
extern int ata_device_resume(struct ata_device *);
|
||||
extern int ata_device_suspend(struct ata_device *, pm_message_t state);
|
||||
extern int ata_host_set_suspend(struct ata_host_set *host_set,
|
||||
pm_message_t mesg);
|
||||
extern void ata_host_set_resume(struct ata_host_set *host_set);
|
||||
extern int ata_ratelimit(void);
|
||||
extern unsigned int ata_busy_sleep(struct ata_port *ap,
|
||||
unsigned long timeout_pat,
|
||||
|
|
@ -825,19 +860,24 @@ extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
|
|||
(ehi)->desc_len = 0; \
|
||||
} while (0)
|
||||
|
||||
static inline void ata_ehi_hotplugged(struct ata_eh_info *ehi)
|
||||
static inline void __ata_ehi_hotplugged(struct ata_eh_info *ehi)
|
||||
{
|
||||
if (ehi->flags & ATA_EHI_HOTPLUGGED)
|
||||
return;
|
||||
|
||||
ehi->flags |= ATA_EHI_HOTPLUGGED;
|
||||
ehi->flags |= ATA_EHI_HOTPLUGGED | ATA_EHI_RESUME_LINK;
|
||||
ehi->hotplug_timestamp = jiffies;
|
||||
|
||||
ehi->err_mask |= AC_ERR_ATA_BUS;
|
||||
ehi->action |= ATA_EH_SOFTRESET;
|
||||
ehi->probe_mask |= (1 << ATA_MAX_DEVICES) - 1;
|
||||
}
|
||||
|
||||
static inline void ata_ehi_hotplugged(struct ata_eh_info *ehi)
|
||||
{
|
||||
__ata_ehi_hotplugged(ehi);
|
||||
ehi->err_mask |= AC_ERR_ATA_BUS;
|
||||
}
|
||||
|
||||
/*
|
||||
* qc helpers
|
||||
*/
|
||||
|
|
@ -921,6 +961,11 @@ static inline unsigned int ata_dev_absent(const struct ata_device *dev)
|
|||
return ata_class_absent(dev->class);
|
||||
}
|
||||
|
||||
static inline unsigned int ata_dev_ready(const struct ata_device *dev)
|
||||
{
|
||||
return ata_dev_enabled(dev) && !(dev->flags & ATA_DFLAG_SUSPENDED);
|
||||
}
|
||||
|
||||
/*
|
||||
* port helpers
|
||||
*/
|
||||
|
|
|
|||
|
|
@@ -264,6 +264,17 @@ static inline void list_move_tail(struct list_head *list,
list_add_tail(list, head);
}

/**
* list_is_last - tests whether @list is the last entry in list @head
* @list: the entry to test
* @head: the head of the list
*/
static inline int list_is_last(const struct list_head *list,
const struct list_head *head)
{
return list->next == head;
}

/**
* list_empty - tests whether a list is empty
* @head: the list to test.
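A minimal usage sketch for the new list_is_last() helper (struct item and dump_items() are illustrative, not part of this diff):

struct item {
        struct list_head node;
        int val;
};

/* Print values comma-separated and end the line after the last entry. */
static void dump_items(struct list_head *items)
{
        struct item *it;

        list_for_each_entry(it, items, node)
                printk("%d%s", it->val,
                       list_is_last(&it->node, items) ? "\n" : ", ");
}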
|
|||
|
|
@ -123,7 +123,6 @@ struct nlm_block {
|
|||
unsigned int b_id; /* block id */
|
||||
unsigned char b_queued; /* re-queued */
|
||||
unsigned char b_granted; /* VFS granted lock */
|
||||
unsigned char b_done; /* callback complete */
|
||||
struct nlm_file * b_file; /* file in question */
|
||||
};
|
||||
|
||||
|
|
|
|||
|
|
@ -120,7 +120,7 @@ struct lock_class {
|
|||
*/
|
||||
struct lockdep_map {
|
||||
struct lock_class_key *key;
|
||||
struct lock_class *class[MAX_LOCKDEP_SUBCLASSES];
|
||||
struct lock_class *class_cache;
|
||||
const char *name;
|
||||
};
|
||||
|
||||
|
|
|
|||
|
|
@ -89,4 +89,11 @@ extern spinlock_t rtc_lock; /* serialize CMOS RAM access */
|
|||
# define RTC_VRT 0x80 /* valid RAM and time */
|
||||
/**********************************************************************/
|
||||
|
||||
#ifndef ARCH_RTC_LOCATION /* Override by <asm/mc146818rtc.h>? */
|
||||
|
||||
#define RTC_IO_EXTENT 0x8
|
||||
#define RTC_IOMAPPED 1 /* Default to I/O mapping. */
|
||||
|
||||
#endif /* ARCH_RTC_LOCATION */
|
||||
|
||||
#endif /* _MC146818RTC_H */
|
||||
|
|
|
|||
|
|
@ -336,6 +336,7 @@ static inline void init_page_count(struct page *page)
|
|||
}
|
||||
|
||||
void put_page(struct page *page);
|
||||
void put_pages_list(struct list_head *pages);
|
||||
|
||||
void split_page(struct page *page, unsigned int order);
|
||||
|
||||
|
|
|
|||
|
|
@ -77,7 +77,7 @@ struct mmc_host {
|
|||
struct device *dev;
|
||||
struct class_device class_dev;
|
||||
int index;
|
||||
struct mmc_host_ops *ops;
|
||||
const struct mmc_host_ops *ops;
|
||||
unsigned int f_min;
|
||||
unsigned int f_max;
|
||||
u32 ocr_avail;
|
||||
|
|
|
|||
|
|
@ -105,6 +105,8 @@ extern int mmc_wait_for_cmd(struct mmc_host *, struct mmc_command *, int);
|
|||
extern int mmc_wait_for_app_cmd(struct mmc_host *, unsigned int,
|
||||
struct mmc_command *, int);
|
||||
|
||||
extern void mmc_set_data_timeout(struct mmc_data *, const struct mmc_card *, int);
|
||||
|
||||
extern int __mmc_claim_host(struct mmc_host *host, struct mmc_card *card);
|
||||
|
||||
static inline void mmc_claim_host(struct mmc_host *host)
|
||||
|
|
|
|||
|
|
@ -77,6 +77,7 @@ struct per_cpu_pages {
|
|||
struct per_cpu_pageset {
|
||||
struct per_cpu_pages pcp[2]; /* 0: hot. 1: cold */
|
||||
#ifdef CONFIG_SMP
|
||||
s8 stat_threshold;
|
||||
s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
|
||||
#endif
|
||||
} ____cacheline_aligned_in_smp;
|
||||
|
|
|
|||
|
|
@ -362,10 +362,8 @@ int is_module_address(unsigned long addr);
|
|||
|
||||
/* Returns module and fills in value, defined and namebuf, or NULL if
|
||||
symnum out of range. */
|
||||
struct module *module_get_kallsym(unsigned int symnum,
|
||||
unsigned long *value,
|
||||
char *type,
|
||||
char namebuf[128]);
|
||||
struct module *module_get_kallsym(unsigned int symnum, unsigned long *value,
|
||||
char *type, char *name, size_t namelen);
|
||||
|
||||
/* Look for this name: can be of form module:name. */
|
||||
unsigned long module_kallsyms_lookup_name(const char *name);
|
||||
|
|
@ -535,8 +533,8 @@ static inline const char *module_address_lookup(unsigned long addr,
|
|||
|
||||
static inline struct module *module_get_kallsym(unsigned int symnum,
|
||||
unsigned long *value,
|
||||
char *type,
|
||||
char namebuf[128])
|
||||
char *type, char *name,
|
||||
size_t namelen)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -11,7 +11,7 @@ struct open_intent {
struct file *file;
};

enum { MAX_NESTED_LINKS = 5 };
enum { MAX_NESTED_LINKS = 8 };

struct nameidata {
struct dentry *dentry;
|
|||
|
|
@ -320,6 +320,9 @@ struct net_device
|
|||
#define NETIF_F_TSO_ECN (SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
|
||||
#define NETIF_F_TSO6 (SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)
|
||||
|
||||
/* List of features with software fallbacks. */
|
||||
#define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)
|
||||
|
||||
#define NETIF_F_GEN_CSUM (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
|
||||
#define NETIF_F_ALL_CSUM (NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM)
|
||||
|
||||
|
|
@ -549,6 +552,7 @@ struct packet_type {
|
|||
struct net_device *);
|
||||
struct sk_buff *(*gso_segment)(struct sk_buff *skb,
|
||||
int features);
|
||||
int (*gso_send_check)(struct sk_buff *skb);
|
||||
void *af_packet_priv;
|
||||
struct list_head list;
|
||||
};
|
||||
|
|
@@ -923,10 +927,10 @@ static inline void netif_tx_lock_bh(struct net_device *dev)

static inline int netif_tx_trylock(struct net_device *dev)
{
int err = spin_trylock(&dev->_xmit_lock);
if (!err)
int ok = spin_trylock(&dev->_xmit_lock);
if (likely(ok))
dev->xmit_lock_owner = smp_processor_id();
return err;
return ok;
}

static inline void netif_tx_unlock(struct net_device *dev)
@@ -1001,13 +1005,38 @@ static inline int net_gso_ok(int features, int gso_type)

static inline int skb_gso_ok(struct sk_buff *skb, int features)
{
return net_gso_ok(features, skb_shinfo(skb)->gso_size ?
skb_shinfo(skb)->gso_type : 0);
return net_gso_ok(features, skb_shinfo(skb)->gso_type);
}

static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
{
return !skb_gso_ok(skb, dev->features);
return skb_is_gso(skb) &&
(!skb_gso_ok(skb, dev->features) ||
unlikely(skb->ip_summed != CHECKSUM_HW));
}

/* On bonding slaves other than the currently active slave, suppress
* duplicates except for 802.3ad ETH_P_SLOW and alb non-mcast/bcast.
*/
static inline int skb_bond_should_drop(struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
struct net_device *master = dev->master;

if (master &&
(dev->priv_flags & IFF_SLAVE_INACTIVE)) {
if (master->priv_flags & IFF_MASTER_ALB) {
if (skb->pkt_type != PACKET_BROADCAST &&
skb->pkt_type != PACKET_MULTICAST)
return 0;
}
if (master->priv_flags & IFF_MASTER_8023AD &&
skb->protocol == __constant_htons(ETH_P_SLOW))
return 0;

return 1;
}
return 0;
}

#endif /* __KERNEL__ */
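With err renamed to ok, netif_tx_trylock() now reads as a boolean "lock acquired" test; a minimal caller sketch (try_kick_queue() is an illustrative name, not from this diff):

/* Sketch: consuming the boolean return of netif_tx_trylock(). */
static void try_kick_queue(struct net_device *dev)
{
        if (!netif_tx_trylock(dev))
                return;                 /* xmit lock busy; try again later */

        /* ... restart or flush the device's transmit queue here ... */

        netif_tx_unlock(dev);
}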
|
|||
|
|
@ -5,7 +5,7 @@ header-y := nf_conntrack_sctp.h nf_conntrack_tuple_common.h \
|
|||
xt_helper.h xt_length.h xt_limit.h xt_mac.h xt_mark.h \
|
||||
xt_MARK.h xt_multiport.h xt_NFQUEUE.h xt_pkttype.h \
|
||||
xt_policy.h xt_realm.h xt_sctp.h xt_state.h xt_string.h \
|
||||
xt_tcpmss.h xt_tcpudp.h
|
||||
xt_tcpmss.h xt_tcpudp.h xt_SECMARK.h xt_CONNSECMARK.h
|
||||
|
||||
unifdef-y := nf_conntrack_common.h nf_conntrack_ftp.h \
|
||||
nf_conntrack_tcp.h nfnetlink.h x_tables.h xt_physdev.h
|
||||
|
|
|
|||
|
|
@ -6,7 +6,6 @@
|
|||
|
||||
#include <linux/netfilter.h>
|
||||
#if defined(__KERNEL__) && defined(CONFIG_BRIDGE_NETFILTER)
|
||||
#include <asm/atomic.h>
|
||||
#include <linux/if_ether.h>
|
||||
#endif
|
||||
|
||||
|
|
@ -49,15 +48,25 @@ enum nf_br_hook_priorities {
|
|||
|
||||
/* Only used in br_forward.c */
|
||||
static inline
|
||||
void nf_bridge_maybe_copy_header(struct sk_buff *skb)
|
||||
int nf_bridge_maybe_copy_header(struct sk_buff *skb)
|
||||
{
|
||||
int err;
|
||||
|
||||
if (skb->nf_bridge) {
|
||||
if (skb->protocol == __constant_htons(ETH_P_8021Q)) {
|
||||
err = skb_cow(skb, 18);
|
||||
if (err)
|
||||
return err;
|
||||
memcpy(skb->data - 18, skb->nf_bridge->data, 18);
|
||||
skb_push(skb, 4);
|
||||
} else
|
||||
} else {
|
||||
err = skb_cow(skb, 16);
|
||||
if (err)
|
||||
return err;
|
||||
memcpy(skb->data - 16, skb->nf_bridge->data, 16);
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* This is called by the IP fragmenting code and it ensures there is
|
||||
|
|
@ -79,6 +88,8 @@ struct bridge_skb_cb {
|
|||
__u32 ipv4;
|
||||
} daddr;
|
||||
};
|
||||
|
||||
extern int brnf_deferred_hooks;
|
||||
#endif /* CONFIG_BRIDGE_NETFILTER */
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
|
|
|
|||
|
|
@ -157,6 +157,12 @@ enum nfs_opnum4 {
|
|||
OP_ILLEGAL = 10044,
|
||||
};
|
||||
|
||||
/*Defining first and last NFS4 operations implemented.
|
||||
Needs to be updated if more operations are defined in future.*/
|
||||
|
||||
#define FIRST_NFS4_OP OP_ACCESS
|
||||
#define LAST_NFS4_OP OP_RELEASE_LOCKOWNER
|
||||
|
||||
enum nfsstat4 {
|
||||
NFS4_OK = 0,
|
||||
NFS4ERR_PERM = 1,
|
||||
|
|
|
|||
|
|
@ -9,6 +9,36 @@
|
|||
#ifndef _LINUX_NFS_FS_H
|
||||
#define _LINUX_NFS_FS_H
|
||||
|
||||
/*
|
||||
* Enable debugging support for nfs client.
|
||||
* Requires RPC_DEBUG.
|
||||
*/
|
||||
#ifdef RPC_DEBUG
|
||||
# define NFS_DEBUG
|
||||
#endif
|
||||
|
||||
/* Default timeout values */
|
||||
#define NFS_MAX_UDP_TIMEOUT (60*HZ)
|
||||
#define NFS_MAX_TCP_TIMEOUT (600*HZ)
|
||||
|
||||
/*
|
||||
* superblock magic number for NFS
|
||||
*/
|
||||
#define NFS_SUPER_MAGIC 0x6969
|
||||
|
||||
/*
|
||||
* When flushing a cluster of dirty pages, there can be different
|
||||
* strategies:
|
||||
*/
|
||||
#define FLUSH_SYNC 1 /* file being synced, or contention */
|
||||
#define FLUSH_STABLE 4 /* commit to stable storage */
|
||||
#define FLUSH_LOWPRI 8 /* low priority background flush */
|
||||
#define FLUSH_HIGHPRI 16 /* high priority memory reclaim flush */
|
||||
#define FLUSH_NOCOMMIT 32 /* Don't send the NFSv3/v4 COMMIT */
|
||||
#define FLUSH_INVALIDATE 64 /* Invalidate the page cache */
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
#include <linux/in.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/pagemap.h>
|
||||
|
|
@ -30,41 +60,11 @@
|
|||
#include <linux/rwsem.h>
|
||||
#include <linux/mempool.h>
|
||||
|
||||
/*
|
||||
* Enable debugging support for nfs client.
|
||||
* Requires RPC_DEBUG.
|
||||
*/
|
||||
#ifdef RPC_DEBUG
|
||||
# define NFS_DEBUG
|
||||
#endif
|
||||
|
||||
/* Default timeout values */
|
||||
#define NFS_MAX_UDP_TIMEOUT (60*HZ)
|
||||
#define NFS_MAX_TCP_TIMEOUT (600*HZ)
|
||||
|
||||
/*
|
||||
* superblock magic number for NFS
|
||||
*/
|
||||
#define NFS_SUPER_MAGIC 0x6969
|
||||
|
||||
/*
|
||||
* These are the default flags for swap requests
|
||||
*/
|
||||
#define NFS_RPC_SWAPFLAGS (RPC_TASK_SWAPPER|RPC_TASK_ROOTCREDS)
|
||||
|
||||
/*
|
||||
* When flushing a cluster of dirty pages, there can be different
|
||||
* strategies:
|
||||
*/
|
||||
#define FLUSH_SYNC 1 /* file being synced, or contention */
|
||||
#define FLUSH_STABLE 4 /* commit to stable storage */
|
||||
#define FLUSH_LOWPRI 8 /* low priority background flush */
|
||||
#define FLUSH_HIGHPRI 16 /* high priority memory reclaim flush */
|
||||
#define FLUSH_NOCOMMIT 32 /* Don't send the NFSv3/v4 COMMIT */
|
||||
#define FLUSH_INVALIDATE 64 /* Invalidate the page cache */
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
/*
|
||||
* NFSv3/v4 Access mode cache entry
|
||||
*/
|
||||
|
|
@ -427,7 +427,7 @@ extern int nfs_writeback_done(struct rpc_task *, struct nfs_write_data *);
|
|||
extern void nfs_writedata_release(void *);
|
||||
|
||||
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
|
||||
struct nfs_write_data *nfs_commit_alloc(unsigned int pagecount);
|
||||
struct nfs_write_data *nfs_commit_alloc(void);
|
||||
void nfs_commit_free(struct nfs_write_data *p);
|
||||
#endif
|
||||
|
||||
|
|
@ -476,10 +476,9 @@ static inline int nfs_wb_page(struct inode *inode, struct page* page)
|
|||
}
|
||||
|
||||
/*
|
||||
* Allocate and free nfs_write_data structures
|
||||
* Allocate nfs_write_data structures
|
||||
*/
|
||||
extern struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount);
|
||||
extern void nfs_writedata_free(struct nfs_write_data *p);
|
||||
extern struct nfs_write_data *nfs_writedata_alloc(size_t len);
|
||||
|
||||
/*
|
||||
* linux/fs/nfs/read.c
|
||||
|
|
@ -491,10 +490,9 @@ extern int nfs_readpage_result(struct rpc_task *, struct nfs_read_data *);
|
|||
extern void nfs_readdata_release(void *data);
|
||||
|
||||
/*
|
||||
* Allocate and free nfs_read_data structures
|
||||
* Allocate nfs_read_data structures
|
||||
*/
|
||||
extern struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount);
|
||||
extern void nfs_readdata_free(struct nfs_read_data *p);
|
||||
extern struct nfs_read_data *nfs_readdata_alloc(size_t len);
|
||||
|
||||
/*
|
||||
* linux/fs/nfs3proc.c
|
||||
|
|
|
|||
|
|
@ -659,7 +659,7 @@ struct nfs4_rename_res {
|
|||
struct nfs4_setclientid {
|
||||
const nfs4_verifier * sc_verifier; /* request */
|
||||
unsigned int sc_name_len;
|
||||
char sc_name[32]; /* request */
|
||||
char sc_name[48]; /* request */
|
||||
u32 sc_prog; /* request */
|
||||
unsigned int sc_netid_len;
|
||||
char sc_netid[4]; /* request */
|
||||
|
|
@ -729,6 +729,7 @@ struct nfs_read_data {
|
|||
struct list_head pages; /* Coalesced read requests */
|
||||
struct nfs_page *req; /* multi ops per nfs_page */
|
||||
struct page **pagevec;
|
||||
unsigned int npages; /* Max length of pagevec */
|
||||
struct nfs_readargs args;
|
||||
struct nfs_readres res;
|
||||
#ifdef CONFIG_NFS_V4
|
||||
|
|
@ -747,6 +748,7 @@ struct nfs_write_data {
|
|||
struct list_head pages; /* Coalesced requests we wish to flush */
|
||||
struct nfs_page *req; /* multi ops per nfs_page */
|
||||
struct page **pagevec;
|
||||
unsigned int npages; /* Max length of pagevec */
|
||||
struct nfs_writeargs args; /* argument struct */
|
||||
struct nfs_writeres res; /* result struct */
|
||||
#ifdef CONFIG_NFS_V4
|
||||
|
|
|
|||
|
|
@ -9,6 +9,8 @@
|
|||
#ifndef LINUX_NFSD_STATS_H
|
||||
#define LINUX_NFSD_STATS_H
|
||||
|
||||
#include <linux/nfs4.h>
|
||||
|
||||
struct nfsd_stats {
|
||||
unsigned int rchits; /* repcache hits */
|
||||
unsigned int rcmisses; /* repcache hits */
|
||||
|
|
@ -27,6 +29,10 @@ struct nfsd_stats {
|
|||
unsigned int ra_size; /* size of ra cache */
|
||||
unsigned int ra_depth[11]; /* number of times ra entry was found that deep
|
||||
* in the cache (10percentiles). [10] = not found */
|
||||
#ifdef CONFIG_NFSD_V4
|
||||
unsigned int nfs4_opcount[LAST_NFS4_OP + 1]; /* count of individual nfsv4 operations */
|
||||
#endif
|
||||
|
||||
};
|
||||
|
||||
/* thread usage wraps very million seconds (approx one fortnight) */
|
||||
|
|
|
|||
|
|
@@ -30,12 +30,20 @@ extern struct node node_devices[];

extern int register_node(struct node *, int, struct node *);
extern void unregister_node(struct node *node);
#ifdef CONFIG_NUMA
extern int register_one_node(int nid);
extern void unregister_one_node(int nid);
#ifdef CONFIG_NUMA
extern int register_cpu_under_node(unsigned int cpu, unsigned int nid);
extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid);
#else
static inline int register_one_node(int nid)
{
        return 0;
}
static inline int unregister_one_node(int nid)
{
        return 0;
}
static inline int register_cpu_under_node(unsigned int cpu, unsigned int nid)
{
        return 0;
@@ -25,8 +25,6 @@ struct nsc_gpio_ops {
        void    (*gpio_dump)    (struct nsc_gpio_ops *amp, unsigned iminor);
        int     (*gpio_get)     (unsigned iminor);
        void    (*gpio_set)     (unsigned iminor, int state);
        void    (*gpio_set_high)(unsigned iminor);
        void    (*gpio_set_low) (unsigned iminor);
        void    (*gpio_change)  (unsigned iminor);
        int     (*gpio_current) (unsigned iminor);
        struct device*  dev;    /* for dev_dbg() support, set in init */
@@ -161,6 +161,7 @@ struct pci_dev {
        unsigned int    is_enabled:1;           /* pci_enable_device has been called */
        unsigned int    is_busmaster:1;         /* device is busmaster */
        unsigned int    no_msi:1;               /* device may not use msi */
        unsigned int    no_d1d2:1;              /* only allow d0 or d3 */
        unsigned int    block_ucfg_access:1;    /* userspace config space access is blocked */
        unsigned int    broken_parity_status:1; /* Device generates false positive parity */
        unsigned int    msi_enabled:1;
@@ -648,6 +648,8 @@
#define PCI_DEVICE_ID_SI_962            0x0962
#define PCI_DEVICE_ID_SI_963            0x0963
#define PCI_DEVICE_ID_SI_965            0x0965
#define PCI_DEVICE_ID_SI_966            0x0966
#define PCI_DEVICE_ID_SI_968            0x0968
#define PCI_DEVICE_ID_SI_5511           0x5511
#define PCI_DEVICE_ID_SI_5513           0x5513
#define PCI_DEVICE_ID_SI_5517           0x5517
@@ -1292,6 +1294,7 @@
#define PCI_DEVICE_ID_VIA_8367_0        0x3099
#define PCI_DEVICE_ID_VIA_8653_0        0x3101
#define PCI_DEVICE_ID_VIA_8622          0x3102
#define PCI_DEVICE_ID_VIA_8235_USB_2    0x3104
#define PCI_DEVICE_ID_VIA_8233C_0       0x3109
#define PCI_DEVICE_ID_VIA_8361          0x3112
#define PCI_DEVICE_ID_VIA_XM266         0x3116
@@ -1726,6 +1729,9 @@
#define PCI_VENDOR_ID_DOMEX             0x134a
#define PCI_DEVICE_ID_DOMEX_DMX3191D    0x0001

#define PCI_VENDOR_ID_INTASHIELD        0x135a
#define PCI_DEVICE_ID_INTASHIELD_IS200  0x0d80

#define PCI_VENDOR_ID_QUATECH           0x135C
#define PCI_DEVICE_ID_QUATECH_QSC100    0x0010
#define PCI_DEVICE_ID_QUATECH_DSC100    0x0020
@@ -2019,6 +2025,13 @@
#define PCI_VENDOR_ID_TDI               0x192E
#define PCI_DEVICE_ID_TDI_EHCI          0x0101

#define PCI_VENDOR_ID_JMICRON           0x197B
#define PCI_DEVICE_ID_JMICRON_JMB360    0x2360
#define PCI_DEVICE_ID_JMICRON_JMB361    0x2361
#define PCI_DEVICE_ID_JMICRON_JMB363    0x2363
#define PCI_DEVICE_ID_JMICRON_JMB365    0x2365
#define PCI_DEVICE_ID_JMICRON_JMB366    0x2366
#define PCI_DEVICE_ID_JMICRON_JMB368    0x2368

#define PCI_VENDOR_ID_TEKRAM            0x1de1
#define PCI_DEVICE_ID_TEKRAM_DC290      0xdc29
@@ -2135,6 +2148,7 @@
#define PCI_DEVICE_ID_INTEL_82820_UP_HB 0x2501
#define PCI_DEVICE_ID_INTEL_82850_HB    0x2530
#define PCI_DEVICE_ID_INTEL_82860_HB    0x2531
#define PCI_DEVICE_ID_INTEL_E7501_MCH   0x254c
#define PCI_DEVICE_ID_INTEL_82845G_HB   0x2560
#define PCI_DEVICE_ID_INTEL_82845G_IG   0x2562
#define PCI_DEVICE_ID_INTEL_82865_HB    0x2570
@@ -422,7 +422,23 @@
#define PCI_ERR_CAP_ECRC_CHKE   0x00000100      /* ECRC Check Enable */
#define PCI_ERR_HEADER_LOG      28              /* Header Log Register (16 bytes) */
#define PCI_ERR_ROOT_COMMAND    44              /* Root Error Command */
/* Correctable Err Reporting Enable */
#define PCI_ERR_ROOT_CMD_COR_EN         0x00000001
/* Non-fatal Err Reporting Enable */
#define PCI_ERR_ROOT_CMD_NONFATAL_EN    0x00000002
/* Fatal Err Reporting Enable */
#define PCI_ERR_ROOT_CMD_FATAL_EN       0x00000004
#define PCI_ERR_ROOT_STATUS     48
#define PCI_ERR_ROOT_COR_RCV            0x00000001      /* ERR_COR Received */
/* Multi ERR_COR Received */
#define PCI_ERR_ROOT_MULTI_COR_RCV      0x00000002
/* ERR_FATAL/NONFATAL Received */
#define PCI_ERR_ROOT_UNCOR_RCV          0x00000004
/* Multi ERR_FATAL/NONFATAL Received */
#define PCI_ERR_ROOT_MULTI_UNCOR_RCV    0x00000008
#define PCI_ERR_ROOT_FIRST_FATAL        0x00000010      /* First Fatal */
#define PCI_ERR_ROOT_NONFATAL_RCV       0x00000020      /* Non-Fatal Received */
#define PCI_ERR_ROOT_FATAL_RCV          0x00000040      /* Fatal Received */
#define PCI_ERR_ROOT_COR_SRC    52
#define PCI_ERR_ROOT_SRC        54

@@ -378,6 +378,7 @@ int phy_mii_ioctl(struct phy_device *phydev,
                struct mii_ioctl_data *mii_data, int cmd);
int phy_start_interrupts(struct phy_device *phydev);
void phy_print_status(struct phy_device *phydev);
struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id);

extern struct bus_type mdio_bus_type;
#endif /* __PHY_H */
@@ -14,11 +14,6 @@ extern int pm_active;
struct pm_dev __deprecated *
pm_register(pm_dev_t type, unsigned long id, pm_callback callback);

/*
 * Unregister all devices with matching callback
 */
void __deprecated pm_unregister_all(pm_callback callback);

/*
 * Send a request to all devices
 */
@@ -35,8 +30,6 @@ static inline struct pm_dev *pm_register(pm_dev_t type,
        return NULL;
}

static inline void pm_unregister_all(pm_callback callback) {}

static inline int pm_send_all(pm_request_t rqst, void *data)
{
        return 0;
@@ -231,7 +231,6 @@ extern struct pmu_battery_info pmu_batteries[PMU_MAX_BATTERIES];
extern unsigned int pmu_power_flags;

/* Backlight */
extern int disable_kernel_backlight;
extern void pmu_backlight_init(struct device_node*);
extern void pmu_backlight_init(void);

#endif /* __KERNEL__ */
@@ -148,9 +148,10 @@ struct mddev_s

        struct mdk_thread_s             *thread;        /* management thread */
        struct mdk_thread_s             *sync_thread;   /* doing resync or reconstruct */
        sector_t                        curr_resync;    /* blocks scheduled */
        sector_t                        curr_resync;    /* last block scheduled */
        unsigned long                   resync_mark;    /* a recent timestamp */
        sector_t                        resync_mark_cnt;/* blocks written at resync_mark */
        sector_t                        curr_mark_cnt;  /* blocks scheduled now */

        sector_t                        resync_max_sectors; /* may be set by personality */

@@ -2,6 +2,8 @@
#define _ROOT_DEV_H_

#include <linux/major.h>
#include <linux/types.h>
#include <linux/kdev_t.h>

enum {
        Root_NFS = MKDEV(UNNAMED_MAJOR, 255),
@@ -61,12 +61,25 @@ extern void downgrade_write(struct rw_semaphore *sem);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * nested locking:
 * nested locking. NOTE: rwsems are not allowed to recurse
 * (which occurs if the same task tries to acquire the same
 * lock instance multiple times), but multiple locks of the
 * same lock class might be taken, if the order of the locks
 * is always the same. This ordering rule can be expressed
 * to lockdep via the _nested() APIs, by enumerating the
 * subclasses that are used. (If the nesting relationship is
 * static then another method for expressing nested locking is
 * the explicit definition of lock class keys and the use of
 * lockdep_set_class() at lock initialization time.
 * See Documentation/lockdep-design.txt for more details.)
 */
extern void down_read_nested(struct rw_semaphore *sem, int subclass);
extern void down_write_nested(struct rw_semaphore *sem, int subclass);
/*
 * Take/release a lock when not the owner will release it:
 * Take/release a lock when not the owner will release it.
 *
 * [ This API should be avoided as much as possible - the
 *   proper abstraction for this case is completions. ]
 */
extern void down_read_non_owner(struct rw_semaphore *sem);
extern void up_read_non_owner(struct rw_semaphore *sem);
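A minimal usage sketch of the _nested() annotation introduced above (not part of the diff; the struct, function names, and parent/child locking order are hypothetical, and SINGLE_DEPTH_NESTING is assumed from linux/lockdep.h):

#include <linux/rwsem.h>

/* Two objects of the same lock class are always locked parent-then-child.
 * Annotating the inner acquisition with a subclass tells lockdep this is a
 * second lock of the same class in a fixed order, not recursion. */
struct node_obj {
        struct rw_semaphore sem;
};

static void lock_parent_child(struct node_obj *parent, struct node_obj *child)
{
        down_read(&parent->sem);                                /* subclass 0 */
        down_read_nested(&child->sem, SINGLE_DEPTH_NESTING);   /* subclass 1 */
}

static void unlock_parent_child(struct node_obj *parent, struct node_obj *child)
{
        up_read(&child->sem);
        up_read(&parent->sem);
}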
@@ -463,6 +463,10 @@ struct signal_struct {
#ifdef CONFIG_BSD_PROCESS_ACCT
        struct pacct_struct pacct;      /* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
        spinlock_t stats_lock;
        struct taskstats *stats;
#endif
};

/* Context switch must be unlocked if interrupts are to be enabled */
@@ -537,7 +541,7 @@ extern struct user_struct root_user;
struct backing_dev_info;
struct reclaim_state;

#ifdef CONFIG_SCHEDSTATS
#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
struct sched_info {
        /* cumulative counters */
        unsigned long   cpu_time,       /* time spent on the cpu */
@@ -548,9 +552,53 @@ struct sched_info {
        unsigned long   last_arrival,   /* when we last ran on a cpu */
                        last_queued;    /* when we were last queued to run */
};
#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */

#ifdef CONFIG_SCHEDSTATS
extern struct file_operations proc_schedstat_operations;
#endif /* CONFIG_SCHEDSTATS */

#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info {
        spinlock_t      lock;
        unsigned int    flags;  /* Private per-task flags */

        /* For each stat XXX, add following, aligned appropriately
         *
         * struct timespec XXX_start, XXX_end;
         * u64 XXX_delay;
         * u32 XXX_count;
         *
         * Atomicity of updates to XXX_delay, XXX_count protected by
         * single lock above (split into XXX_lock if contention is an issue).
         */

        /*
         * XXX_count is incremented on every XXX operation, the delay
         * associated with the operation is added to XXX_delay.
         * XXX_delay contains the accumulated delay time in nanoseconds.
         */
        struct timespec blkio_start, blkio_end; /* Shared by blkio, swapin */
        u64 blkio_delay;        /* wait for sync block io completion */
        u64 swapin_delay;       /* wait for swapin block io completion */
        u32 blkio_count;        /* total count of the number of sync block */
                                /* io operations performed */
        u32 swapin_count;       /* total count of the number of swapin block */
                                /* io operations performed */
};
#endif  /* CONFIG_TASK_DELAY_ACCT */
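As a hedged illustration of the XXX_delay/XXX_count pattern the comment above describes (this helper is not in the patch; the real update logic lives in kernel/delayacct.c, and timespec_to_ns() is assumed from linux/time.h):

/* Sketch: fold one timed interval into a delay/count pair, taking the
 * per-task lock exactly as the comment above requires. */
static void delay_record(struct task_delay_info *delays,
                         struct timespec *start, struct timespec *end,
                         u64 *delay_total, u32 *count)
{
        s64 ns = timespec_to_ns(end) - timespec_to_ns(start);

        spin_lock(&delays->lock);
        if (ns > 0)
                *delay_total += ns;     /* accumulated delay, nanoseconds */
        (*count)++;                     /* incremented regardless of overflow */
        spin_unlock(&delays->lock);
}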

static inline int sched_info_on(void)
{
#ifdef CONFIG_SCHEDSTATS
        return 1;
#elif defined(CONFIG_TASK_DELAY_ACCT)
        extern int delayacct_on;
        return delayacct_on;
#else
        return 0;
#endif
}

enum idle_type
{
@@ -747,7 +795,7 @@ struct task_struct {
        cpumask_t cpus_allowed;
        unsigned int time_slice, first_time_slice;

#ifdef CONFIG_SCHEDSTATS
#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
        struct sched_info sched_info;
#endif

@@ -945,6 +993,9 @@ struct task_struct {
         * cache last used pipe for splice
         */
        struct pipe_inode_info *splice_pipe;
#ifdef  CONFIG_TASK_DELAY_ACCT
        struct task_delay_info *delays;
#endif
};

static inline pid_t process_group(struct task_struct *tsk)
@@ -1505,6 +1556,14 @@ static inline void freeze(struct task_struct *p)
        p->flags |= PF_FREEZE;
}

/*
 * Sometimes we may need to cancel the previous 'freeze' request
 */
static inline void do_not_freeze(struct task_struct *p)
{
        p->flags &= ~PF_FREEZE;
}

/*
 * Wake up a frozen process
 */
@@ -1109,6 +1109,16 @@ struct swap_info_struct;
 * @name contains the name of the security module being unstacked.
 * @ops contains a pointer to the struct security_operations of the module to unstack.
 *
 * @secid_to_secctx:
 *      Convert secid to security context.
 *      @secid contains the security ID.
 *      @secdata contains the pointer that stores the converted security context.
 *
 * @release_secctx:
 *      Release the security context.
 *      @secdata contains the security context.
 *      @seclen contains the length of the security context.
 *
 * This is the main security structure.
 */
struct security_operations {
@@ -1289,6 +1299,8 @@ struct security_operations {

        int (*getprocattr)(struct task_struct *p, char *name, void *value, size_t size);
        int (*setprocattr)(struct task_struct *p, char *name, void *value, size_t size);
        int (*secid_to_secctx)(u32 secid, char **secdata, u32 *seclen);
        void (*release_secctx)(char *secdata, u32 seclen);

#ifdef CONFIG_SECURITY_NETWORK
        int (*unix_stream_connect) (struct socket * sock,
@@ -1317,7 +1329,7 @@ struct security_operations {
        int (*socket_shutdown) (struct socket * sock, int how);
        int (*socket_sock_rcv_skb) (struct sock * sk, struct sk_buff * skb);
        int (*socket_getpeersec_stream) (struct socket *sock, char __user *optval, int __user *optlen, unsigned len);
        int (*socket_getpeersec_dgram) (struct sk_buff *skb, char **secdata, u32 *seclen);
        int (*socket_getpeersec_dgram) (struct socket *sock, struct sk_buff *skb, u32 *secid);
        int (*sk_alloc_security) (struct sock *sk, int family, gfp_t priority);
        void (*sk_free_security) (struct sock *sk);
        unsigned int (*sk_getsid) (struct sock *sk, struct flowi *fl, u8 dir);
@@ -2059,6 +2071,16 @@ static inline int security_netlink_recv(struct sk_buff * skb, int cap)
        return security_ops->netlink_recv(skb, cap);
}

static inline int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
{
        return security_ops->secid_to_secctx(secid, secdata, seclen);
}

static inline void security_release_secctx(char *secdata, u32 seclen)
{
        return security_ops->release_secctx(secdata, seclen);
}

/* prototypes */
extern int security_init        (void);
extern int register_security    (struct security_operations *ops);
@@ -2725,6 +2747,14 @@ static inline void securityfs_remove(struct dentry *dentry)
{
}

static inline int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
{
        return -EOPNOTSUPP;
}

static inline void security_release_secctx(char *secdata, u32 seclen)
{
}
#endif  /* CONFIG_SECURITY */

#ifdef CONFIG_SECURITY_NETWORK
@@ -2840,10 +2870,9 @@ static inline int security_socket_getpeersec_stream(struct socket *sock, char __
        return security_ops->socket_getpeersec_stream(sock, optval, optlen, len);
}

static inline int security_socket_getpeersec_dgram(struct sk_buff *skb, char **secdata,
                                                   u32 *seclen)
static inline int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid)
{
        return security_ops->socket_getpeersec_dgram(skb, secdata, seclen);
        return security_ops->socket_getpeersec_dgram(sock, skb, secid);
}

static inline int security_sk_alloc(struct sock *sk, int family, gfp_t priority)
@@ -2968,8 +2997,7 @@ static inline int security_socket_getpeersec_stream(struct socket *sock, char __
        return -ENOPROTOOPT;
}

static inline int security_socket_getpeersec_dgram(struct sk_buff *skb, char **secdata,
                                                   u32 *seclen)
static inline int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid)
{
        return -ENOPROTOOPT;
}
@@ -227,6 +227,7 @@ struct uart_port {
#define UPIO_MEM                (2)
#define UPIO_MEM32              (3)
#define UPIO_AU                 (4)     /* Au1x00 type IO */
#define UPIO_TSI                (5)     /* Tsi108/109 type IO */

        unsigned int            read_status_mask;       /* driver specific */
        unsigned int            ignore_status_mask;     /* driver specific */
@@ -604,12 +604,17 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
        return list_->qlen;
}

extern struct lock_class_key skb_queue_lock_key;

/*
 * This function creates a split out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only). In the long run either the
 * network layer or drivers should need annotation to consolidate the
 * main types of usage into 3 classes.
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
        spin_lock_init(&list->lock);
        lockdep_set_class(&list->lock, &skb_queue_lock_key);
        list->prev = list->next = (struct sk_buff *)list;
        list->qlen = 0;
}
@@ -1034,6 +1039,21 @@ static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
        return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}

/**
 *      pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 *      @skb: buffer to alter
 *      @len: new length
 *
 *      This is identical to pskb_trim except that the caller knows that
 *      the skb is not cloned so we should never get an error due to out-
 *      of-memory.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
        int err = pskb_trim(skb, len);
        BUG_ON(err);
}
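A hedged example of where the non-failing variant fits: trimming an skb the caller allocated itself, so it cannot be cloned (the function name is invented; the 4-byte FCS length is just an illustration):

#include <linux/skbuff.h>

/* Strip the trailing Ethernet FCS from a receive buffer this driver
 * allocated itself; the skb is private to us, so pskb_trim_unique()
 * can never fail here. */
static void rx_strip_fcs(struct sk_buff *skb)
{
        if (skb->len > 4)
                pskb_trim_unique(skb, skb->len - 4);
}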

/**
 *      skb_orphan - orphan a buffer
 *      @skb: buffer to orphan
@@ -1066,9 +1086,8 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
                kfree_skb(skb);
}

#ifndef CONFIG_HAVE_ARCH_DEV_ALLOC_SKB
/**
 *      __dev_alloc_skb - allocate an skbuff for sending
 *      __dev_alloc_skb - allocate an skbuff for receiving
 *      @length: length to allocate
 *      @gfp_mask: get_free_pages mask, passed to alloc_skb
 *
@@ -1077,7 +1096,7 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
 *      the headroom they think they need without accounting for the
 *      built in space. The built in space is used for optimisations.
 *
 *      %NULL is returned in there is no free memory.
 *      %NULL is returned if there is no free memory.
 */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
                                              gfp_t gfp_mask)
@@ -1087,12 +1106,9 @@ static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
                skb_reserve(skb, NET_SKB_PAD);
        return skb;
}
#else
extern struct sk_buff *__dev_alloc_skb(unsigned int length, int gfp_mask);
#endif

/**
 *      dev_alloc_skb - allocate an skbuff for sending
 *      dev_alloc_skb - allocate an skbuff for receiving
 *      @length: length to allocate
 *
 *      Allocate a new &sk_buff and assign it a usage count of one. The
@@ -1100,7 +1116,7 @@ extern struct sk_buff *__dev_alloc_skb(unsigned int length, int gfp_mask);
 *      the headroom they think they need without accounting for the
 *      built in space. The built in space is used for optimisations.
 *
 *      %NULL is returned in there is no free memory. Although this function
 *      %NULL is returned if there is no free memory. Although this function
 *      allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
@@ -1108,6 +1124,28 @@ static inline struct sk_buff *dev_alloc_skb(unsigned int length)
        return __dev_alloc_skb(length, GFP_ATOMIC);
}

extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
                unsigned int length, gfp_t gfp_mask);

/**
 *      netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *      @dev: network device to receive on
 *      @length: length to allocate
 *
 *      Allocate a new &sk_buff and assign it a usage count of one. The
 *      buffer has unspecified headroom built in. Users should allocate
 *      the headroom they think they need without accounting for the
 *      built in space. The built in space is used for optimisations.
 *
 *      %NULL is returned if there is no free memory. Although this function
 *      allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
                unsigned int length)
{
        return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}
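A short, hypothetical RX-refill sketch showing how a driver might use the new helper (the function name and buffer size handling are made up; NET_IP_ALIGN comes from skbuff.h):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Allocate one receive buffer for @dev; netdev_alloc_skb() uses
 * GFP_ATOMIC, so this is safe to call from the RX interrupt path. */
static struct sk_buff *rx_refill_one(struct net_device *dev, unsigned int bufsz)
{
        struct sk_buff *skb = netdev_alloc_skb(dev, bufsz + NET_IP_ALIGN);

        if (!skb)
                return NULL;    /* out of memory; retry on the next poll */

        skb_reserve(skb, NET_IP_ALIGN); /* align the IP header after the MAC header */
        return skb;
}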

/**
 *      skb_cow - copy header of skb when it is required
 *      @skb: buffer to cow
@@ -1455,5 +1493,10 @@ static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif

static inline int skb_is_gso(const struct sk_buff *skb)
{
        return skb_shinfo(skb)->gso_size;
}

#endif  /* __KERNEL__ */
#endif  /* _LINUX_SKBUFF_H */
@@ -42,9 +42,9 @@ RPC_I(struct inode *inode)
extern int rpc_queue_upcall(struct inode *, struct rpc_pipe_msg *);

extern struct dentry *rpc_mkdir(char *, struct rpc_clnt *);
extern int rpc_rmdir(char *);
extern int rpc_rmdir(struct dentry *);
extern struct dentry *rpc_mkpipe(char *, void *, struct rpc_pipe_ops *, int flags);
extern int rpc_unlink(char *);
extern int rpc_unlink(struct dentry *);
extern struct vfsmount *rpc_get_mount(void);
extern void rpc_put_mount(void);

@@ -37,7 +37,7 @@ extern unsigned int xprt_max_resvport;

#define RPC_MIN_RESVPORT        (1U)
#define RPC_MAX_RESVPORT        (65535U)
#define RPC_DEF_MIN_RESVPORT    (650U)
#define RPC_DEF_MIN_RESVPORT    (665U)
#define RPC_DEF_MAX_RESVPORT    (1023U)

/*
@@ -229,7 +229,7 @@ int xprt_reserve_xprt(struct rpc_task *task);
int xprt_reserve_xprt_cong(struct rpc_task *task);
int xprt_prepare_transmit(struct rpc_task *task);
void xprt_transmit(struct rpc_task *task);
void xprt_abort_transmit(struct rpc_task *task);
void xprt_end_transmit(struct rpc_task *task);
int xprt_adjust_timeout(struct rpc_rqst *req);
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
include/linux/taskstats.h (new file, 137 lines)
@@ -0,0 +1,137 @@
/* taskstats.h - exporting per-task statistics
 *
 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
 *           (C) Balbir Singh,   IBM Corp. 2006
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 */

#ifndef _LINUX_TASKSTATS_H
#define _LINUX_TASKSTATS_H

/* Format for per-task data returned to userland when
 *      - a task exits
 *      - listener requests stats for a task
 *
 * The struct is versioned. Newer versions should only add fields to
 * the bottom of the struct to maintain backward compatibility.
 *
 *
 * To add new fields
 *      a) bump up TASKSTATS_VERSION
 *      b) add comment indicating new version number at end of struct
 *      c) add new fields after version comment; maintain 64-bit alignment
 */

#define TASKSTATS_VERSION       1

struct taskstats {

        /* Version 1 */
        __u16   version;
        __u16   padding[3];     /* Userspace should not interpret the padding
                                 * field which can be replaced by useful
                                 * fields if struct taskstats is extended.
                                 */

        /* Delay accounting fields start
         *
         * All values, until comment "Delay accounting fields end" are
         * available only if delay accounting is enabled, even though the last
         * few fields are not delays
         *
         * xxx_count is the number of delay values recorded
         * xxx_delay_total is the corresponding cumulative delay in nanoseconds
         *
         * xxx_delay_total wraps around to zero on overflow
         * xxx_count incremented regardless of overflow
         */

        /* Delay waiting for cpu, while runnable
         * count, delay_total NOT updated atomically
         */
        __u64   cpu_count;
        __u64   cpu_delay_total;

        /* Following four fields atomically updated using task->delays->lock */

        /* Delay waiting for synchronous block I/O to complete
         * does not account for delays in I/O submission
         */
        __u64   blkio_count;
        __u64   blkio_delay_total;

        /* Delay waiting for page fault I/O (swap in only) */
        __u64   swapin_count;
        __u64   swapin_delay_total;

        /* cpu "wall-clock" running time
         * On some architectures, value will adjust for cpu time stolen
         * from the kernel in involuntary waits due to virtualization.
         * Value is cumulative, in nanoseconds, without a corresponding count
         * and wraps around to zero silently on overflow
         */
        __u64   cpu_run_real_total;

        /* cpu "virtual" running time
         * Uses time intervals seen by the kernel i.e. no adjustment
         * for kernel's involuntary waits due to virtualization.
         * Value is cumulative, in nanoseconds, without a corresponding count
         * and wraps around to zero silently on overflow
         */
        __u64   cpu_run_virtual_total;
        /* Delay accounting fields end */
        /* version 1 ends here */
};


/*
 * Commands sent from userspace
 * Not versioned. New commands should only be inserted at the enum's end
 * prior to __TASKSTATS_CMD_MAX
 */

enum {
        TASKSTATS_CMD_UNSPEC = 0,       /* Reserved */
        TASKSTATS_CMD_GET,              /* user->kernel request/get-response */
        TASKSTATS_CMD_NEW,              /* kernel->user event */
        __TASKSTATS_CMD_MAX,
};

#define TASKSTATS_CMD_MAX (__TASKSTATS_CMD_MAX - 1)

enum {
        TASKSTATS_TYPE_UNSPEC = 0,      /* Reserved */
        TASKSTATS_TYPE_PID,             /* Process id */
        TASKSTATS_TYPE_TGID,            /* Thread group id */
        TASKSTATS_TYPE_STATS,           /* taskstats structure */
        TASKSTATS_TYPE_AGGR_PID,        /* contains pid + stats */
        TASKSTATS_TYPE_AGGR_TGID,       /* contains tgid + stats */
        __TASKSTATS_TYPE_MAX,
};

#define TASKSTATS_TYPE_MAX (__TASKSTATS_TYPE_MAX - 1)

enum {
        TASKSTATS_CMD_ATTR_UNSPEC = 0,
        TASKSTATS_CMD_ATTR_PID,
        TASKSTATS_CMD_ATTR_TGID,
        TASKSTATS_CMD_ATTR_REGISTER_CPUMASK,
        TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK,
        __TASKSTATS_CMD_ATTR_MAX,
};

#define TASKSTATS_CMD_ATTR_MAX (__TASKSTATS_CMD_ATTR_MAX - 1)

/* NETLINK_GENERIC related info */

#define TASKSTATS_GENL_NAME     "TASKSTATS"
#define TASKSTATS_GENL_VERSION  0x1

#endif /* _LINUX_TASKSTATS_H */
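A hedged userspace sketch of the versioning rule described in the header: check the version field before reading fields, so newer kernels that append fields remain compatible (the genetlink reception code that fills the struct is not shown):

#include <stdio.h>
#include <linux/taskstats.h>

/* Print the version-1 delay counters from a struct taskstats received
 * over the taskstats genetlink interface. */
static void print_delays(const struct taskstats *ts)
{
        if (ts->version < 1)
                return;         /* defensive; version 1 is the minimum */

        printf("cpu:    %llu waits, %llu ns\n",
               (unsigned long long)ts->cpu_count,
               (unsigned long long)ts->cpu_delay_total);
        printf("blkio:  %llu waits, %llu ns\n",
               (unsigned long long)ts->blkio_count,
               (unsigned long long)ts->blkio_delay_total);
        printf("swapin: %llu waits, %llu ns\n",
               (unsigned long long)ts->swapin_count,
               (unsigned long long)ts->swapin_delay_total);
}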
include/linux/taskstats_kern.h (new file, 89 lines)
@@ -0,0 +1,89 @@
/* taskstats_kern.h - kernel header for per-task statistics interface
 *
 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
 *           (C) Balbir Singh,   IBM Corp. 2006
 */

#ifndef _LINUX_TASKSTATS_KERN_H
#define _LINUX_TASKSTATS_KERN_H

#include <linux/taskstats.h>
#include <linux/sched.h>
#include <net/genetlink.h>

#ifdef CONFIG_TASKSTATS
extern kmem_cache_t *taskstats_cache;
extern struct mutex taskstats_exit_mutex;

static inline void taskstats_exit_free(struct taskstats *tidstats)
{
        if (tidstats)
                kmem_cache_free(taskstats_cache, tidstats);
}

static inline void taskstats_tgid_init(struct signal_struct *sig)
{
        spin_lock_init(&sig->stats_lock);
        sig->stats = NULL;
}

static inline void taskstats_tgid_alloc(struct signal_struct *sig)
{
        struct taskstats *stats;
        unsigned long flags;

        stats = kmem_cache_zalloc(taskstats_cache, SLAB_KERNEL);
        if (!stats)
                return;

        spin_lock_irqsave(&sig->stats_lock, flags);
        if (!sig->stats) {
                sig->stats = stats;
                stats = NULL;
        }
        spin_unlock_irqrestore(&sig->stats_lock, flags);

        if (stats)
                kmem_cache_free(taskstats_cache, stats);
}

static inline void taskstats_tgid_free(struct signal_struct *sig)
{
        struct taskstats *stats = NULL;
        unsigned long flags;

        spin_lock_irqsave(&sig->stats_lock, flags);
        if (sig->stats) {
                stats = sig->stats;
                sig->stats = NULL;
        }
        spin_unlock_irqrestore(&sig->stats_lock, flags);
        if (stats)
                kmem_cache_free(taskstats_cache, stats);
}

extern void taskstats_exit_alloc(struct taskstats **, unsigned int *);
extern void taskstats_exit_send(struct task_struct *, struct taskstats *, int, unsigned int);
extern void taskstats_init_early(void);
extern void taskstats_tgid_alloc(struct signal_struct *);
#else
static inline void taskstats_exit_alloc(struct taskstats **ptidstats, unsigned int *mycpu)
{}
static inline void taskstats_exit_free(struct taskstats *ptidstats)
{}
static inline void taskstats_exit_send(struct task_struct *tsk,
                                       struct taskstats *tidstats,
                                       int group_dead, unsigned int cpu)
{}
static inline void taskstats_tgid_init(struct signal_struct *sig)
{}
static inline void taskstats_tgid_alloc(struct signal_struct *sig)
{}
static inline void taskstats_tgid_free(struct signal_struct *sig)
{}
static inline void taskstats_init_early(void)
{}
#endif /* CONFIG_TASKSTATS */

#endif
@@ -70,6 +70,18 @@ extern unsigned long mktime(const unsigned int year, const unsigned int mon,

extern void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec);

/*
 * sub = lhs - rhs, in normalized form
 */
static inline struct timespec timespec_sub(struct timespec lhs,
                                           struct timespec rhs)
{
        struct timespec ts_delta;
        set_normalized_timespec(&ts_delta, lhs.tv_sec - rhs.tv_sec,
                                lhs.tv_nsec - rhs.tv_nsec);
        return ts_delta;
}

/*
 * Returns true if the timespec is norm, false if denorm:
 */
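A brief, hypothetical in-kernel use of the new timespec_sub() helper (getnstimeofday() and NSEC_PER_SEC are assumed from linux/time.h; the function name is invented):

#include <linux/time.h>

/* Time an operation and return the elapsed nanoseconds (sketch only). */
static s64 measure_op_ns(void (*op)(void))
{
        struct timespec start, end, delta;

        getnstimeofday(&start);
        op();
        getnstimeofday(&end);

        delta = timespec_sub(end, start);
        return (s64)delta.tv_sec * NSEC_PER_SEC + delta.tv_nsec;
}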
@@ -57,7 +57,6 @@
#include <linux/time.h>

#include <asm/param.h>
#include <asm/timex.h>

/*
 * SHIFT_KG and SHIFT_KF establish the damping of the PLL and are chosen
@@ -191,6 +190,8 @@ struct timex {
#define TIME_BAD        TIME_ERROR      /* bw compat */

#ifdef __KERNEL__
#include <asm/timex.h>

/*
 * kernel variables
 * Note: maximum error = NTP synch distance = dispersion + delay / 2;
@@ -5,16 +5,6 @@
 * 'tty.h' defines some structures used by tty_io.c and some defines.
 */

/*
 * These constants are also useful for user-level apps (e.g., VC
 * resizing).
 */
#define MIN_NR_CONSOLES 1       /* must be at least 1 */
#define MAX_NR_CONSOLES 63      /* serial lines start at 64 */
#define MAX_NR_USER_CONSOLES 63 /* must be root to allocate above this */
                /* Note: the ioctl VT_GETSTATE does not work for
                   consoles 16 and higher (since it returns a short) */

#ifdef __KERNEL__
#include <linux/fs.h>
#include <linux/major.h>
@@ -22,7 +12,6 @@
#include <linux/workqueue.h>
#include <linux/tty_driver.h>
#include <linux/tty_ldisc.h>
#include <linux/screen_info.h>
#include <linux/mutex.h>

#include <asm/system.h>
@@ -70,6 +59,7 @@ struct tty_bufhead {
        struct tty_buffer *head;        /* Queue head */
        struct tty_buffer *tail;        /* Active buffer */
        struct tty_buffer *free;        /* Free queue head */
        int memory_used;                /* Buffer space used excluding free queue */
};
/*
 * The pty uses char_buf and flag_buf as a contiguous buffer
@@ -270,7 +260,6 @@ struct tty_struct {
extern void tty_write_flush(struct tty_struct *);

extern struct termios tty_std_termios;
extern int fg_console, last_console, want_console;

extern int kmsg_redirect;

@@ -48,7 +48,7 @@ struct ep_device;
 * @urb_list: urbs queued to this endpoint; maintained by usbcore
 * @hcpriv: for use by HCD; typically holds hardware dma queue head (QH)
 *      with one or more transfer descriptors (TDs) per urb
 * @kobj: kobject for sysfs info
 * @ep_dev: ep_device for sysfs info
 * @extra: descriptors following this endpoint in the configuration
 * @extralen: how many bytes of "extra" are valid
 *
@@ -103,8 +103,7 @@ enum usb_interface_condition {
 * @condition: binding state of the interface: not bound, binding
 *      (in probe()), bound to a driver, or unbinding (in disconnect())
 * @dev: driver model's view of this device
 * @usb_dev: if an interface is bound to the USB major, this will point
 *      to the sysfs representation for that device.
 * @class_dev: driver model's class view of this device.
 *
 * USB device drivers attach to interfaces on a physical device. Each
 * interface encapsulates a single high level function, such as feeding
@@ -144,7 +143,7 @@ struct usb_interface {
                                         * bound to */
        enum usb_interface_condition condition;         /* state of binding */
        struct device dev;              /* interface specific device info */
        struct device *usb_dev;         /* pointer to the usb class's device, if any */
        struct class_device *class_dev;
};
#define to_usb_interface(d) container_of(d, struct usb_interface, dev)
#define interface_to_usbdev(intf) \
@@ -361,7 +360,7 @@ struct usb_device {
        char *serial;                   /* iSerialNumber string, if present */

        struct list_head filelist;
        struct device *usbfs_dev;
        struct class_device *class_dev;
        struct dentry *usbfs_dentry;    /* usbfs dentry entry for the device */

        /*
include/linux/usb/serial.h (new file, 300 lines)
@@ -0,0 +1,300 @@
/*
 * USB Serial Converter stuff
 *
 * Copyright (C) 1999 - 2005
 *      Greg Kroah-Hartman (greg@kroah.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 */


#ifndef __LINUX_USB_SERIAL_H
#define __LINUX_USB_SERIAL_H

#include <linux/kref.h>
#include <linux/mutex.h>

#define SERIAL_TTY_MAJOR        188     /* Nice legal number now */
#define SERIAL_TTY_MINORS       255     /* loads of devices :) */

#define MAX_NUM_PORTS           8       /* The maximum number of ports one device can grab at once */

/* parity check flag */
#define RELEVANT_IFLAG(iflag)   (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))

/**
 * usb_serial_port: structure for the specific ports of a device.
 * @serial: pointer back to the struct usb_serial owner of this port.
 * @tty: pointer to the corresponding tty for this port.
 * @lock: spinlock to grab when updating portions of this structure.
 * @mutex: mutex used to synchronize serial_open() and serial_close()
 *      access for this port.
 * @number: the number of the port (the minor number).
 * @interrupt_in_buffer: pointer to the interrupt in buffer for this port.
 * @interrupt_in_urb: pointer to the interrupt in struct urb for this port.
 * @interrupt_in_endpointAddress: endpoint address for the interrupt in pipe
 *      for this port.
 * @interrupt_out_buffer: pointer to the interrupt out buffer for this port.
 * @interrupt_out_size: the size of the interrupt_out_buffer, in bytes.
 * @interrupt_out_urb: pointer to the interrupt out struct urb for this port.
 * @interrupt_out_endpointAddress: endpoint address for the interrupt out pipe
 *      for this port.
 * @bulk_in_buffer: pointer to the bulk in buffer for this port.
 * @read_urb: pointer to the bulk in struct urb for this port.
 * @bulk_in_endpointAddress: endpoint address for the bulk in pipe for this
 *      port.
 * @bulk_out_buffer: pointer to the bulk out buffer for this port.
 * @bulk_out_size: the size of the bulk_out_buffer, in bytes.
 * @write_urb: pointer to the bulk out struct urb for this port.
 * @bulk_out_endpointAddress: endpoint address for the bulk out pipe for this
 *      port.
 * @write_wait: a wait_queue_head_t used by the port.
 * @work: work queue entry for the line discipline waking up.
 * @open_count: number of times this port has been opened.
 *
 * This structure is used by the usb-serial core and drivers for the specific
 * ports of a device.
 */
struct usb_serial_port {
        struct usb_serial *     serial;
        struct tty_struct *     tty;
        spinlock_t              lock;
        struct mutex            mutex;
        unsigned char           number;

        unsigned char *         interrupt_in_buffer;
        struct urb *            interrupt_in_urb;
        __u8                    interrupt_in_endpointAddress;

        unsigned char *         interrupt_out_buffer;
        int                     interrupt_out_size;
        struct urb *            interrupt_out_urb;
        __u8                    interrupt_out_endpointAddress;

        unsigned char *         bulk_in_buffer;
        int                     bulk_in_size;
        struct urb *            read_urb;
        __u8                    bulk_in_endpointAddress;

        unsigned char *         bulk_out_buffer;
        int                     bulk_out_size;
        struct urb *            write_urb;
        int                     write_urb_busy;
        __u8                    bulk_out_endpointAddress;

        wait_queue_head_t       write_wait;
        struct work_struct      work;
        int                     open_count;
        struct device           dev;
};
#define to_usb_serial_port(d) container_of(d, struct usb_serial_port, dev)

/* get and set the port private data pointer helper functions */
static inline void *usb_get_serial_port_data (struct usb_serial_port *port)
{
        return dev_get_drvdata(&port->dev);
}

static inline void usb_set_serial_port_data (struct usb_serial_port *port, void *data)
{
        dev_set_drvdata(&port->dev, data);
}

/**
 * usb_serial - structure used by the usb-serial core for a device
 * @dev: pointer to the struct usb_device for this device
 * @type: pointer to the struct usb_serial_driver for this device
 * @interface: pointer to the struct usb_interface for this device
 * @minor: the starting minor number for this device
 * @num_ports: the number of ports this device has
 * @num_interrupt_in: number of interrupt in endpoints we have
 * @num_interrupt_out: number of interrupt out endpoints we have
 * @num_bulk_in: number of bulk in endpoints we have
 * @num_bulk_out: number of bulk out endpoints we have
 * @port: array of struct usb_serial_port structures for the different ports.
 * @private: place to put any driver specific information that is needed.  The
 *      usb-serial driver is required to manage this data, the usb-serial core
 *      will not touch this.  Use usb_get_serial_data() and
 *      usb_set_serial_data() to access this.
 */
struct usb_serial {
        struct usb_device *             dev;
        struct usb_serial_driver *      type;
        struct usb_interface *          interface;
        unsigned char                   minor;
        unsigned char                   num_ports;
        unsigned char                   num_port_pointers;
        char                            num_interrupt_in;
        char                            num_interrupt_out;
        char                            num_bulk_in;
        char                            num_bulk_out;
        struct usb_serial_port *        port[MAX_NUM_PORTS];
        struct kref                     kref;
        void *                          private;
};
#define to_usb_serial(d) container_of(d, struct usb_serial, kref)

#define NUM_DONT_CARE (-1)

/* get and set the serial private data pointer helper functions */
static inline void *usb_get_serial_data (struct usb_serial *serial)
{
        return serial->private;
}

static inline void usb_set_serial_data (struct usb_serial *serial, void *data)
{
        serial->private = data;
}

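A hedged sketch of how a driver might hang per-port state off these helpers during attach() (the struct, field, and function names are invented; kzalloc comes from linux/slab.h):

#include <linux/slab.h>

struct demo_port_private {
        int tx_outstanding;    /* example per-port bookkeeping */
};

/* attach() runs once the usb_serial structure is set up: allocate one
 * private block per port and stash it with usb_set_serial_port_data(). */
static int demo_attach(struct usb_serial *serial)
{
        int i;

        for (i = 0; i < serial->num_ports; ++i) {
                struct demo_port_private *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

                if (!priv)
                        return -ENOMEM; /* bail out; cleanup omitted in this sketch */
                usb_set_serial_port_data(serial->port[i], priv);
        }
        return 0;
}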
/**
 * usb_serial_driver - describes a usb serial driver
 * @description: pointer to a string that describes this driver.  This string is used
 *      in the syslog messages when a device is inserted or removed.
 * @id_table: pointer to a list of usb_device_id structures that define all
 *      of the devices this structure can support.
 * @num_interrupt_in: the number of interrupt in endpoints this device will
 *      have.
 * @num_interrupt_out: the number of interrupt out endpoints this device will
 *      have.
 * @num_bulk_in: the number of bulk in endpoints this device will have.
 * @num_bulk_out: the number of bulk out endpoints this device will have.
 * @num_ports: the number of different ports this device will have.
 * @calc_num_ports: pointer to a function to determine how many ports this
 *      device has dynamically.  It will be called after the probe()
 *      callback is called, but before attach()
 * @probe: pointer to the driver's probe function.
 *      This will be called when the device is inserted into the system,
 *      but before the device has been fully initialized by the usb_serial
 *      subsystem.  Use this function to download any firmware to the device,
 *      or any other early initialization that might be needed.
 *      Return 0 to continue on with the initialization sequence.  Anything
 *      else will abort it.
 * @attach: pointer to the driver's attach function.
 *      This will be called when the struct usb_serial structure is fully
 *      set up.  Do any local initialization of the device, or any private
 *      memory structure allocation at this point in time.
 * @shutdown: pointer to the driver's shutdown function.  This will be
 *      called when the device is removed from the system.
 *
 * This structure defines a USB Serial driver.  It provides all of
 * the information that the USB serial core code needs.  If the function
 * pointers are defined, then the USB serial core code will call them when
 * the corresponding tty port functions are called.  If they are not
 * called, the generic serial function will be used instead.
 *
 * The driver.owner field should be set to the module owner of this driver.
 * The driver.name field should be set to the name of this driver (remember
 * it will show up in sysfs, so it needs to be short and to the point.
 * Using the module name is a good idea.)
 */
struct usb_serial_driver {
        const char *description;
        const struct usb_device_id *id_table;
        char    num_interrupt_in;
        char    num_interrupt_out;
        char    num_bulk_in;
        char    num_bulk_out;
        char    num_ports;

        struct list_head        driver_list;
        struct device_driver    driver;

        int (*probe) (struct usb_serial *serial, const struct usb_device_id *id);
        int (*attach) (struct usb_serial *serial);
        int (*calc_num_ports) (struct usb_serial *serial);

        void (*shutdown) (struct usb_serial *serial);

        int (*port_probe) (struct usb_serial_port *port);
        int (*port_remove) (struct usb_serial_port *port);

        /* serial function calls */
        int  (*open)            (struct usb_serial_port *port, struct file * filp);
        void (*close)           (struct usb_serial_port *port, struct file * filp);
        int  (*write)           (struct usb_serial_port *port, const unsigned char *buf, int count);
        int  (*write_room)      (struct usb_serial_port *port);
        int  (*ioctl)           (struct usb_serial_port *port, struct file * file, unsigned int cmd, unsigned long arg);
        void (*set_termios)     (struct usb_serial_port *port, struct termios * old);
        void (*break_ctl)       (struct usb_serial_port *port, int break_state);
        int  (*chars_in_buffer) (struct usb_serial_port *port);
        void (*throttle)        (struct usb_serial_port *port);
        void (*unthrottle)      (struct usb_serial_port *port);
        int  (*tiocmget)        (struct usb_serial_port *port, struct file *file);
        int  (*tiocmset)        (struct usb_serial_port *port, struct file *file, unsigned int set, unsigned int clear);

        void (*read_int_callback)(struct urb *urb, struct pt_regs *regs);
        void (*write_int_callback)(struct urb *urb, struct pt_regs *regs);
        void (*read_bulk_callback)(struct urb *urb, struct pt_regs *regs);
        void (*write_bulk_callback)(struct urb *urb, struct pt_regs *regs);
};
#define to_usb_serial_driver(d) container_of(d, struct usb_serial_driver, driver)

extern int  usb_serial_register(struct usb_serial_driver *driver);
extern void usb_serial_deregister(struct usb_serial_driver *driver);
extern void usb_serial_port_softint(struct usb_serial_port *port);

extern int usb_serial_probe(struct usb_interface *iface, const struct usb_device_id *id);
extern void usb_serial_disconnect(struct usb_interface *iface);

extern int ezusb_writememory (struct usb_serial *serial, int address, unsigned char *data, int length, __u8 bRequest);
extern int ezusb_set_reset (struct usb_serial *serial, unsigned char reset_bit);

/* USB Serial console functions */
#ifdef CONFIG_USB_SERIAL_CONSOLE
extern void usb_serial_console_init (int debug, int minor);
extern void usb_serial_console_exit (void);
extern void usb_serial_console_disconnect(struct usb_serial *serial);
#else
static inline void usb_serial_console_init (int debug, int minor) { }
static inline void usb_serial_console_exit (void) { }
static inline void usb_serial_console_disconnect(struct usb_serial *serial) {}
#endif

/* Functions needed by other parts of the usbserial core */
extern struct usb_serial *usb_serial_get_by_index (unsigned int minor);
extern void usb_serial_put(struct usb_serial *serial);
extern int usb_serial_generic_open (struct usb_serial_port *port, struct file *filp);
extern int usb_serial_generic_write (struct usb_serial_port *port, const unsigned char *buf, int count);
extern void usb_serial_generic_close (struct usb_serial_port *port, struct file *filp);
extern int usb_serial_generic_write_room (struct usb_serial_port *port);
extern int usb_serial_generic_chars_in_buffer (struct usb_serial_port *port);
extern void usb_serial_generic_read_bulk_callback (struct urb *urb, struct pt_regs *regs);
extern void usb_serial_generic_write_bulk_callback (struct urb *urb, struct pt_regs *regs);
extern void usb_serial_generic_shutdown (struct usb_serial *serial);
extern int usb_serial_generic_register (int debug);
extern void usb_serial_generic_deregister (void);

extern int usb_serial_bus_register (struct usb_serial_driver *device);
extern void usb_serial_bus_deregister (struct usb_serial_driver *device);

extern struct usb_serial_driver usb_serial_generic_device;
extern struct bus_type usb_serial_bus_type;
extern struct tty_driver *usb_serial_tty_driver;

static inline void usb_serial_debug_data(int debug,
                                         struct device *dev,
                                         const char *function, int size,
                                         const unsigned char *data)
{
        int i;

        if (debug) {
                dev_printk(KERN_DEBUG, dev, "%s - length = %d, data = ", function, size);
                for (i = 0; i < size; ++i)
                        printk ("%.2x ", data[i]);
                printk ("\n");
        }
}

/* Use our own dbg macro */
#undef dbg
#define dbg(format, arg...) do { if (debug) printk(KERN_DEBUG "%s: " format "\n" , __FILE__ , ## arg); } while (0)



#endif  /* ifdef __LINUX_USB_SERIAL_H */
@@ -51,6 +51,9 @@
#define USB_RECIP_INTERFACE             0x01
#define USB_RECIP_ENDPOINT              0x02
#define USB_RECIP_OTHER                 0x03
/* From Wireless USB 1.0 */
#define USB_RECIP_PORT                  0x04
#define USB_RECIP_RPIPE                 0x05

/*
 * Standard requests, for the bRequest field of a SETUP packet.
@@ -73,7 +76,9 @@

#define USB_REQ_SET_ENCRYPTION          0x0D    /* Wireless USB */
#define USB_REQ_GET_ENCRYPTION          0x0E
#define USB_REQ_RPIPE_ABORT             0x0E
#define USB_REQ_SET_HANDSHAKE           0x0F
#define USB_REQ_RPIPE_RESET             0x0F
#define USB_REQ_GET_HANDSHAKE           0x10
#define USB_REQ_SET_CONNECTION          0x11
#define USB_REQ_SET_SECURITY_DATA       0x12
@@ -159,6 +164,8 @@ struct usb_ctrlrequest {
#define USB_DT_BOS                      0x0f
#define USB_DT_DEVICE_CAPABILITY        0x10
#define USB_DT_WIRELESS_ENDPOINT_COMP   0x11
#define USB_DT_WIRE_ADAPTER             0x21
#define USB_DT_RPIPE                    0x22

/* conventional codes for class-specific descriptors */
#define USB_DT_CS_DEVICE                0x21
@@ -872,9 +872,9 @@ int usb_gadget_config_buf(const struct usb_config_descriptor *config,
/* utility wrapping a simple endpoint selection policy */

extern struct usb_ep *usb_ep_autoconfig (struct usb_gadget *,
                        struct usb_endpoint_descriptor *) __init;
                        struct usb_endpoint_descriptor *) __devinit;

extern void usb_ep_autoconfig_reset (struct usb_gadget *) __init;
extern void usb_ep_autoconfig_reset (struct usb_gadget *) __devinit;

#endif /* __KERNEL__ */
@@ -43,6 +43,10 @@
                /* Need delay after Command phase */           \
        US_FLAG(NO_WP_DETECT,   0x00000200)                    \
                /* Don't check for write-protect */            \
        US_FLAG(MAX_SECTORS_64, 0x00000400)                    \
                /* Sets max_sectors to 64 */                   \
        US_FLAG(IGNORE_DEVICE,  0x00000800)                    \
                /* Don't claim device */

#define US_FLAG(name, value)    US_FL_##name = value ,
enum { US_DO_ALL_FLAGS };
@@ -12,10 +12,10 @@
#ifndef __LINUX_VIDEODEV_H
#define __LINUX_VIDEODEV_H

#define HAVE_V4L1 1

#include <linux/videodev2.h>

#if defined(CONFIG_VIDEO_V4L1_COMPAT) || !defined (__KERNEL__)

struct video_capability
{
        char name[32];
@@ -336,6 +336,8 @@ struct video_code
#define VID_HARDWARE_SN9C102    38
#define VID_HARDWARE_ARV        39

#endif /* CONFIG_VIDEO_V4L1_COMPAT */

#endif /* __LINUX_VIDEODEV_H */

/*
@@ -22,8 +22,6 @@
#endif
#include <linux/types.h>

#define HAVE_V4L2 1

/*
 * Common stuff for both V4L1 and V4L2
 * Moved from videodev.h
@@ -716,7 +714,7 @@ struct v4l2_ext_control
                __s64 value64;
                void *reserved;
        };
        };
} __attribute__ ((packed));

struct v4l2_ext_controls
{
@@ -11,6 +11,7 @@ struct vm_area_struct;
#define VM_ALLOC        0x00000002      /* vmalloc() */
#define VM_MAP          0x00000004      /* vmap()ed pages */
#define VM_USERMAP      0x00000008      /* suitable for remap_vmalloc_range */
#define VM_VPAGES       0x00000010      /* buffer for pages was vmalloc'ed */
/* bits [20..32] reserved for arch specific ioremap internals */

/*
@@ -41,23 +41,23 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
        __get_cpu_var(vm_event_states.event[item])++;
        __get_cpu_var(vm_event_states).event[item]++;
}

static inline void count_vm_event(enum vm_event_item item)
{
        get_cpu_var(vm_event_states.event[item])++;
        get_cpu_var(vm_event_states).event[item]++;
        put_cpu();
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
        __get_cpu_var(vm_event_states.event[item]) += delta;
        __get_cpu_var(vm_event_states).event[item] += delta;
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
        get_cpu_var(vm_event_states.event[item])++;
        get_cpu_var(vm_event_states).event[item] += delta;
        put_cpu();
}

@@ -186,11 +186,16 @@ static inline void __mod_zone_page_state(struct zone *zone,
        zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
        atomic_long_inc(&zone->vm_stat[item]);
        atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
                        enum zone_stat_item item)
{
        atomic_long_inc(&page_zone(page)->vm_stat[item]);
        atomic_long_inc(&vm_stat[item]);
        __inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
@@ -1,6 +1,16 @@
#ifndef _LINUX_VT_H
#define _LINUX_VT_H

/*
 * These constants are also useful for user-level apps (e.g., VC
 * resizing).
 */
#define MIN_NR_CONSOLES 1       /* must be at least 1 */
#define MAX_NR_CONSOLES 63      /* serial lines start at 64 */
#define MAX_NR_USER_CONSOLES 63 /* must be root to allocate above this */
                /* Note: the ioctl VT_GETSTATE does not work for
                   consoles 16 and higher (since it returns a short) */

/* 0x56 is 'V', to avoid collision with termios and kd */

#define VT_OPENQRY      0x5600  /* find available vt */
@@ -50,5 +60,6 @@ struct vt_consize {
#define VT_RESIZEX      0x560A  /* set kernel's idea of screensize + more */
#define VT_LOCKSWITCH   0x560B  /* disallow vt switching */
#define VT_UNLOCKSWITCH 0x560C  /* allow vt switching */
#define VT_GETHIFONTMASK 0x560D /* return hi font mask */

#endif /* _LINUX_VT_H */
@@ -26,6 +26,7 @@

extern void kd_mksound(unsigned int hz, unsigned int ticks);
extern int kbd_rate(struct kbd_repeat *rep);
extern int fg_console, last_console, want_console;

/* console.c */

@@ -77,17 +77,7 @@ struct task_struct;
#define __WAIT_BIT_KEY_INITIALIZER(word, bit)                           \
        { .flags = word, .bit_nr = bit, }

/*
 * lockdep: we want one lock-class for all waitqueue locks.
 */
extern struct lock_class_key waitqueue_lock_key;

static inline void init_waitqueue_head(wait_queue_head_t *q)
{
        spin_lock_init(&q->lock);
        lockdep_set_class(&q->lock, &waitqueue_lock_key);
        INIT_LIST_HEAD(&q->task_list);
}
extern void init_waitqueue_head(wait_queue_head_t *q);

static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{