diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-06-01 10:34:35 -0700 | 
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-06-01 10:34:35 -0700 | 
| commit | 1193755ac6328ad240ba987e6ec41d5e8baf0680 (patch) | |
| tree | 40bf847d7e3ebaa57b107151d14e6cd1d280cc6d /include/linux/lglock.h | |
| parent | 4edebed86690eb8db9af3ab85baf4a34e73266cc (diff) | |
| parent | 0ef97dcfce4179a2eba046b855ee2f91d6f1b414 (diff) | |
| download | olio-linux-3.10-1193755ac6328ad240ba987e6ec41d5e8baf0680.tar.xz olio-linux-3.10-1193755ac6328ad240ba987e6ec41d5e8baf0680.zip  | |
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull vfs changes from Al Viro.
 "A lot of misc stuff.  The obvious groups:
   * Miklos' atomic_open series; kills the damn abuse of
     ->d_revalidate() by NFS, which was the major stumbling block for
     all work in that area.
   * ripping security_file_mmap() and dealing with deadlocks in the
     area; sanitizing the neighborhood of vm_mmap()/vm_munmap() in
     general.
   * ->encode_fh() switched to saner API; insane fake dentry in
     mm/cleancache.c gone.
   * assorted annotations in fs (endianness, __user)
   * parts of Artem's ->s_dirty work (jffs2 and reiserfs parts)
   * ->update_time() work from Josef.
   * other bits and pieces all over the place.
  Normally it would've been in two or three pull requests, but
  signal.git stuff had eaten a lot of time during this cycle ;-/"
Fix up trivial conflicts in Documentation/filesystems/vfs.txt (the
'truncate_range' inode method was removed by the VM changes, the VFS
update adds an 'update_time()' method), and in fs/btrfs/ulist.[ch] (due
to sparse fix added twice, with other changes nearby).
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (95 commits)
  nfs: don't open in ->d_revalidate
  vfs: retry last component if opening stale dentry
  vfs: nameidata_to_filp(): don't throw away file on error
  vfs: nameidata_to_filp(): inline __dentry_open()
  vfs: do_dentry_open(): don't put filp
  vfs: split __dentry_open()
  vfs: do_last() common post lookup
  vfs: do_last(): add audit_inode before open
  vfs: do_last(): only return EISDIR for O_CREAT
  vfs: do_last(): check LOOKUP_DIRECTORY
  vfs: do_last(): make ENOENT exit RCU safe
  vfs: make follow_link check RCU safe
  vfs: do_last(): use inode variable
  vfs: do_last(): inline walk_component()
  vfs: do_last(): make exit RCU safe
  vfs: split do_lookup()
  Btrfs: move over to use ->update_time
  fs: introduce inode operation ->update_time
  reiserfs: get rid of resierfs_sync_super
  reiserfs: mark the superblock as dirty a bit later
  ...
Diffstat (limited to 'include/linux/lglock.h')
| -rw-r--r-- | include/linux/lglock.h | 179 | 
1 files changed, 26 insertions, 153 deletions
diff --git a/include/linux/lglock.h b/include/linux/lglock.h index 87f402ccec5..f01e5f6d1f0 100644 --- a/include/linux/lglock.h +++ b/include/linux/lglock.h @@ -23,28 +23,17 @@  #include <linux/lockdep.h>  #include <linux/percpu.h>  #include <linux/cpu.h> +#include <linux/notifier.h>  /* can make br locks by using local lock for read side, global lock for write */ -#define br_lock_init(name)	name##_lock_init() -#define br_read_lock(name)	name##_local_lock() -#define br_read_unlock(name)	name##_local_unlock() -#define br_write_lock(name)	name##_global_lock_online() -#define br_write_unlock(name)	name##_global_unlock_online() +#define br_lock_init(name)	lg_lock_init(name, #name) +#define br_read_lock(name)	lg_local_lock(name) +#define br_read_unlock(name)	lg_local_unlock(name) +#define br_write_lock(name)	lg_global_lock(name) +#define br_write_unlock(name)	lg_global_unlock(name) -#define DECLARE_BRLOCK(name)	DECLARE_LGLOCK(name)  #define DEFINE_BRLOCK(name)	DEFINE_LGLOCK(name) - -#define lg_lock_init(name)	name##_lock_init() -#define lg_local_lock(name)	name##_local_lock() -#define lg_local_unlock(name)	name##_local_unlock() -#define lg_local_lock_cpu(name, cpu)	name##_local_lock_cpu(cpu) -#define lg_local_unlock_cpu(name, cpu)	name##_local_unlock_cpu(cpu) -#define lg_global_lock(name)	name##_global_lock() -#define lg_global_unlock(name)	name##_global_unlock() -#define lg_global_lock_online(name) name##_global_lock_online() -#define lg_global_unlock_online(name) name##_global_unlock_online() -  #ifdef CONFIG_DEBUG_LOCK_ALLOC  #define LOCKDEP_INIT_MAP lockdep_init_map @@ -59,142 +48,26 @@  #define DEFINE_LGLOCK_LOCKDEP(name)  #endif - -#define DECLARE_LGLOCK(name)						\ - extern void name##_lock_init(void);					\ - extern void name##_local_lock(void);					\ - extern void name##_local_unlock(void);					\ - extern void name##_local_lock_cpu(int cpu);				\ - extern void name##_local_unlock_cpu(int cpu);				\ - extern void name##_global_lock(void);					\ - extern void 
name##_global_unlock(void);				\ - extern void name##_global_lock_online(void);				\ - extern void name##_global_unlock_online(void);				\ +struct lglock { +	arch_spinlock_t __percpu *lock; +#ifdef CONFIG_DEBUG_LOCK_ALLOC +	struct lock_class_key lock_key; +	struct lockdep_map    lock_dep_map; +#endif +};  #define DEFINE_LGLOCK(name)						\ -									\ - DEFINE_SPINLOCK(name##_cpu_lock);					\ - cpumask_t name##_cpus __read_mostly;					\ - DEFINE_PER_CPU(arch_spinlock_t, name##_lock);				\ - DEFINE_LGLOCK_LOCKDEP(name);						\ -									\ - static int								\ - name##_lg_cpu_callback(struct notifier_block *nb,			\ -				unsigned long action, void *hcpu)	\ - {									\ -	switch (action & ~CPU_TASKS_FROZEN) {				\ -	case CPU_UP_PREPARE:						\ -		spin_lock(&name##_cpu_lock);				\ -		cpu_set((unsigned long)hcpu, name##_cpus);		\ -		spin_unlock(&name##_cpu_lock);				\ -		break;							\ -	case CPU_UP_CANCELED: case CPU_DEAD:				\ -		spin_lock(&name##_cpu_lock);				\ -		cpu_clear((unsigned long)hcpu, name##_cpus);		\ -		spin_unlock(&name##_cpu_lock);				\ -	}								\ -	return NOTIFY_OK;						\ - }									\ - static struct notifier_block name##_lg_cpu_notifier = {		\ -	.notifier_call = name##_lg_cpu_callback,			\ - };									\ - void name##_lock_init(void) {						\ -	int i;								\ -	LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \ -	for_each_possible_cpu(i) {					\ -		arch_spinlock_t *lock;					\ -		lock = &per_cpu(name##_lock, i);			\ -		*lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;	\ -	}								\ -	register_hotcpu_notifier(&name##_lg_cpu_notifier);		\ -	get_online_cpus();						\ -	for_each_online_cpu(i)						\ -		cpu_set(i, name##_cpus);				\ -	put_online_cpus();						\ - }									\ - EXPORT_SYMBOL(name##_lock_init);					\ -									\ - void name##_local_lock(void) {						\ -	arch_spinlock_t *lock;						\ -	preempt_disable();						\ -	rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_);	\ -	lock = &__get_cpu_var(name##_lock);				\ 
-	arch_spin_lock(lock);						\ - }									\ - EXPORT_SYMBOL(name##_local_lock);					\ -									\ - void name##_local_unlock(void) {					\ -	arch_spinlock_t *lock;						\ -	rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_);		\ -	lock = &__get_cpu_var(name##_lock);				\ -	arch_spin_unlock(lock);						\ -	preempt_enable();						\ - }									\ - EXPORT_SYMBOL(name##_local_unlock);					\ -									\ - void name##_local_lock_cpu(int cpu) {					\ -	arch_spinlock_t *lock;						\ -	preempt_disable();						\ -	rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_);	\ -	lock = &per_cpu(name##_lock, cpu);				\ -	arch_spin_lock(lock);						\ - }									\ - EXPORT_SYMBOL(name##_local_lock_cpu);					\ -									\ - void name##_local_unlock_cpu(int cpu) {				\ -	arch_spinlock_t *lock;						\ -	rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_);		\ -	lock = &per_cpu(name##_lock, cpu);				\ -	arch_spin_unlock(lock);						\ -	preempt_enable();						\ - }									\ - EXPORT_SYMBOL(name##_local_unlock_cpu);				\ -									\ - void name##_global_lock_online(void) {					\ -	int i;								\ -	spin_lock(&name##_cpu_lock);					\ -	rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_);		\ -	for_each_cpu(i, &name##_cpus) {					\ -		arch_spinlock_t *lock;					\ -		lock = &per_cpu(name##_lock, i);			\ -		arch_spin_lock(lock);					\ -	}								\ - }									\ - EXPORT_SYMBOL(name##_global_lock_online);				\ -									\ - void name##_global_unlock_online(void) {				\ -	int i;								\ -	rwlock_release(&name##_lock_dep_map, 1, _RET_IP_);		\ -	for_each_cpu(i, &name##_cpus) {					\ -		arch_spinlock_t *lock;					\ -		lock = &per_cpu(name##_lock, i);			\ -		arch_spin_unlock(lock);					\ -	}								\ -	spin_unlock(&name##_cpu_lock);					\ - }									\ - EXPORT_SYMBOL(name##_global_unlock_online);				\ -									\ - void name##_global_lock(void) {					\ -	int i;								\ -	preempt_disable();						\ -	rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_);		\ -	for_each_possible_cpu(i) {					\ -		
arch_spinlock_t *lock;					\ -		lock = &per_cpu(name##_lock, i);			\ -		arch_spin_lock(lock);					\ -	}								\ - }									\ - EXPORT_SYMBOL(name##_global_lock);					\ -									\ - void name##_global_unlock(void) {					\ -	int i;								\ -	rwlock_release(&name##_lock_dep_map, 1, _RET_IP_);		\ -	for_each_possible_cpu(i) {					\ -		arch_spinlock_t *lock;					\ -		lock = &per_cpu(name##_lock, i);			\ -		arch_spin_unlock(lock);					\ -	}								\ -	preempt_enable();						\ - }									\ - EXPORT_SYMBOL(name##_global_unlock); +	DEFINE_LGLOCK_LOCKDEP(name);					\ +	DEFINE_PER_CPU(arch_spinlock_t, name ## _lock)			\ +	= __ARCH_SPIN_LOCK_UNLOCKED;					\ +	struct lglock name = { .lock = &name ## _lock } + +void lg_lock_init(struct lglock *lg, char *name); +void lg_local_lock(struct lglock *lg); +void lg_local_unlock(struct lglock *lg); +void lg_local_lock_cpu(struct lglock *lg, int cpu); +void lg_local_unlock_cpu(struct lglock *lg, int cpu); +void lg_global_lock(struct lglock *lg); +void lg_global_unlock(struct lglock *lg); +  #endif  |