mirror of
				git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
				synced 2025-09-04 20:19:47 +08:00 
			
		
		
		
	 6aa7de0591
			
		
	
	
		6aa7de0591
		
	
	
	
	
		
			
			Please do not apply this to mainline directly, instead please re-run the
coccinelle script shown below and apply its output.
For several reasons, it is desirable to use {READ,WRITE}_ONCE() in
preference to ACCESS_ONCE(), and new code is expected to use one of the
former. So far, there's been no reason to change most existing uses of
ACCESS_ONCE(), as these aren't harmful, and changing them results in
churn.
However, for some features, the read/write distinction is critical to
correct operation. To distinguish these cases, separate read/write
accessors must be used. This patch migrates (most) remaining
ACCESS_ONCE() instances to {READ,WRITE}_ONCE(), using the following
coccinelle script:
----
// Convert trivial ACCESS_ONCE() uses to equivalent READ_ONCE() and
// WRITE_ONCE()
// $ make coccicheck COCCI=/home/mark/once.cocci SPFLAGS="--include-headers" MODE=patch
virtual patch
@ depends on patch @
expression E1, E2;
@@
- ACCESS_ONCE(E1) = E2
+ WRITE_ONCE(E1, E2)
@ depends on patch @
expression E;
@@
- ACCESS_ONCE(E)
+ READ_ONCE(E)
----
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: davem@davemloft.net
Cc: linux-arch@vger.kernel.org
Cc: mpe@ellerman.id.au
Cc: shuah@kernel.org
Cc: snitzer@redhat.com
Cc: thor.thayer@linux.intel.com
Cc: tj@kernel.org
Cc: viro@zeniv.linux.org.uk
Cc: will.deacon@arm.com
Link: http://lkml.kernel.org/r/1508792849-3115-19-git-send-email-paulmck@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
		
	
			
		
			
				
	
	
		
			105 lines
		
	
	
		
			3.1 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
			
		
		
	
	
			105 lines
		
	
	
		
			3.1 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
| /*
 | |
|  * Lock-less NULL terminated single linked list
 | |
|  *
 | |
|  * The basic atomic operation of this list is cmpxchg on long.  On
 | |
|  * architectures that don't have NMI-safe cmpxchg implementation, the
 | |
|  * list can NOT be used in NMI handlers.  So code that uses the list in
 | |
|  * an NMI handler should depend on CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 | |
|  *
 | |
|  * Copyright 2010,2011 Intel Corp.
 | |
|  *   Author: Huang Ying <ying.huang@intel.com>
 | |
|  *
 | |
|  * This program is free software; you can redistribute it and/or
 | |
|  * modify it under the terms of the GNU General Public License version
 | |
|  * 2 as published by the Free Software Foundation;
 | |
|  *
 | |
|  * This program is distributed in the hope that it will be useful,
 | |
|  * but WITHOUT ANY WARRANTY; without even the implied warranty of
 | |
|  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | |
|  * GNU General Public License for more details.
 | |
|  *
 | |
|  * You should have received a copy of the GNU General Public License
 | |
|  * along with this program; if not, write to the Free Software
 | |
|  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 | |
|  */
 | |
| #include <linux/kernel.h>
 | |
| #include <linux/export.h>
 | |
| #include <linux/llist.h>
 | |
| 
 | |
| 
 | |
/**
 * llist_add_batch - add several linked entries in batch
 * @new_first:	first entry in batch to be added
 * @new_last:	last entry in batch to be added
 * @head:	the head for your lock-less list
 *
 * Return whether list is empty before adding.
 */
bool llist_add_batch(struct llist_node *new_first, struct llist_node *new_last,
		     struct llist_head *head)
{
	struct llist_node *first;

	do {
		/*
		 * READ_ONCE() forces a fresh load of head->first on every
		 * retry; concurrent producers may have swapped in a new
		 * head between iterations.  The batch's tail is linked to
		 * the current head before the cmpxchg() publishes it.
		 */
		new_last->next = first = READ_ONCE(head->first);
	} while (cmpxchg(&head->first, first, new_first) != first);

	/* first was the head we displaced; NULL means the list was empty. */
	return !first;
}
EXPORT_SYMBOL_GPL(llist_add_batch);
 | |
| 
 | |
/**
 * llist_del_first - delete the first entry of lock-less list
 * @head:	the head for your lock-less list
 *
 * If list is empty, return NULL, otherwise, return the first entry
 * deleted, this is the newest added one.
 *
 * Only one llist_del_first user can be used simultaneously with
 * multiple llist_add users without lock.  Because otherwise
 * llist_del_first, llist_add, llist_add (or llist_del_all, llist_add,
 * llist_add) sequence in another user may change @head->first->next,
 * but keep @head->first.  If multiple consumers are needed, please
 * use llist_del_all or use lock between consumers.
 */
struct llist_node *llist_del_first(struct llist_head *head)
{
	struct llist_node *entry, *old_entry, *next;

	/*
	 * Acquire load: ensures that the read of entry->next below
	 * observes the producer's store that linked the entry in
	 * (see llist_add_batch()).
	 */
	entry = smp_load_acquire(&head->first);
	for (;;) {
		if (entry == NULL)
			return NULL;
		old_entry = entry;
		/* READ_ONCE(): entry is shared; avoid a torn/cached read. */
		next = READ_ONCE(entry->next);
		/*
		 * Try to swing head->first from entry to its successor.
		 * On failure, cmpxchg() returns the current head, which
		 * becomes the new candidate for the next iteration.
		 */
		entry = cmpxchg(&head->first, old_entry, next);
		if (entry == old_entry)
			break;
	}

	return entry;
}
EXPORT_SYMBOL_GPL(llist_del_first);
 | |
| 
 | |
| /**
 | |
|  * llist_reverse_order - reverse order of a llist chain
 | |
|  * @head:	first item of the list to be reversed
 | |
|  *
 | |
|  * Reverse the order of a chain of llist entries and return the
 | |
|  * new first entry.
 | |
|  */
 | |
| struct llist_node *llist_reverse_order(struct llist_node *head)
 | |
| {
 | |
| 	struct llist_node *new_head = NULL;
 | |
| 
 | |
| 	while (head) {
 | |
| 		struct llist_node *tmp = head;
 | |
| 		head = head->next;
 | |
| 		tmp->next = new_head;
 | |
| 		new_head = tmp;
 | |
| 	}
 | |
| 
 | |
| 	return new_head;
 | |
| }
 | |
| EXPORT_SYMBOL_GPL(llist_reverse_order);
 |