Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Synced 2025-09-04 20:19:47 +08:00

Commit 689911c734
As part of addressing the "y2038 problem" for in-kernel uses, this patch
converts read_boot_clock() to read_boot_clock64() and read_persistent_clock()
to read_persistent_clock64() using timespec64.

Rename some instances of 'timespec' to 'timespec64' in time.c and related
references.

Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: linux390@de.ibm.com
Signed-off-by: Xunlei Pang <pang.xunlei@linaro.org>
[jstultz: Fixed minor style and grammar tweaks pointed out by Ingo]
Signed-off-by: John Stultz <john.stultz@linaro.org>
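The interface change described above has roughly the following shape. This is a minimal sketch built from the commit description and the helpers declared in the header below; the function bodies are assumptions, not the actual diff applied to the s390 time.c.

/*
 * Illustrative sketch only: bodies are assumed from tod_to_timeval(),
 * get_tod_clock() and TOD_UNIX_EPOCH as declared in this header; the
 * real definitions live in the s390 time.c.
 */

/* Before: 32-bit time_t based struct timespec, overflows in January 2038. */
void read_persistent_clock(struct timespec *ts)
{
	tod_to_timeval(get_tod_clock() - TOD_UNIX_EPOCH, ts);
}

/*
 * After: struct timespec64 carries a 64-bit seconds field and stays valid
 * past 2038; read_boot_clock64() is converted in the same way.
 */
void read_persistent_clock64(struct timespec64 *ts)
{
	tod_to_timeval(get_tod_clock() - TOD_UNIX_EPOCH, ts);
}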
		
			
				
	
	
		
165 lines, 3.8 KiB, C
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *
 *  Derived from "include/asm-i386/timex.h"
 *    Copyright (C) 1992, Linus Torvalds
 */

#ifndef _ASM_S390_TIMEX_H
#define _ASM_S390_TIMEX_H

#include <asm/lowcore.h>
#include <linux/time64.h>

/* The value of the TOD clock for 1.1.1970. */
#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL

/* Inline functions for clock register access. */
static inline int set_tod_clock(__u64 time)
{
	int cc;

	asm volatile(
		"   sck   %1\n"
		"   ipm   %0\n"
		"   srl   %0,28\n"
		: "=d" (cc) : "Q" (time) : "cc");
	return cc;
}

static inline int store_tod_clock(__u64 *time)
{
	int cc;

	asm volatile(
		"   stck  %1\n"
		"   ipm   %0\n"
		"   srl   %0,28\n"
		: "=d" (cc), "=Q" (*time) : : "cc");
	return cc;
}

static inline void set_clock_comparator(__u64 time)
{
	asm volatile("sckc %0" : : "Q" (time));
}

static inline void store_clock_comparator(__u64 *time)
{
	asm volatile("stckc %0" : "=Q" (*time));
}

void clock_comparator_work(void);

static inline unsigned long long local_tick_disable(void)
{
	unsigned long long old;

	old = S390_lowcore.clock_comparator;
	S390_lowcore.clock_comparator = -1ULL;
	set_clock_comparator(S390_lowcore.clock_comparator);
	return old;
}

static inline void local_tick_enable(unsigned long long comp)
{
	S390_lowcore.clock_comparator = comp;
	set_clock_comparator(S390_lowcore.clock_comparator);
}

#define CLOCK_TICK_RATE		1193180 /* Underlying HZ */
#define STORE_CLOCK_EXT_SIZE	16	/* stcke writes 16 bytes */

typedef unsigned long long cycles_t;

static inline void get_tod_clock_ext(char *clk)
{
	typedef struct { char _[STORE_CLOCK_EXT_SIZE]; } addrtype;

	asm volatile("stcke %0" : "=Q" (*(addrtype *) clk) : : "cc");
}

static inline unsigned long long get_tod_clock(void)
{
	unsigned char clk[STORE_CLOCK_EXT_SIZE];

	get_tod_clock_ext(clk);
	return *((unsigned long long *)&clk[1]);
}

static inline unsigned long long get_tod_clock_fast(void)
{
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
	unsigned long long clk;

	asm volatile("stckf %0" : "=Q" (clk) : : "cc");
	return clk;
#else
	return get_tod_clock();
#endif
}

static inline cycles_t get_cycles(void)
{
	return (cycles_t) get_tod_clock() >> 2;
}

int get_sync_clock(unsigned long long *clock);
void init_cpu_timer(void);
unsigned long long monotonic_clock(void);

void tod_to_timeval(__u64 todval, struct timespec64 *xt);

static inline
void stck_to_timespec64(unsigned long long stck, struct timespec64 *ts)
{
	tod_to_timeval(stck - TOD_UNIX_EPOCH, ts);
}

extern u64 sched_clock_base_cc;

/**
 * get_clock_monotonic - returns current time in clock rate units
 *
 * The caller must ensure that preemption is disabled.
 * The clock and sched_clock_base get changed via stop_machine.
 * Therefore preemption must be disabled when calling this
 * function, otherwise the returned value is not guaranteed to
 * be monotonic.
 */
static inline unsigned long long get_tod_clock_monotonic(void)
{
	return get_tod_clock() - sched_clock_base_cc;
}

/**
 * tod_to_ns - convert a TOD format value to nanoseconds
 * @todval: to be converted TOD format value
 * Returns: number of nanoseconds that correspond to the TOD format value
 *
 * Converting a 64 Bit TOD format value to nanoseconds means that the value
 * must be divided by 4.096. In order to achieve that we multiply with 125
 * and divide by 512:
 *
 *    ns = (todval * 125) >> 9;
 *
 * In order to avoid an overflow with the multiplication we can rewrite this.
 * With a split todval == 2^32 * th + tl (th upper 32 bits, tl lower 32 bits)
 * we end up with
 *
 *    ns = ((2^32 * th + tl) * 125 ) >> 9;
 * -> ns = (2^23 * th * 125) + ((tl * 125) >> 9);
 *
 */
static inline unsigned long long tod_to_ns(unsigned long long todval)
{
	unsigned long long ns;

	ns = ((todval >> 32) << 23) * 125;
	ns += ((todval & 0xffffffff) * 125) >> 9;
	return ns;
}

#endif
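As a quick check of the overflow-avoiding split documented in tod_to_ns() above, the following stand-alone user-space program (a hypothetical test harness, assuming gcc or clang with unsigned __int128 support) compares the split formula against a 128-bit reference for a few sample TOD values, including TOD_UNIX_EPOCH:

#include <stdio.h>
#include <stdint.h>

/*
 * Same arithmetic as the kernel's tod_to_ns(): split todval into upper and
 * lower 32 bits so that the multiplication by 125 cannot overflow 64 bits.
 */
static uint64_t tod_to_ns_split(uint64_t todval)
{
	uint64_t ns = ((todval >> 32) << 23) * 125;	/* 2^23 * th * 125 */
	ns += ((todval & 0xffffffffULL) * 125) >> 9;	/* (tl * 125) >> 9 */
	return ns;
}

int main(void)
{
	const uint64_t samples[] = {
		0x7d91048bca000000ULL,	/* TOD_UNIX_EPOCH from the header */
		0x0000000100000000ULL,	/* smallest value with a non-zero th */
		0xfedcba9876543210ULL,	/* arbitrary large value */
	};

	for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		uint64_t tod = samples[i];
		/* Reference result (todval * 125) >> 9 via a 128-bit product. */
		uint64_t ref = (uint64_t)(((unsigned __int128)tod * 125) >> 9);
		uint64_t split = tod_to_ns_split(tod);

		printf("%#018llx -> %llu ns (%s)\n",
		       (unsigned long long)tod,
		       (unsigned long long)split,
		       split == ref ? "matches 128-bit reference" : "MISMATCH");
	}
	return 0;
}

Since 2^32 * th * 125 is always a multiple of 512, shifting it right by 9 bits loses nothing, which is why the split form produces exactly the same result as the single multiplication.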