Compare Revisions: Rev 1103 → Rev 1104

/FreeBSD/mac_settime/trunk/origins/kern/kern_time.c
0,0 → 1,777
/*-
* Copyright (c) 1982, 1986, 1989, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)kern_time.c 8.1 (Berkeley) 6/10/93
*/
 
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: src/sys/kern/kern_time.c,v 1.116 2005/03/31 22:51:18 jhb Exp $");
 
#include "opt_mac.h"
 
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysproto.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/mac.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/timetc.h>
#include <sys/vnode.h>
 
#include <vm/vm.h>
#include <vm/vm_extern.h>
 
int tz_minuteswest;
int tz_dsttime;
 
/*
* Time of day and interval timer support.
*
* These routines provide the kernel entry points to get and set
* the time-of-day and per-process interval timers. Subroutines
* here provide support for adding and subtracting timeval structures
* and decrementing interval timers, optionally reloading the interval
* timers when they expire.
*/
 
static int settime(struct thread *, struct timeval *);
static void timevalfix(struct timeval *);
static void no_lease_updatetime(int);
 
static void
no_lease_updatetime(deltat)
int deltat;
{
}
 
void (*lease_updatetime)(int) = no_lease_updatetime;
 
static int
settime(struct thread *td, struct timeval *tv)
{
struct timeval delta, tv1, tv2;
static struct timeval maxtime, laststep;
struct timespec ts;
int s;
 
s = splclock();
microtime(&tv1);
delta = *tv;
timevalsub(&delta, &tv1);
 
/*
* If the system is secure, we do not allow the time to be
* set to a value earlier than 1 second less than the highest
* time we have yet seen. The worst a miscreant can do in
* this circumstance is "freeze" time. He cannot go
* back to the past.
*
* We similarly do not allow the clock to be stepped more
* than one second, nor more than once per second. This allows
* a miscreant to make the clock march double-time, but no worse.
*/
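/*
 * For illustration: at securelevel 2, with the clock reading 1000 s,
 * a request to step back to 990 s is clamped to maxtime - 1 = 999 s,
 * a request to jump ahead to 1010 s is clamped to 1001 s, and a
 * second forward step within the same second fails with EPERM.
 */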
if (securelevel_gt(td->td_ucred, 1) != 0) {
if (delta.tv_sec < 0 || delta.tv_usec < 0) {
/*
* Update maxtime to latest time we've seen.
*/
if (tv1.tv_sec > maxtime.tv_sec)
maxtime = tv1;
tv2 = *tv;
timevalsub(&tv2, &maxtime);
if (tv2.tv_sec < -1) {
tv->tv_sec = maxtime.tv_sec - 1;
printf("Time adjustment clamped to -1 second\n");
}
} else {
if (tv1.tv_sec == laststep.tv_sec) {
splx(s);
return (EPERM);
}
if (delta.tv_sec > 1) {
tv->tv_sec = tv1.tv_sec + 1;
printf("Time adjustment clamped to +1 second\n");
}
laststep = *tv;
}
}
 
ts.tv_sec = tv->tv_sec;
ts.tv_nsec = tv->tv_usec * 1000;
mtx_lock(&Giant);
tc_setclock(&ts);
(void) splsoftclock();
lease_updatetime(delta.tv_sec);
splx(s);
resettodr();
mtx_unlock(&Giant);
return (0);
}
 
#ifndef _SYS_SYSPROTO_H_
struct clock_gettime_args {
clockid_t clock_id;
struct timespec *tp;
};
#endif
 
/*
* MPSAFE
*/
/* ARGSUSED */
int
clock_gettime(struct thread *td, struct clock_gettime_args *uap)
{
struct timespec ats;
struct timeval sys, user;
struct proc *p;
 
p = td->td_proc;
switch (uap->clock_id) {
case CLOCK_REALTIME:
nanotime(&ats);
break;
case CLOCK_VIRTUAL:
PROC_LOCK(p);
calcru(p, &user, &sys);
PROC_UNLOCK(p);
TIMEVAL_TO_TIMESPEC(&user, &ats);
break;
case CLOCK_PROF:
PROC_LOCK(p);
calcru(p, &user, &sys);
PROC_UNLOCK(p);
timevaladd(&user, &sys);
TIMEVAL_TO_TIMESPEC(&user, &ats);
break;
case CLOCK_MONOTONIC:
nanouptime(&ats);
break;
default:
return (EINVAL);
}
return (copyout(&ats, uap->tp, sizeof(ats)));
}
 
#ifndef _SYS_SYSPROTO_H_
struct clock_settime_args {
clockid_t clock_id;
const struct timespec *tp;
};
#endif
 
/*
* MPSAFE
*/
/* ARGSUSED */
int
clock_settime(struct thread *td, struct clock_settime_args *uap)
{
struct timeval atv;
struct timespec ats;
int error;
 
#ifdef MAC
error = mac_check_system_settime(td->td_ucred);
if (error)
return (error);
#endif
if ((error = suser(td)) != 0)
return (error);
if (uap->clock_id != CLOCK_REALTIME)
return (EINVAL);
if ((error = copyin(uap->tp, &ats, sizeof(ats))) != 0)
return (error);
if (ats.tv_nsec < 0 || ats.tv_nsec >= 1000000000)
return (EINVAL);
/* XXX Don't convert nsec->usec and back */
TIMESPEC_TO_TIMEVAL(&atv, &ats);
error = settime(td, &atv);
return (error);
}
 
#ifndef _SYS_SYSPROTO_H_
struct clock_getres_args {
clockid_t clock_id;
struct timespec *tp;
};
#endif
 
int
clock_getres(struct thread *td, struct clock_getres_args *uap)
{
struct timespec ts;
 
ts.tv_sec = 0;
switch (uap->clock_id) {
case CLOCK_REALTIME:
case CLOCK_MONOTONIC:
/*
* Round up the result of the division cheaply by adding 1.
* Rounding up is especially important if rounding down
* would give 0. Perfect rounding is unimportant.
*/
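/*
 * For illustration: a 1 MHz timecounter reports 1001 ns here rather
 * than the exact 1000 ns; the extra nanosecond is harmless.
 */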
ts.tv_nsec = 1000000000 / tc_getfrequency() + 1;
break;
case CLOCK_VIRTUAL:
case CLOCK_PROF:
/* Accurately round up here because we can do so cheaply. */
ts.tv_nsec = (1000000000 + hz - 1) / hz;
break;
default:
return (EINVAL);
}
if (uap->tp == NULL)
return (0);
return (copyout(&ts, uap->tp, sizeof(ts)));
}
 
static int nanowait;
 
int
kern_nanosleep(struct thread *td, struct timespec *rqt, struct timespec *rmt)
{
struct timespec ts, ts2, ts3;
struct timeval tv;
int error;
 
if (rqt->tv_nsec < 0 || rqt->tv_nsec >= 1000000000)
return (EINVAL);
if (rqt->tv_sec < 0 || (rqt->tv_sec == 0 && rqt->tv_nsec == 0))
return (0);
getnanouptime(&ts);
timespecadd(&ts, rqt);
TIMESPEC_TO_TIMEVAL(&tv, rqt);
for (;;) {
error = tsleep(&nanowait, PWAIT | PCATCH, "nanslp",
tvtohz(&tv));
getnanouptime(&ts2);
if (error != EWOULDBLOCK) {
if (error == ERESTART)
error = EINTR;
if (rmt != NULL) {
timespecsub(&ts, &ts2);
if (ts.tv_sec < 0)
timespecclear(&ts);
*rmt = ts;
}
return (error);
}
if (timespeccmp(&ts2, &ts, >=))
return (0);
ts3 = ts;
timespecsub(&ts3, &ts2);
TIMESPEC_TO_TIMEVAL(&tv, &ts3);
}
}
 
#ifndef _SYS_SYSPROTO_H_
struct nanosleep_args {
struct timespec *rqtp;
struct timespec *rmtp;
};
#endif
 
/*
* MPSAFE
*/
/* ARGSUSED */
int
nanosleep(struct thread *td, struct nanosleep_args *uap)
{
struct timespec rmt, rqt;
int error;
 
error = copyin(uap->rqtp, &rqt, sizeof(rqt));
if (error)
return (error);
 
if (uap->rmtp &&
!useracc((caddr_t)uap->rmtp, sizeof(rmt), VM_PROT_WRITE))
return (EFAULT);
error = kern_nanosleep(td, &rqt, &rmt);
if (error && uap->rmtp) {
int error2;
 
error2 = copyout(&rmt, uap->rmtp, sizeof(rmt));
if (error2)
error = error2;
}
return (error);
}
 
#ifndef _SYS_SYSPROTO_H_
struct gettimeofday_args {
struct timeval *tp;
struct timezone *tzp;
};
#endif
/*
* MPSAFE
*/
/* ARGSUSED */
int
gettimeofday(struct thread *td, struct gettimeofday_args *uap)
{
struct timeval atv;
struct timezone rtz;
int error = 0;
 
if (uap->tp) {
microtime(&atv);
error = copyout(&atv, uap->tp, sizeof (atv));
}
if (error == 0 && uap->tzp != NULL) {
rtz.tz_minuteswest = tz_minuteswest;
rtz.tz_dsttime = tz_dsttime;
error = copyout(&rtz, uap->tzp, sizeof (rtz));
}
return (error);
}
 
#ifndef _SYS_SYSPROTO_H_
struct settimeofday_args {
struct timeval *tv;
struct timezone *tzp;
};
#endif
/*
* MPSAFE
*/
/* ARGSUSED */
int
settimeofday(struct thread *td, struct settimeofday_args *uap)
{
struct timeval atv, *tvp;
struct timezone atz, *tzp;
int error;
 
if (uap->tv) {
error = copyin(uap->tv, &atv, sizeof(atv));
if (error)
return (error);
tvp = &atv;
} else
tvp = NULL;
if (uap->tzp) {
error = copyin(uap->tzp, &atz, sizeof(atz));
if (error)
return (error);
tzp = &atz;
} else
tzp = NULL;
return (kern_settimeofday(td, tvp, tzp));
}
 
int
kern_settimeofday(struct thread *td, struct timeval *tv, struct timezone *tzp)
{
int error;
 
#ifdef MAC
error = mac_check_system_settime(td->td_ucred);
if (error)
return (error);
#endif
error = suser(td);
if (error)
return (error);
/* Verify all parameters before changing time. */
if (tv) {
if (tv->tv_usec < 0 || tv->tv_usec >= 1000000)
return (EINVAL);
error = settime(td, tv);
}
if (tzp && error == 0) {
tz_minuteswest = tzp->tz_minuteswest;
tz_dsttime = tzp->tz_dsttime;
}
return (error);
}
 
/*
* Get value of an interval timer. The process virtual and
* profiling virtual time timers are kept in the p_stats area, since
* they can be swapped out. These are kept internally in the
* way they are specified externally: in time until they expire.
*
* The real time interval timer is kept in the process table slot
* for the process, and its value (it_value) is kept as an
* absolute time rather than as a delta, so that it is easy to keep
* periodic real-time signals from drifting.
*
* Virtual time timers are processed in the hardclock() routine of
* kern_clock.c. The real time timer is processed by a timeout
* routine, called from the softclock() routine. Since a callout
* may be delayed in real time due to interrupt processing in the system,
* it is possible for the real time timeout routine (realitexpire, given below),
* to be delayed in real time past when it is supposed to occur. It
* does not suffice, therefore, to reload the real timer .it_value from the
* real time timers .it_interval. Rather, we compute the next time in
* absolute time the timer should go off.
*/
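/*
 * For illustration: arming ITIMER_REAL for 5 seconds at an uptime of
 * 100 s stores it_value = 105 s internally; getitimer() converts this
 * back to the externally visible "at most 5 seconds remaining" form.
 */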
#ifndef _SYS_SYSPROTO_H_
struct getitimer_args {
u_int which;
struct itimerval *itv;
};
#endif
/*
* MPSAFE
*/
int
getitimer(struct thread *td, struct getitimer_args *uap)
{
struct itimerval aitv;
int error;
 
error = kern_getitimer(td, uap->which, &aitv);
if (error != 0)
return (error);
return (copyout(&aitv, uap->itv, sizeof (struct itimerval)));
}
 
int
kern_getitimer(struct thread *td, u_int which, struct itimerval *aitv)
{
struct proc *p = td->td_proc;
struct timeval ctv;
 
if (which > ITIMER_PROF)
return (EINVAL);
 
if (which == ITIMER_REAL) {
/*
* Convert from absolute to relative time in .it_value
* part of real time timer. If time for real time timer
* has passed return 0, else return difference between
* current time and time for the timer to go off.
*/
PROC_LOCK(p);
*aitv = p->p_realtimer;
PROC_UNLOCK(p);
if (timevalisset(&aitv->it_value)) {
getmicrouptime(&ctv);
if (timevalcmp(&aitv->it_value, &ctv, <))
timevalclear(&aitv->it_value);
else
timevalsub(&aitv->it_value, &ctv);
}
} else {
mtx_lock_spin(&sched_lock);
*aitv = p->p_stats->p_timer[which];
mtx_unlock_spin(&sched_lock);
}
return (0);
}
 
#ifndef _SYS_SYSPROTO_H_
struct setitimer_args {
u_int which;
struct itimerval *itv, *oitv;
};
#endif
 
/*
* MPSAFE
*/
int
setitimer(struct thread *td, struct setitimer_args *uap)
{
struct itimerval aitv, oitv;
int error;
 
if (uap->itv == NULL) {
uap->itv = uap->oitv;
return (getitimer(td, (struct getitimer_args *)uap));
}
 
if ((error = copyin(uap->itv, &aitv, sizeof(struct itimerval))))
return (error);
error = kern_setitimer(td, uap->which, &aitv, &oitv);
if (error != 0 || uap->oitv == NULL)
return (error);
return (copyout(&oitv, uap->oitv, sizeof(struct itimerval)));
}
 
int
kern_setitimer(struct thread *td, u_int which, struct itimerval *aitv,
struct itimerval *oitv)
{
struct proc *p = td->td_proc;
struct timeval ctv;
 
if (aitv == NULL)
return (kern_getitimer(td, which, oitv));
 
if (which > ITIMER_PROF)
return (EINVAL);
if (itimerfix(&aitv->it_value))
return (EINVAL);
if (!timevalisset(&aitv->it_value))
timevalclear(&aitv->it_interval);
else if (itimerfix(&aitv->it_interval))
return (EINVAL);
 
if (which == ITIMER_REAL) {
PROC_LOCK(p);
if (timevalisset(&p->p_realtimer.it_value))
callout_stop(&p->p_itcallout);
getmicrouptime(&ctv);
if (timevalisset(&aitv->it_value)) {
callout_reset(&p->p_itcallout, tvtohz(&aitv->it_value),
realitexpire, p);
timevaladd(&aitv->it_value, &ctv);
}
*oitv = p->p_realtimer;
p->p_realtimer = *aitv;
PROC_UNLOCK(p);
if (timevalisset(&oitv->it_value)) {
if (timevalcmp(&oitv->it_value, &ctv, <))
timevalclear(&oitv->it_value);
else
timevalsub(&oitv->it_value, &ctv);
}
} else {
mtx_lock_spin(&sched_lock);
*oitv = p->p_stats->p_timer[which];
p->p_stats->p_timer[which] = *aitv;
mtx_unlock_spin(&sched_lock);
}
return (0);
}
 
/*
* Real interval timer expired:
* send process whose timer expired an alarm signal.
* If time is not set up to reload, then just return.
* Else compute next time timer should go off which is > current time.
* This is where delay in processing this timeout causes multiple
* SIGALRM calls to be compressed into one.
* tvtohz() always adds 1 to allow for the time until the next clock
* interrupt being strictly less than 1 clock tick, but we don't want
* that here since we want to appear to be in sync with the clock
* interrupt even when we're delayed.
*/
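/*
 * Rough example: with hz = 100, an expiry 10 ms in the future makes
 * tvtohz() return 2 ticks (1 tick plus its usual +1); subtracting 1
 * below keeps the callout aligned with the clock interrupt as
 * described above.
 */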
void
realitexpire(void *arg)
{
struct proc *p;
struct timeval ctv, ntv;
 
p = (struct proc *)arg;
PROC_LOCK(p);
psignal(p, SIGALRM);
if (!timevalisset(&p->p_realtimer.it_interval)) {
timevalclear(&p->p_realtimer.it_value);
if (p->p_flag & P_WEXIT)
wakeup(&p->p_itcallout);
PROC_UNLOCK(p);
return;
}
for (;;) {
timevaladd(&p->p_realtimer.it_value,
&p->p_realtimer.it_interval);
getmicrouptime(&ctv);
if (timevalcmp(&p->p_realtimer.it_value, &ctv, >)) {
ntv = p->p_realtimer.it_value;
timevalsub(&ntv, &ctv);
callout_reset(&p->p_itcallout, tvtohz(&ntv) - 1,
realitexpire, p);
PROC_UNLOCK(p);
return;
}
}
/*NOTREACHED*/
}
 
/*
* Check that a proposed value to load into the .it_value or
* .it_interval part of an interval timer is acceptable, and
* fix it to have at least minimal value (i.e. if it is less
* than the resolution of the clock, round it up.)
*/
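/*
 * E.g. with hz = 100 (tick = 10000 us), a requested value of 1 us is
 * rounded up to 10000 us, one clock tick.
 */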
int
itimerfix(struct timeval *tv)
{
 
if (tv->tv_sec < 0 || tv->tv_sec > 100000000 ||
tv->tv_usec < 0 || tv->tv_usec >= 1000000)
return (EINVAL);
if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
tv->tv_usec = tick;
return (0);
}
 
/*
* Decrement an interval timer by a specified number
* of microseconds, which must be less than a second,
* i.e. < 1000000. If the timer expires, then reload
* it. In this case, carry over (usec - old value) to
* reduce the value reloaded into the timer so that
* the timer does not drift. This routine assumes
* that it is called in a context where the timers
* on which it is operating cannot change in value.
*/
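/*
 * Worked example (values chosen for illustration): with
 * it_value = 300 us, it_interval = 10000 us and usec = 1000, the
 * timer has expired 700 us "late"; it is reloaded to
 * 10000 - 700 = 9300 us so the next expiry stays on schedule, and
 * 0 is returned to signal the expiry.
 */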
int
itimerdecr(struct itimerval *itp, int usec)
{
 
if (itp->it_value.tv_usec < usec) {
if (itp->it_value.tv_sec == 0) {
/* expired, and already in next interval */
usec -= itp->it_value.tv_usec;
goto expire;
}
itp->it_value.tv_usec += 1000000;
itp->it_value.tv_sec--;
}
itp->it_value.tv_usec -= usec;
usec = 0;
if (timevalisset(&itp->it_value))
return (1);
/* expired, exactly at end of interval */
expire:
if (timevalisset(&itp->it_interval)) {
itp->it_value = itp->it_interval;
itp->it_value.tv_usec -= usec;
if (itp->it_value.tv_usec < 0) {
itp->it_value.tv_usec += 1000000;
itp->it_value.tv_sec--;
}
} else
itp->it_value.tv_usec = 0; /* sec is already 0 */
return (0);
}
 
/*
* Add and subtract routines for timevals.
* N.B.: subtract routine doesn't deal with
* results which are before the beginning,
* it just gets very confused in this case.
* Caveat emptor.
*/
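/*
 * E.g. adding {1 s, 700000 us} and {0 s, 600000 us} first gives
 * {1 s, 1300000 us}, which timevalfix() normalizes to
 * {2 s, 300000 us}.
 */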
void
timevaladd(struct timeval *t1, const struct timeval *t2)
{
 
t1->tv_sec += t2->tv_sec;
t1->tv_usec += t2->tv_usec;
timevalfix(t1);
}
 
void
timevalsub(struct timeval *t1, const struct timeval *t2)
{
 
t1->tv_sec -= t2->tv_sec;
t1->tv_usec -= t2->tv_usec;
timevalfix(t1);
}
 
static void
timevalfix(struct timeval *t1)
{
 
if (t1->tv_usec < 0) {
t1->tv_sec--;
t1->tv_usec += 1000000;
}
if (t1->tv_usec >= 1000000) {
t1->tv_sec++;
t1->tv_usec -= 1000000;
}
}
 
/*
* ratecheck(): simple time-based rate-limit checking.
*/
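/*
 * Hypothetical usage sketch (not from this file): print a warning at
 * most once every 10 seconds.
 *
 *	static struct timeval lasterr;
 *	static const struct timeval errinterval = { 10, 0 };
 *
 *	if (ratecheck(&lasterr, &errinterval))
 *		printf("device wedged\n");
 */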
int
ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
{
struct timeval tv, delta;
int rv = 0;
 
getmicrouptime(&tv); /* NB: 10ms precision */
delta = tv;
timevalsub(&delta, lasttime);
 
/*
* check for 0,0 is so that the message will be seen at least once,
* even if interval is huge.
*/
if (timevalcmp(&delta, mininterval, >=) ||
(lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
*lasttime = tv;
rv = 1;
}
 
return (rv);
}
 
/*
* ppsratecheck(): packets (or events) per second limitation.
*
* Return 0 if the limit is to be enforced (e.g. the caller
* should drop a packet because of the rate limitation).
*
* maxpps of 0 always causes zero to be returned. maxpps of -1
* always causes 1 to be returned; this effectively defeats rate
* limiting.
*
* Note that we maintain the struct timeval for compatibility
* with other bsd systems. We reuse the storage and just monitor
* clock ticks for minimal overhead.
*/
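/*
 * Hypothetical usage sketch (not from this file): log at most about
 * 10 complaints per second and silently drop the rest.
 *
 *	static struct timeval lasttime;
 *	static int curpps;
 *
 *	if (ppsratecheck(&lasttime, &curpps, 10))
 *		log(LOG_ERR, "bad packet\n");
 */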
int
ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
int now;
 
/*
* Reset the last time and counter if this is the first call
* or more than a second has passed since the last update of
* lasttime.
*/
now = ticks;
if (lasttime->tv_sec == 0 || (u_int)(now - lasttime->tv_sec) >= hz) {
lasttime->tv_sec = now;
*curpps = 1;
return (maxpps != 0);
} else {
(*curpps)++; /* NB: ignore potential overflow */
return (maxpps < 0 || *curpps < maxpps);
}
}
/FreeBSD/mac_settime/trunk/origins/kern/kern_ntptime.c
0,0 → 1,976
/*-
***********************************************************************
* *
* Copyright (c) David L. Mills 1993-2001 *
* *
* Permission to use, copy, modify, and distribute this software and *
* its documentation for any purpose and without fee is hereby *
* granted, provided that the above copyright notice appears in all *
* copies and that both the copyright notice and this permission *
* notice appear in supporting documentation, and that the name *
* University of Delaware not be used in advertising or publicity *
* pertaining to distribution of the software without specific, *
* written prior permission. The University of Delaware makes no *
* representations about the suitability of this software for any *
* purpose. It is provided "as is" without express or implied *
* warranty. *
* *
**********************************************************************/
 
/*
* Adapted from the original sources for FreeBSD and timecounters by:
* Poul-Henning Kamp <phk@FreeBSD.org>.
*
* The 32bit version of the "LP" macros seems a bit past its "sell by"
* date so I have retained only the 64bit version and included it directly
* in this file.
*
* Only minor changes done to interface with the timecounters over in
* sys/kern/kern_clock.c. Some of the comments below may be (even more)
* confusing and/or plain wrong in that context.
*/
 
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: src/sys/kern/kern_ntptime.c,v 1.59 2005/05/28 14:34:41 rwatson Exp $");
 
#include "opt_ntp.h"
 
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/time.h>
#include <sys/timex.h>
#include <sys/timetc.h>
#include <sys/timepps.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
 
/*
* Single-precision macros for 64-bit machines
*/
typedef int64_t l_fp;
#define L_ADD(v, u) ((v) += (u))
#define L_SUB(v, u) ((v) -= (u))
#define L_ADDHI(v, a) ((v) += (int64_t)(a) << 32)
#define L_NEG(v) ((v) = -(v))
#define L_RSHIFT(v, n) \
do { \
if ((v) < 0) \
(v) = -(-(v) >> (n)); \
else \
(v) = (v) >> (n); \
} while (0)
#define L_MPY(v, a) ((v) *= (a))
#define L_CLR(v) ((v) = 0)
#define L_ISNEG(v) ((v) < 0)
#define L_LINT(v, a) ((v) = (int64_t)(a) << 32)
#define L_GINT(v) ((v) < 0 ? -(-(v) >> 32) : (v) >> 32)
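/*
 * For illustration: L_LINT(v, 3) stores 3 * 2^32 in v and L_GINT(v)
 * recovers the integer part 3; one unit in the low 32 bits is
 * 1/2^32 of a nanosecond (about 2.3e-10 ns), which is where the
 * resolution figures quoted below come from.
 */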
 
/*
* Generic NTP kernel interface
*
* These routines constitute the Network Time Protocol (NTP) interfaces
* for user and daemon application programs. The ntp_gettime() routine
* provides the time, maximum error (synch distance) and estimated error
* (dispersion) to client user application programs. The ntp_adjtime()
* routine is used by the NTP daemon to adjust the system clock to an
* externally derived time. The time offset and related variables set by
* this routine are used by other routines in this module to adjust the
* phase and frequency of the clock discipline loop which controls the
* system clock.
*
* When the kernel time is reckoned directly in nanoseconds (NTP_NANO
* defined), the time at each tick interrupt is derived directly from
* the kernel time variable. When the kernel time is reckoned in
* microseconds, (NTP_NANO undefined), the time is derived from the
* kernel time variable together with a variable representing the
* leftover nanoseconds at the last tick interrupt. In either case, the
* current nanosecond time is reckoned from these values plus an
* interpolated value derived by the clock routines in another
* architecture-specific module. The interpolation can use either a
* dedicated counter or a processor cycle counter (PCC) implemented in
* some architectures.
*
* Note that all routines must run at priority splclock or higher.
*/
/*
* Phase/frequency-lock loop (PLL/FLL) definitions
*
* The nanosecond clock discipline uses two variable types, time
* variables and frequency variables. Both types are represented as 64-
* bit fixed-point quantities with the decimal point between two 32-bit
* halves. On a 32-bit machine, each half is represented as a single
* word and mathematical operations are done using multiple-precision
* arithmetic. On a 64-bit machine, ordinary computer arithmetic is
* used.
*
* A time variable is a signed 64-bit fixed-point number in ns and
* fraction. It represents the remaining time offset to be amortized
* over succeeding tick interrupts. The maximum time offset is about
* 0.5 s and the resolution is about 2.3e-10 ns.
*
* 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* |s s s| ns |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | fraction |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* A frequency variable is a signed 64-bit fixed-point number in ns/s
* and fraction. It represents the ns and fraction to be added to the
* kernel time variable at each second. The maximum frequency offset is
* about +-500000 ns/s and the resolution is about 2.3e-10 ns/s.
*
* 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* |s s s s s s s s s s s s s| ns/s |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | fraction |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
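/*
 * For reference: the 0.5 s maximum time offset above matches
 * MAXPHASE, to which hardupdate() clamps its argument below, and the
 * +-500000 ns/s maximum frequency matches MAXFREQ, i.e. 500 PPM.
 */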
/*
* The following variables establish the state of the PLL/FLL and the
* residual time and frequency offset of the local clock.
*/
#define SHIFT_PLL 4 /* PLL loop gain (shift) */
#define SHIFT_FLL 2 /* FLL loop gain (shift) */
 
static int time_state = TIME_OK; /* clock state */
static int time_status = STA_UNSYNC; /* clock status bits */
static long time_tai; /* TAI offset (s) */
static long time_monitor; /* last time offset scaled (ns) */
static long time_constant; /* poll interval (shift) (s) */
static long time_precision = 1; /* clock precision (ns) */
static long time_maxerror = MAXPHASE / 1000; /* maximum error (us) */
static long time_esterror = MAXPHASE / 1000; /* estimated error (us) */
static long time_reftime; /* time at last adjustment (s) */
static l_fp time_offset; /* time offset (ns) */
static l_fp time_freq; /* frequency offset (ns/s) */
static l_fp time_adj; /* tick adjust (ns/s) */
 
static int64_t time_adjtime; /* correction from adjtime(2) (usec) */
 
#ifdef PPS_SYNC
/*
* The following variables are used when a pulse-per-second (PPS) signal
* is available and connected via a modem control lead. They establish
* the engineering parameters of the clock discipline loop when
* controlled by the PPS signal.
*/
#define PPS_FAVG 2 /* min freq avg interval (s) (shift) */
#define PPS_FAVGDEF 8 /* default freq avg int (s) (shift) */
#define PPS_FAVGMAX 15 /* max freq avg interval (s) (shift) */
#define PPS_PAVG 4 /* phase avg interval (s) (shift) */
#define PPS_VALID 120 /* PPS signal watchdog max (s) */
#define PPS_MAXWANDER 100000 /* max PPS wander (ns/s) */
#define PPS_POPCORN 2 /* popcorn spike threshold (shift) */
 
static struct timespec pps_tf[3]; /* phase median filter */
static l_fp pps_freq; /* scaled frequency offset (ns/s) */
static long pps_fcount; /* frequency accumulator */
static long pps_jitter; /* nominal jitter (ns) */
static long pps_stabil; /* nominal stability (scaled ns/s) */
static long pps_lastsec; /* time at last calibration (s) */
static int pps_valid; /* signal watchdog counter */
static int pps_shift = PPS_FAVG; /* interval duration (s) (shift) */
static int pps_shiftmax = PPS_FAVGDEF; /* max interval duration (s) (shift) */
static int pps_intcnt; /* wander counter */
 
/*
* PPS signal quality monitors
*/
static long pps_calcnt; /* calibration intervals */
static long pps_jitcnt; /* jitter limit exceeded */
static long pps_stbcnt; /* stability limit exceeded */
static long pps_errcnt; /* calibration errors */
#endif /* PPS_SYNC */
/*
* End of phase/frequency-lock loop (PLL/FLL) definitions
*/
 
static void ntp_init(void);
static void hardupdate(long offset);
static void ntp_gettime1(struct ntptimeval *ntvp);
 
static void
ntp_gettime1(struct ntptimeval *ntvp)
{
struct timespec atv; /* nanosecond time */
 
GIANT_REQUIRED;
 
nanotime(&atv);
ntvp->time.tv_sec = atv.tv_sec;
ntvp->time.tv_nsec = atv.tv_nsec;
ntvp->maxerror = time_maxerror;
ntvp->esterror = time_esterror;
ntvp->tai = time_tai;
ntvp->time_state = time_state;
 
/*
* Status word error decode. If any of these conditions occur,
* an error is returned, instead of the status word. Most
* applications will care only about the fact the system clock
* may not be trusted, not about the details.
*
* Hardware or software error
*/
if ((time_status & (STA_UNSYNC | STA_CLOCKERR)) ||
 
/*
* PPS signal lost when either time or frequency synchronization
* requested
*/
(time_status & (STA_PPSFREQ | STA_PPSTIME) &&
!(time_status & STA_PPSSIGNAL)) ||
 
/*
* PPS jitter exceeded when time synchronization requested
*/
(time_status & STA_PPSTIME &&
time_status & STA_PPSJITTER) ||
 
/*
* PPS wander exceeded or calibration error when frequency
* synchronization requested
*/
(time_status & STA_PPSFREQ &&
time_status & (STA_PPSWANDER | STA_PPSERROR)))
ntvp->time_state = TIME_ERROR;
}
 
/*
* ntp_gettime() - NTP user application interface
*
* See the timex.h header file for synopsis and API description. Note
* that the TAI offset is returned in the ntptimeval.tai structure
* member.
*/
#ifndef _SYS_SYSPROTO_H_
struct ntp_gettime_args {
struct ntptimeval *ntvp;
};
#endif
/* ARGSUSED */
int
ntp_gettime(struct thread *td, struct ntp_gettime_args *uap)
{
struct ntptimeval ntv;
 
mtx_lock(&Giant);
ntp_gettime1(&ntv);
mtx_unlock(&Giant);
 
return (copyout(&ntv, uap->ntvp, sizeof(ntv)));
}
 
static int
ntp_sysctl(SYSCTL_HANDLER_ARGS)
{
struct ntptimeval ntv; /* temporary structure */
 
ntp_gettime1(&ntv);
 
return (sysctl_handle_opaque(oidp, &ntv, sizeof(ntv), req));
}
 
SYSCTL_NODE(_kern, OID_AUTO, ntp_pll, CTLFLAG_RW, 0, "");
SYSCTL_PROC(_kern_ntp_pll, OID_AUTO, gettime, CTLTYPE_OPAQUE|CTLFLAG_RD,
0, sizeof(struct ntptimeval) , ntp_sysctl, "S,ntptimeval", "");
 
#ifdef PPS_SYNC
SYSCTL_INT(_kern_ntp_pll, OID_AUTO, pps_shiftmax, CTLFLAG_RW, &pps_shiftmax, 0, "");
SYSCTL_INT(_kern_ntp_pll, OID_AUTO, pps_shift, CTLFLAG_RW, &pps_shift, 0, "");
SYSCTL_INT(_kern_ntp_pll, OID_AUTO, time_monitor, CTLFLAG_RD, &time_monitor, 0, "");
 
SYSCTL_OPAQUE(_kern_ntp_pll, OID_AUTO, pps_freq, CTLFLAG_RD, &pps_freq, sizeof(pps_freq), "I", "");
SYSCTL_OPAQUE(_kern_ntp_pll, OID_AUTO, time_freq, CTLFLAG_RD, &time_freq, sizeof(time_freq), "I", "");
#endif
/*
* ntp_adjtime() - NTP daemon application interface
*
* See the timex.h header file for synopsis and API description. Note
* that the timex.constant structure member has a dual purpose to set
* the time constant and to set the TAI offset.
*/
#ifndef _SYS_SYSPROTO_H_
struct ntp_adjtime_args {
struct timex *tp;
};
#endif
 
/*
* MPSAFE
*/
int
ntp_adjtime(struct thread *td, struct ntp_adjtime_args *uap)
{
struct timex ntv; /* temporary structure */
long freq; /* frequency (ns/s) */
int modes; /* mode bits from structure */
int s; /* caller priority */
int error;
 
error = copyin((caddr_t)uap->tp, (caddr_t)&ntv, sizeof(ntv));
if (error)
return(error);
 
/*
* Update selected clock variables - only the superuser can
* change anything. Note that there is no error checking here on
* the assumption the superuser should know what it is doing.
* Note that either the time constant or TAI offset are loaded
* from the ntv.constant member, depending on the mode bits. If
* the STA_PLL bit in the status word is cleared, the state and
* status words are reset to the initial values at boot.
*/
mtx_lock(&Giant);
modes = ntv.modes;
if (modes)
error = suser(td);
if (error)
goto done2;
s = splclock();
if (modes & MOD_MAXERROR)
time_maxerror = ntv.maxerror;
if (modes & MOD_ESTERROR)
time_esterror = ntv.esterror;
if (modes & MOD_STATUS) {
if (time_status & STA_PLL && !(ntv.status & STA_PLL)) {
time_state = TIME_OK;
time_status = STA_UNSYNC;
#ifdef PPS_SYNC
pps_shift = PPS_FAVG;
#endif /* PPS_SYNC */
}
time_status &= STA_RONLY;
time_status |= ntv.status & ~STA_RONLY;
}
if (modes & MOD_TIMECONST) {
if (ntv.constant < 0)
time_constant = 0;
else if (ntv.constant > MAXTC)
time_constant = MAXTC;
else
time_constant = ntv.constant;
}
if (modes & MOD_TAI) {
if (ntv.constant > 0) /* XXX zero & negative numbers ? */
time_tai = ntv.constant;
}
#ifdef PPS_SYNC
if (modes & MOD_PPSMAX) {
if (ntv.shift < PPS_FAVG)
pps_shiftmax = PPS_FAVG;
else if (ntv.shift > PPS_FAVGMAX)
pps_shiftmax = PPS_FAVGMAX;
else
pps_shiftmax = ntv.shift;
}
#endif /* PPS_SYNC */
if (modes & MOD_NANO)
time_status |= STA_NANO;
if (modes & MOD_MICRO)
time_status &= ~STA_NANO;
if (modes & MOD_CLKB)
time_status |= STA_CLK;
if (modes & MOD_CLKA)
time_status &= ~STA_CLK;
if (modes & MOD_FREQUENCY) {
freq = (ntv.freq * 1000LL) >> 16;
if (freq > MAXFREQ)
L_LINT(time_freq, MAXFREQ);
else if (freq < -MAXFREQ)
L_LINT(time_freq, -MAXFREQ);
else {
/*
* ntv.freq is [PPM * 2^16] = [us/s * 2^16]
* time_freq is [ns/s * 2^32]
*/
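/*
 * For example, a request for +10 PPM arrives as
 * ntv.freq = 10 * 65536; multiplying by 1000 converts us/s to
 * ns/s and multiplying by 65536 completes the 2^32 scaling,
 * giving time_freq = 10000 * 2^32.
 */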
time_freq = ntv.freq * 1000LL * 65536LL;
}
#ifdef PPS_SYNC
pps_freq = time_freq;
#endif /* PPS_SYNC */
}
if (modes & MOD_OFFSET) {
if (time_status & STA_NANO)
hardupdate(ntv.offset);
else
hardupdate(ntv.offset * 1000);
}
 
/*
* Retrieve all clock variables. Note that the TAI offset is
* returned only by ntp_gettime();
*/
if (time_status & STA_NANO)
ntv.offset = L_GINT(time_offset);
else
ntv.offset = L_GINT(time_offset) / 1000; /* XXX rounding ? */
ntv.freq = L_GINT((time_freq / 1000LL) << 16);
ntv.maxerror = time_maxerror;
ntv.esterror = time_esterror;
ntv.status = time_status;
ntv.constant = time_constant;
if (time_status & STA_NANO)
ntv.precision = time_precision;
else
ntv.precision = time_precision / 1000;
ntv.tolerance = MAXFREQ * SCALE_PPM;
#ifdef PPS_SYNC
ntv.shift = pps_shift;
ntv.ppsfreq = L_GINT((pps_freq / 1000LL) << 16);
if (time_status & STA_NANO)
ntv.jitter = pps_jitter;
else
ntv.jitter = pps_jitter / 1000;
ntv.stabil = pps_stabil;
ntv.calcnt = pps_calcnt;
ntv.errcnt = pps_errcnt;
ntv.jitcnt = pps_jitcnt;
ntv.stbcnt = pps_stbcnt;
#endif /* PPS_SYNC */
splx(s);
 
error = copyout((caddr_t)&ntv, (caddr_t)uap->tp, sizeof(ntv));
if (error)
goto done2;
 
/*
* Status word error decode. See comments in
* ntp_gettime() routine.
*/
if ((time_status & (STA_UNSYNC | STA_CLOCKERR)) ||
(time_status & (STA_PPSFREQ | STA_PPSTIME) &&
!(time_status & STA_PPSSIGNAL)) ||
(time_status & STA_PPSTIME &&
time_status & STA_PPSJITTER) ||
(time_status & STA_PPSFREQ &&
time_status & (STA_PPSWANDER | STA_PPSERROR))) {
td->td_retval[0] = TIME_ERROR;
} else {
td->td_retval[0] = time_state;
}
done2:
mtx_unlock(&Giant);
return (error);
}
 
/*
* second_overflow() - called after ntp_tick_adjust()
*
* This routine is ordinarily called immediately following the above
* routine ntp_tick_adjust(). While these two routines are normally
* combined, they are separated here only for the purposes of
* simulation.
*/
void
ntp_update_second(int64_t *adjustment, time_t *newsec)
{
int tickrate;
l_fp ftemp; /* 32/64-bit temporary */
 
/*
* On rollover of the second both the nanosecond and microsecond
* clocks are updated and the state machine cranked as
* necessary. The phase adjustment to be used for the next
* second is calculated and the maximum error is increased by
* the tolerance.
*/
time_maxerror += MAXFREQ / 1000;
 
/*
* Leap second processing. If in leap-insert state at
* the end of the day, the system clock is set back one
* second; if in leap-delete state, the system clock is
* set ahead one second. The nano_time() routine or
* external clock driver will insure that reported time
* is always monotonic.
*/
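/*
 * For illustration: with STA_INS set, when *newsec reaches a
 * multiple of 86400 (midnight UTC) it is stepped back by one, so
 * second 23:59:59 repeats as the inserted second 23:59:60 and
 * time_tai grows by one.
 */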
switch (time_state) {
 
/*
* No warning.
*/
case TIME_OK:
if (time_status & STA_INS)
time_state = TIME_INS;
else if (time_status & STA_DEL)
time_state = TIME_DEL;
break;
 
/*
* Insert second 23:59:60 following second
* 23:59:59.
*/
case TIME_INS:
if (!(time_status & STA_INS))
time_state = TIME_OK;
else if ((*newsec) % 86400 == 0) {
(*newsec)--;
time_state = TIME_OOP;
time_tai++;
}
break;
 
/*
* Delete second 23:59:59.
*/
case TIME_DEL:
if (!(time_status & STA_DEL))
time_state = TIME_OK;
else if (((*newsec) + 1) % 86400 == 0) {
(*newsec)++;
time_tai--;
time_state = TIME_WAIT;
}
break;
 
/*
* Insert second in progress.
*/
case TIME_OOP:
time_state = TIME_WAIT;
break;
 
/*
* Wait for status bits to clear.
*/
case TIME_WAIT:
if (!(time_status & (STA_INS | STA_DEL)))
time_state = TIME_OK;
}
 
/*
* Compute the total time adjustment for the next second
* in ns. The offset is reduced by a factor depending on
* whether the PPS signal is operating. Note that the
* value is in effect scaled by the clock frequency,
* since the adjustment is added at each tick interrupt.
*/
ftemp = time_offset;
#ifdef PPS_SYNC
/* XXX even if PPS signal dies we should finish adjustment ? */
if (time_status & STA_PPSTIME && time_status &
STA_PPSSIGNAL)
L_RSHIFT(ftemp, pps_shift);
else
L_RSHIFT(ftemp, SHIFT_PLL + time_constant);
#else
L_RSHIFT(ftemp, SHIFT_PLL + time_constant);
#endif /* PPS_SYNC */
time_adj = ftemp;
L_SUB(time_offset, ftemp);
L_ADD(time_adj, time_freq);
/*
* Apply any correction from adjtime(2). If more than one second
* off we slew at a rate of 5ms/s (5000 PPM) else 500us/s (500PPM)
* until the last second is slewed the final < 500 usecs.
*/
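/*
 * Rough example: a 3 second adjtime(2) correction is consumed at
 * 5000 us per second (5000 PPM) while more than 1 s remains, then
 * at 500 us per second, and the final < 500 us is applied in one
 * step.
 */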
if (time_adjtime != 0) {
if (time_adjtime > 1000000)
tickrate = 5000;
else if (time_adjtime < -1000000)
tickrate = -5000;
else if (time_adjtime > 500)
tickrate = 500;
else if (time_adjtime < -500)
tickrate = -500;
else
tickrate = time_adjtime;
time_adjtime -= tickrate;
L_LINT(ftemp, tickrate * 1000);
L_ADD(time_adj, ftemp);
}
*adjustment = time_adj;
#ifdef PPS_SYNC
if (pps_valid > 0)
pps_valid--;
else
time_status &= ~STA_PPSSIGNAL;
#endif /* PPS_SYNC */
}
 
/*
* ntp_init() - initialize variables and structures
*
* This routine must be called after the kernel variables hz and tick
* are set or changed and before the next tick interrupt. In this
* particular implementation, these values are assumed set elsewhere in
* the kernel. The design allows the clock frequency and tick interval
* to be changed while the system is running. So, this routine should
* probably be integrated with the code that does that.
*/
static void
ntp_init()
{
 
/*
* The following variables are initialized only at startup. Only
* those structures not cleared by the compiler need to be
* initialized, and these only in the simulator. In the actual
* kernel, any nonzero values here will quickly evaporate.
*/
L_CLR(time_offset);
L_CLR(time_freq);
#ifdef PPS_SYNC
pps_tf[0].tv_sec = pps_tf[0].tv_nsec = 0;
pps_tf[1].tv_sec = pps_tf[1].tv_nsec = 0;
pps_tf[2].tv_sec = pps_tf[2].tv_nsec = 0;
pps_fcount = 0;
L_CLR(pps_freq);
#endif /* PPS_SYNC */
}
 
SYSINIT(ntpclocks, SI_SUB_CLOCKS, SI_ORDER_MIDDLE, ntp_init, NULL)
 
/*
* hardupdate() - local clock update
*
* This routine is called by ntp_adjtime() to update the local clock
* phase and frequency. The implementation is of an adaptive-parameter,
* hybrid phase/frequency-lock loop (PLL/FLL). The routine computes new
* time and frequency offset estimates for each call. If the kernel PPS
* discipline code is configured (PPS_SYNC), the PPS signal itself
* determines the new time offset, instead of the calling argument.
* Presumably, calls to ntp_adjtime() occur only when the caller
* believes the local clock is valid within some bound (+-128 ms with
* NTP). If the caller's time is far different than the PPS time, an
* argument will ensue, and it's not clear who will lose.
*
* For uncompensated quartz crystal oscillators and nominal update
* intervals less than 256 s, operation should be in phase-lock mode,
* where the loop is disciplined to phase. For update intervals greater
* than 1024 s, operation should be in frequency-lock mode, where the
* loop is disciplined to frequency. Between 256 s and 1024 s, the mode
* is selected by the STA_MODE status bit.
*/
static void
hardupdate(offset)
long offset; /* clock offset (ns) */
{
long mtemp;
l_fp ftemp;
 
/*
* Select how the phase is to be controlled and from which
* source. If the PPS signal is present and enabled to
* discipline the time, the PPS offset is used; otherwise, the
* argument offset is used.
*/
if (!(time_status & STA_PLL))
return;
if (!(time_status & STA_PPSTIME && time_status &
STA_PPSSIGNAL)) {
if (offset > MAXPHASE)
time_monitor = MAXPHASE;
else if (offset < -MAXPHASE)
time_monitor = -MAXPHASE;
else
time_monitor = offset;
L_LINT(time_offset, time_monitor);
}
 
/*
* Select how the frequency is to be controlled and in which
* mode (PLL or FLL). If the PPS signal is present and enabled
* to discipline the frequency, the PPS frequency is used;
* otherwise, the argument offset is used to compute it.
*/
if (time_status & STA_PPSFREQ && time_status & STA_PPSSIGNAL) {
time_reftime = time_second;
return;
}
if (time_status & STA_FREQHOLD || time_reftime == 0)
time_reftime = time_second;
mtemp = time_second - time_reftime;
L_LINT(ftemp, time_monitor);
L_RSHIFT(ftemp, (SHIFT_PLL + 2 + time_constant) << 1);
L_MPY(ftemp, mtemp);
L_ADD(time_freq, ftemp);
time_status &= ~STA_MODE;
if (mtemp >= MINSEC && (time_status & STA_FLL || mtemp >
MAXSEC)) {
L_LINT(ftemp, (time_monitor << 4) / mtemp);
L_RSHIFT(ftemp, SHIFT_FLL + 4);
L_ADD(time_freq, ftemp);
time_status |= STA_MODE;
}
time_reftime = time_second;
if (L_GINT(time_freq) > MAXFREQ)
L_LINT(time_freq, MAXFREQ);
else if (L_GINT(time_freq) < -MAXFREQ)
L_LINT(time_freq, -MAXFREQ);
}
 
#ifdef PPS_SYNC
/*
* hardpps() - discipline CPU clock oscillator to external PPS signal
*
* This routine is called at each PPS interrupt in order to discipline
* the CPU clock oscillator to the PPS signal. There are two independent
* first-order feedback loops, one for the phase, the other for the
* frequency. The phase loop measures and grooms the PPS phase offset
* and leaves it in a handy spot for the seconds overflow routine. The
* frequency loop averages successive PPS phase differences and
* calculates the PPS frequency offset, which is also processed by the
* seconds overflow routine. The code requires the caller to capture the
* time and architecture-dependent hardware counter values in
* nanoseconds at the on-time PPS signal transition.
*
* Note that, on some Unix systems this routine runs at an interrupt
* priority level higher than the timer interrupt routine hardclock().
* Therefore, the variables used are distinct from the hardclock()
* variables, except for the actual time and frequency variables, which
* are determined by this routine and updated atomically.
*/
void
hardpps(tsp, nsec)
struct timespec *tsp; /* time at PPS */
long nsec; /* hardware counter at PPS */
{
long u_sec, u_nsec, v_nsec; /* temps */
l_fp ftemp;
 
/*
* The signal is first processed by a range gate and frequency
* discriminator. The range gate rejects noise spikes outside
* the range +-500 us. The frequency discriminator rejects input
* signals with apparent frequency outside the range 1 +-500
* PPM. If two hits occur in the same second, we ignore the
* later hit; if not and a hit occurs outside the range gate,
* keep the later hit for later comparison, but do not process
* it.
*/
time_status |= STA_PPSSIGNAL | STA_PPSJITTER;
time_status &= ~(STA_PPSWANDER | STA_PPSERROR);
pps_valid = PPS_VALID;
u_sec = tsp->tv_sec;
u_nsec = tsp->tv_nsec;
if (u_nsec >= (NANOSECOND >> 1)) {
u_nsec -= NANOSECOND;
u_sec++;
}
v_nsec = u_nsec - pps_tf[0].tv_nsec;
if (u_sec == pps_tf[0].tv_sec && v_nsec < NANOSECOND -
MAXFREQ)
return;
pps_tf[2] = pps_tf[1];
pps_tf[1] = pps_tf[0];
pps_tf[0].tv_sec = u_sec;
pps_tf[0].tv_nsec = u_nsec;
 
/*
* Compute the difference between the current and previous
* counter values. If the difference exceeds 0.5 s, assume it
* has wrapped around, so correct 1.0 s. If the result exceeds
* the tick interval, the sample point has crossed a tick
* boundary during the last second, so correct the tick. Very
* intricate.
*/
u_nsec = nsec;
if (u_nsec > (NANOSECOND >> 1))
u_nsec -= NANOSECOND;
else if (u_nsec < -(NANOSECOND >> 1))
u_nsec += NANOSECOND;
pps_fcount += u_nsec;
if (v_nsec > MAXFREQ || v_nsec < -MAXFREQ)
return;
time_status &= ~STA_PPSJITTER;
 
/*
* A three-stage median filter is used to help denoise the PPS
* time. The median sample becomes the time offset estimate; the
* difference between the other two samples becomes the time
* dispersion (jitter) estimate.
*/
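/*
 * E.g. phase samples of 250, 180 and 100 ns give a median (offset
 * estimate) of 180 ns and a dispersion (jitter estimate) of
 * 250 - 100 = 150 ns.
 */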
if (pps_tf[0].tv_nsec > pps_tf[1].tv_nsec) {
if (pps_tf[1].tv_nsec > pps_tf[2].tv_nsec) {
v_nsec = pps_tf[1].tv_nsec; /* 0 1 2 */
u_nsec = pps_tf[0].tv_nsec - pps_tf[2].tv_nsec;
} else if (pps_tf[2].tv_nsec > pps_tf[0].tv_nsec) {
v_nsec = pps_tf[0].tv_nsec; /* 2 0 1 */
u_nsec = pps_tf[2].tv_nsec - pps_tf[1].tv_nsec;
} else {
v_nsec = pps_tf[2].tv_nsec; /* 0 2 1 */
u_nsec = pps_tf[0].tv_nsec - pps_tf[1].tv_nsec;
}
} else {
if (pps_tf[1].tv_nsec < pps_tf[2].tv_nsec) {
v_nsec = pps_tf[1].tv_nsec; /* 2 1 0 */
u_nsec = pps_tf[2].tv_nsec - pps_tf[0].tv_nsec;
} else if (pps_tf[2].tv_nsec < pps_tf[0].tv_nsec) {
v_nsec = pps_tf[0].tv_nsec; /* 1 0 2 */
u_nsec = pps_tf[1].tv_nsec - pps_tf[2].tv_nsec;
} else {
v_nsec = pps_tf[2].tv_nsec; /* 1 2 0 */
u_nsec = pps_tf[1].tv_nsec - pps_tf[0].tv_nsec;
}
}
 
/*
* Nominal jitter is due to PPS signal noise and interrupt
* latency. If it exceeds the popcorn threshold, the sample is
* discarded. Otherwise, if so enabled, the time offset is
* updated. We can tolerate a modest loss of data here without
* much degrading time accuracy.
*/
if (u_nsec > (pps_jitter << PPS_POPCORN)) {
time_status |= STA_PPSJITTER;
pps_jitcnt++;
} else if (time_status & STA_PPSTIME) {
time_monitor = -v_nsec;
L_LINT(time_offset, time_monitor);
}
pps_jitter += (u_nsec - pps_jitter) >> PPS_FAVG;
u_sec = pps_tf[0].tv_sec - pps_lastsec;
if (u_sec < (1 << pps_shift))
return;
 
/*
* At the end of the calibration interval the difference between
* the first and last counter values becomes the scaled
* frequency. It will later be divided by the length of the
* interval to determine the frequency update. If the frequency
* exceeds a sanity threshold, or if the actual calibration
* interval is not equal to the expected length, the data are
* discarded. We can tolerate a modest loss of data here without
* much degrading frequency accuracy.
*/
pps_calcnt++;
v_nsec = -pps_fcount;
pps_lastsec = pps_tf[0].tv_sec;
pps_fcount = 0;
u_nsec = MAXFREQ << pps_shift;
if (v_nsec > u_nsec || v_nsec < -u_nsec || u_sec != (1 <<
pps_shift)) {
time_status |= STA_PPSERROR;
pps_errcnt++;
return;
}
 
/*
* Here the raw frequency offset and wander (stability) is
* calculated. If the wander is less than the wander threshold
* for four consecutive averaging intervals, the interval is
* doubled; if it is greater than the threshold for four
* consecutive intervals, the interval is halved. The scaled
* frequency offset is converted to frequency offset. The
* stability metric is calculated as the average of recent
* frequency changes, but is used only for performance
* monitoring.
*/
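/*
 * For illustration: starting from pps_shift = PPS_FAVG (a 4 s
 * interval), four consecutive in-threshold intervals bump the shift
 * by one, doubling the interval, up to pps_shiftmax (default
 * PPS_FAVGDEF, 256 s); four consecutive out-of-threshold intervals
 * halve it again, but never below 4 s.
 */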
L_LINT(ftemp, v_nsec);
L_RSHIFT(ftemp, pps_shift);
L_SUB(ftemp, pps_freq);
u_nsec = L_GINT(ftemp);
if (u_nsec > PPS_MAXWANDER) {
L_LINT(ftemp, PPS_MAXWANDER);
pps_intcnt--;
time_status |= STA_PPSWANDER;
pps_stbcnt++;
} else if (u_nsec < -PPS_MAXWANDER) {
L_LINT(ftemp, -PPS_MAXWANDER);
pps_intcnt--;
time_status |= STA_PPSWANDER;
pps_stbcnt++;
} else {
pps_intcnt++;
}
if (pps_intcnt >= 4) {
pps_intcnt = 4;
if (pps_shift < pps_shiftmax) {
pps_shift++;
pps_intcnt = 0;
}
} else if (pps_intcnt <= -4 || pps_shift > pps_shiftmax) {
pps_intcnt = -4;
if (pps_shift > PPS_FAVG) {
pps_shift--;
pps_intcnt = 0;
}
}
if (u_nsec < 0)
u_nsec = -u_nsec;
pps_stabil += (u_nsec * SCALE_PPM - pps_stabil) >> PPS_FAVG;
 
/*
* The PPS frequency is recalculated and clamped to the maximum
* MAXFREQ. If enabled, the system clock frequency is updated as
* well.
*/
L_ADD(pps_freq, ftemp);
u_nsec = L_GINT(pps_freq);
if (u_nsec > MAXFREQ)
L_LINT(pps_freq, MAXFREQ);
else if (u_nsec < -MAXFREQ)
L_LINT(pps_freq, -MAXFREQ);
if (time_status & STA_PPSFREQ)
time_freq = pps_freq;
}
#endif /* PPS_SYNC */
 
#ifndef _SYS_SYSPROTO_H_
struct adjtime_args {
struct timeval *delta;
struct timeval *olddelta;
};
#endif
/*
* MPSAFE
*/
/* ARGSUSED */
int
adjtime(struct thread *td, struct adjtime_args *uap)
{
struct timeval delta, olddelta, *deltap;
int error;
 
if (uap->delta) {
error = copyin(uap->delta, &delta, sizeof(delta));
if (error)
return (error);
deltap = &delta;
} else
deltap = NULL;
error = kern_adjtime(td, deltap, &olddelta);
if (uap->olddelta && error == 0)
error = copyout(&olddelta, uap->olddelta, sizeof(olddelta));
return (error);
}
 
int
kern_adjtime(struct thread *td, struct timeval *delta, struct timeval *olddelta)
{
struct timeval atv;
int error;
 
if ((error = suser(td)))
return (error);
 
mtx_lock(&Giant);
if (olddelta) {
atv.tv_sec = time_adjtime / 1000000;
atv.tv_usec = time_adjtime % 1000000;
if (atv.tv_usec < 0) {
atv.tv_usec += 1000000;
atv.tv_sec--;
}
*olddelta = atv;
}
if (delta)
time_adjtime = (int64_t)delta->tv_sec * 1000000 +
delta->tv_usec;
mtx_unlock(&Giant);
return (error);
}
 
/FreeBSD/mac_settime/trunk/origins/ntp/ntpd.c
0,0 → 1,1274
/*
* ntpd.c - main program for the fixed point NTP daemon
*/
 
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif
 
#include "ntp_machine.h"
#include "ntpd.h"
#include "ntp_io.h"
#include "ntp_stdlib.h"
 
#ifdef SIM
#include "ntpsim.h"
#endif
 
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif
#ifdef HAVE_SYS_STAT_H
# include <sys/stat.h>
#endif
#include <stdio.h>
#ifndef SYS_WINNT
# if !defined(VMS) /*wjm*/
# ifdef HAVE_SYS_PARAM_H
# include <sys/param.h>
# endif
# endif /* VMS */
# ifdef HAVE_SYS_SIGNAL_H
# include <sys/signal.h>
# else
# include <signal.h>
# endif
# ifdef HAVE_SYS_IOCTL_H
# include <sys/ioctl.h>
# endif /* HAVE_SYS_IOCTL_H */
# ifdef HAVE_SYS_RESOURCE_H
# include <sys/resource.h>
# endif /* HAVE_SYS_RESOURCE_H */
#else
# include <signal.h>
# include <process.h>
# include <io.h>
# include "../libntp/log.h"
# include <clockstuff.h>
# include <crtdbg.h>
#endif /* SYS_WINNT */
#if defined(HAVE_RTPRIO)
# ifdef HAVE_SYS_RESOURCE_H
# include <sys/resource.h>
# endif
# ifdef HAVE_SYS_LOCK_H
# include <sys/lock.h>
# endif
# include <sys/rtprio.h>
#else
# ifdef HAVE_PLOCK
# ifdef HAVE_SYS_LOCK_H
# include <sys/lock.h>
# endif
# endif
#endif
#if defined(HAVE_SCHED_SETSCHEDULER)
# ifdef HAVE_SCHED_H
# include <sched.h>
# else
# ifdef HAVE_SYS_SCHED_H
# include <sys/sched.h>
# endif
# endif
#endif
#if defined(HAVE_SYS_MMAN_H)
# include <sys/mman.h>
#endif
 
#ifdef HAVE_TERMIOS_H
# include <termios.h>
#endif
 
#ifdef SYS_DOMAINOS
# include <apollo/base.h>
#endif /* SYS_DOMAINOS */
 
#include "recvbuff.h"
#include "ntp_cmdargs.h"
 
#if 0 /* HMS: I don't think we need this. 961223 */
#ifdef LOCK_PROCESS
# ifdef SYS_SOLARIS
# include <sys/mman.h>
# else
# include <sys/lock.h>
# endif
#endif
#endif
 
#ifdef _AIX
# include <ulimit.h>
#endif /* _AIX */
 
#ifdef SCO5_CLOCK
# include <sys/ci/ciioctl.h>
#endif
 
#ifdef HAVE_CLOCKCTL
# include <ctype.h>
# include <grp.h>
# include <pwd.h>
#endif
 
/*
* Signals we catch for debugging. If not debugging we ignore them.
*/
#define MOREDEBUGSIG SIGUSR1
#define LESSDEBUGSIG SIGUSR2
 
/*
* Signals which terminate us gracefully.
*/
#ifndef SYS_WINNT
# define SIGDIE1 SIGHUP
# define SIGDIE3 SIGQUIT
# define SIGDIE2 SIGINT
# define SIGDIE4 SIGTERM
#endif /* SYS_WINNT */
 
#if defined SYS_WINNT
/* handles for various threads, process, and objects */
HANDLE ResolverThreadHandle = NULL;
/* variables used to inform the Service Control Manager of our current state */
BOOL NoWinService = FALSE;
SERVICE_STATUS ssStatus;
SERVICE_STATUS_HANDLE sshStatusHandle;
HANDLE WaitHandles[3] = { NULL, NULL, NULL };
char szMsgPath[255];
static BOOL WINAPI OnConsoleEvent(DWORD dwCtrlType);
BOOL init_randfile();
#endif /* SYS_WINNT */
 
/*
* Scheduling priority we run at
*/
#define NTPD_PRIO (-12)
 
int priority_done = 2; /* 0 - Set priority */
/* 1 - priority is OK where it is */
/* 2 - Don't set priority */
/* 1 and 2 are pretty much the same */
 
/*
* Debugging flag
*/
volatile int debug;
 
/*
* Set the process not to run in the foreground
*/
int forground_process = FALSE;
 
/*
* No-fork flag. If set, we do not become a background daemon.
*/
int nofork;
 
#ifdef HAVE_CLOCKCTL
char *user = NULL; /* User to switch to */
char *group = NULL; /* group to switch to */
char *chrootdir = NULL; /* directory to chroot to */
int sw_uid;
int sw_gid;
char *endp;
struct group *gr;
struct passwd *pw;
#endif /* HAVE_CLOCKCTL */
 
/*
* Initializing flag. All async routines watch this and only do their
* thing when it is clear.
*/
int initializing;
 
/*
* Version declaration
*/
extern const char *Version;
 
int was_alarmed;
 
#ifdef DECL_SYSCALL
/*
* We put this here, since the argument profile is syscall-specific
*/
extern int syscall P((int, ...));
#endif /* DECL_SYSCALL */
 
 
#ifdef SIGDIE2
static RETSIGTYPE finish P((int));
#endif /* SIGDIE2 */
 
#ifdef DEBUG
#ifndef SYS_WINNT
static RETSIGTYPE moredebug P((int));
static RETSIGTYPE lessdebug P((int));
#endif
#else /* not DEBUG */
static RETSIGTYPE no_debug P((int));
#endif /* not DEBUG */
 
int ntpdmain P((int, char **));
static void set_process_priority P((void));
 
#ifdef SIM
int
main(
int argc,
char *argv[]
)
{
return ntpsim(argc, argv);
}
#else /* SIM */
#ifdef NO_MAIN_ALLOWED
CALL(ntpd,"ntpd",ntpdmain);
#else
int
main(
int argc,
char *argv[]
)
{
return ntpdmain(argc, argv);
}
#endif
#endif /* SIM */
 
#ifdef _AIX
/*
* OK. AIX is different from Solaris in how it implements plock().
* If you do NOT adjust the stack limit, you will get the MAXIMUM
* stack size allocated and PINNED with your program. To check the
* value, use ulimit -a.
*
* To fix this, we create an automatic variable and set our stack limit
* to that PLUS 32KB of extra space (we need some headroom).
*
* This subroutine gets the stack address.
*
* Grover Davidson and Matt Ladendorf
*
*/
static char *
get_aix_stack(void)
{
char ch;
return (&ch);
}
 
/*
* Signal handler for SIGDANGER.
*/
static void
catch_danger(int signo)
{
msyslog(LOG_INFO, "ntpd: setpgid(): %m");
/* Make the system believe we'll free something, but don't do it! */
return;
}
#endif /* _AIX */
 
/*
* Set the process priority
*/
static void
set_process_priority(void)
{
 
#ifdef DEBUG
if (debug > 1)
msyslog(LOG_DEBUG, "set_process_priority: %s: priority_done is <%d>",
((priority_done)
? "Leave priority alone"
: "Attempt to set priority"
),
priority_done);
#endif /* DEBUG */
 
#ifdef SYS_WINNT
priority_done += NT_set_process_priority();
#endif
 
#if defined(HAVE_SCHED_SETSCHEDULER)
if (!priority_done) {
extern int config_priority_override, config_priority;
int pmax, pmin;
struct sched_param sched;
 
pmax = sched_get_priority_max(SCHED_FIFO);
sched.sched_priority = pmax;
if ( config_priority_override ) {
pmin = sched_get_priority_min(SCHED_FIFO);
if ( config_priority > pmax )
sched.sched_priority = pmax;
else if ( config_priority < pmin )
sched.sched_priority = pmin;
else
sched.sched_priority = config_priority;
}
if ( sched_setscheduler(0, SCHED_FIFO, &sched) == -1 )
msyslog(LOG_ERR, "sched_setscheduler(): %m");
else
++priority_done;
}
#endif /* HAVE_SCHED_SETSCHEDULER */
#if defined(HAVE_RTPRIO)
# ifdef RTP_SET
if (!priority_done) {
struct rtprio srtp;
 
srtp.type = RTP_PRIO_REALTIME; /* was: RTP_PRIO_NORMAL */
srtp.prio = 0; /* 0 (hi) -> RTP_PRIO_MAX (31,lo) */
 
if (rtprio(RTP_SET, getpid(), &srtp) < 0)
msyslog(LOG_ERR, "rtprio() error: %m");
else
++priority_done;
}
# else /* not RTP_SET */
if (!priority_done) {
if (rtprio(0, 120) < 0)
msyslog(LOG_ERR, "rtprio() error: %m");
else
++priority_done;
}
# endif /* not RTP_SET */
#endif /* HAVE_RTPRIO */
#if defined(NTPD_PRIO) && NTPD_PRIO != 0
# ifdef HAVE_ATT_NICE
if (!priority_done) {
errno = 0;
if (-1 == nice (NTPD_PRIO) && errno != 0)
msyslog(LOG_ERR, "nice() error: %m");
else
++priority_done;
}
# endif /* HAVE_ATT_NICE */
# ifdef HAVE_BSD_NICE
if (!priority_done) {
if (-1 == setpriority(PRIO_PROCESS, 0, NTPD_PRIO))
msyslog(LOG_ERR, "setpriority() error: %m");
else
++priority_done;
}
# endif /* HAVE_BSD_NICE */
#endif /* NTPD_PRIO && NTPD_PRIO != 0 */
if (!priority_done)
msyslog(LOG_ERR, "set_process_priority: No way found to improve our priority");
}
 
 
/*
* Main program. Initialize us, disconnect us from the tty if necessary,
* and loop waiting for I/O and/or timer expiries.
*/
int
ntpdmain(
int argc,
char *argv[]
)
{
l_fp now;
char *cp;
struct recvbuf *rbuflist;
struct recvbuf *rbuf;
#ifdef _AIX /* HMS: ifdef SIGDANGER? */
struct sigaction sa;
#endif
 
initializing = 1; /* mark that we are initializing */
debug = 0; /* no debugging by default */
nofork = 0; /* will fork by default */
 
#ifdef HAVE_UMASK
{
mode_t uv;
 
uv = umask(0);
if(uv)
(void) umask(uv);
else
(void) umask(022);
}
#endif
 
#if defined(HAVE_GETUID) && !defined(MPE) /* MPE lacks the concept of root */
{
uid_t uid;
 
uid = getuid();
if (uid)
{
msyslog(LOG_ERR, "ntpd: must be run as root, not uid %ld", (long)uid);
exit(1);
}
}
#endif
 
#ifdef SYS_WINNT
/* Set the Event-ID message-file name. */
if (!GetModuleFileName(NULL, szMsgPath, sizeof(szMsgPath))) {
msyslog(LOG_ERR, "GetModuleFileName(PGM_EXE_FILE) failed: %m\n");
exit(1);
}
addSourceToRegistry("NTP", szMsgPath);
#endif
getstartup(argc, argv); /* startup configuration, may set debug */
 
if (debug)
printf("%s\n", Version);
 
/*
* Initialize random generator and public key pair
*/
#ifdef SYS_WINNT
/* Initialize random file before OpenSSL checks */
if(!init_randfile())
msyslog(LOG_ERR, "Unable to initialize .rnd file\n");
#endif
get_systime(&now);
SRANDOM((int)(now.l_i * now.l_uf));
 
#if !defined(VMS)
# ifndef NODETACH
/*
* Detach us from the terminal. May need an #ifndef GIZMO.
*/
# ifdef DEBUG
if (!debug && !nofork)
# else /* DEBUG */
if (!nofork)
# endif /* DEBUG */
{
# ifndef SYS_WINNT
# ifdef HAVE_DAEMON
daemon(0, 0);
# else /* not HAVE_DAEMON */
if (fork()) /* HMS: What about a -1? */
exit(0);
 
{
#if !defined(F_CLOSEM)
u_long s;
int max_fd;
#endif /* not F_CLOSEM */
 
#if defined(F_CLOSEM)
/*
* From 'Writing Reliable AIX Daemons,' SG24-4946-00,
* by Eric Agar (saves us from doing 32767 system
* calls)
*/
if (fcntl(0, F_CLOSEM, 0) == -1)
msyslog(LOG_ERR, "ntpd: failed to close open files(): %m");
#else /* not F_CLOSEM */
 
# if defined(HAVE_SYSCONF) && defined(_SC_OPEN_MAX)
max_fd = sysconf(_SC_OPEN_MAX);
# else /* HAVE_SYSCONF && _SC_OPEN_MAX */
max_fd = getdtablesize();
# endif /* HAVE_SYSCONF && _SC_OPEN_MAX */
for (s = 0; s < max_fd; s++)
(void) close((int)s);
#endif /* not F_CLOSEM */
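/*
* Reopen descriptors 0-2 on "/" so the standard descriptors stay
* occupied after everything was closed above.
*/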
(void) open("/", 0);
(void) dup2(0, 1);
(void) dup2(0, 2);
#ifdef SYS_DOMAINOS
{
uid_$t puid;
status_$t st;
 
proc2_$who_am_i(&puid);
proc2_$make_server(&puid, &st);
}
#endif /* SYS_DOMAINOS */
#if defined(HAVE_SETPGID) || defined(HAVE_SETSID)
# ifdef HAVE_SETSID
if (setsid() == (pid_t)-1)
msyslog(LOG_ERR, "ntpd: setsid(): %m");
# else
if (setpgid(0, 0) == -1)
msyslog(LOG_ERR, "ntpd: setpgid(): %m");
# endif
#else /* HAVE_SETPGID || HAVE_SETSID */
{
# if defined(TIOCNOTTY)
int fid;
 
fid = open("/dev/tty", 2);
if (fid >= 0)
{
(void) ioctl(fid, (u_long) TIOCNOTTY, (char *) 0);
(void) close(fid);
}
# endif /* defined(TIOCNOTTY) */
# ifdef HAVE_SETPGRP_0
(void) setpgrp();
# else /* HAVE_SETPGRP_0 */
(void) setpgrp(0, getpid());
# endif /* HAVE_SETPGRP_0 */
}
#endif /* HAVE_SETPGID || HAVE_SETSID */
#ifdef _AIX
/* Don't get killed by low-on-memory signal. */
sa.sa_handler = catch_danger;
sigemptyset(&sa.sa_mask);
sa.sa_flags = SA_RESTART;
 
(void) sigaction(SIGDANGER, &sa, NULL);
#endif /* _AIX */
}
# endif /* not HAVE_DAEMON */
# else /* SYS_WINNT */
 
{
if (NoWinService == FALSE) {
SERVICE_TABLE_ENTRY dispatchTable[] = {
{ TEXT("NetworkTimeProtocol"), (LPSERVICE_MAIN_FUNCTION)service_main },
{ NULL, NULL }
};
 
/* daemonize */
if (!StartServiceCtrlDispatcher(dispatchTable))
{
msyslog(LOG_ERR, "StartServiceCtrlDispatcher: %m");
ExitProcess(2);
}
}
else {
service_main(argc, argv);
return 0;
}
}
# endif /* SYS_WINNT */
}
# endif /* NODETACH */
# if defined(SYS_WINNT) && !defined(NODETACH)
else
service_main(argc, argv);
return 0; /* must return a value */
} /* end main */
 
/*
* If this runs as a service under NT, the main thread will block at
* StartServiceCtrlDispatcher() and another thread will be started by the
* Service Control Dispatcher which will begin execution at the routine
* specified in that call (viz. service_main)
*/
void
service_main(
DWORD argc,
LPTSTR *argv
)
{
char *cp;
struct recvbuf *rbuflist;
struct recvbuf *rbuf;
 
if(!debug && NoWinService == FALSE)
{
/* register our service control handler */
sshStatusHandle = RegisterServiceCtrlHandler( TEXT("NetworkTimeProtocol"),
(LPHANDLER_FUNCTION)service_ctrl);
if(sshStatusHandle == 0)
{
msyslog(LOG_ERR, "RegisterServiceCtrlHandler failed: %m");
return;
}
 
/* report pending status to Service Control Manager */
ssStatus.dwServiceType = SERVICE_WIN32_OWN_PROCESS;
ssStatus.dwCurrentState = SERVICE_START_PENDING;
ssStatus.dwControlsAccepted = SERVICE_ACCEPT_STOP;
ssStatus.dwWin32ExitCode = NO_ERROR;
ssStatus.dwServiceSpecificExitCode = 0;
ssStatus.dwCheckPoint = 1;
ssStatus.dwWaitHint = 5000;
if (!SetServiceStatus(sshStatusHandle, &ssStatus))
{
msyslog(LOG_ERR, "SetServiceStatus: %m");
ssStatus.dwCurrentState = SERVICE_STOPPED;
SetServiceStatus(sshStatusHandle, &ssStatus);
return;
}
 
} /* debug */
# endif /* defined(SYS_WINNT) && !defined(NODETACH) */
#endif /* VMS */
 
/*
* Logging. This may actually work on the gizmo board. Find a name
* to log with by using the basename of argv[0]
*/
cp = strrchr(argv[0], '/');
if (cp == 0)
cp = argv[0];
else
cp++;
 
debug = 0; /* will be immediately re-initialized 8-( */
getstartup(argc, argv); /* startup configuration, catch logfile this time */
 
#if !defined(VMS)
 
# ifndef LOG_DAEMON
openlog(cp, LOG_PID);
# else /* LOG_DAEMON */
 
# ifndef LOG_NTP
# define LOG_NTP LOG_DAEMON
# endif
openlog(cp, LOG_PID | LOG_NDELAY, LOG_NTP);
# ifdef DEBUG
if (debug)
setlogmask(LOG_UPTO(LOG_DEBUG));
else
# endif /* DEBUG */
setlogmask(LOG_UPTO(LOG_DEBUG)); /* @@@ was INFO */
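/*
* Note: with the change flagged above, both branches now select
* LOG_UPTO(LOG_DEBUG); the non-debug mask was apparently
* LOG_UPTO(LOG_INFO) before.
*/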
# endif /* LOG_DAEMON */
#endif /* !SYS_WINNT && !VMS */
 
NLOG(NLOG_SYSINFO) /* conditional if clause for conditional syslog */
msyslog(LOG_NOTICE, "%s", Version);
 
#ifdef SYS_WINNT
/* GMS 1/18/1997
* TODO: lock the process in memory using SetProcessWorkingSetSize() and VirtualLock() functions
*
process_handle = GetCurrentProcess();
if (SetProcessWorkingSetSize(process_handle, 2097152 , 4194304 ) == TRUE) {
if (VirtualLock(0 , 4194304) == FALSE)
msyslog(LOG_ERR, "VirtualLock() failed: %m");
} else {
msyslog(LOG_ERR, "SetProcessWorkingSetSize() failed: %m");
}
*/
#endif /* SYS_WINNT */
 
#ifdef SCO5_CLOCK
/*
* SCO OpenServer's system clock offers much more precise timekeeping
* on the base CPU than the other CPUs (for multiprocessor systems),
* so we must lock to the base CPU.
*/
{
int fd = open("/dev/at1", O_RDONLY);
if (fd >= 0) {
int zero = 0;
if (ioctl(fd, ACPU_LOCK, &zero) < 0)
msyslog(LOG_ERR, "cannot lock to base CPU: %m\n");
close( fd );
} /* else ...
* If we can't open the device, this probably just isn't
* a multiprocessor system, so we're A-OK.
*/
}
#endif
 
#if defined(HAVE_MLOCKALL) && defined(MCL_CURRENT) && defined(MCL_FUTURE)
# ifdef HAVE_SETRLIMIT
/*
* Set the stack limit to something smaller, so that we don't lock a lot
* of unused stack memory.
*/
{
struct rlimit rl;
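/*
* Note: the assignment inside the condition below is intentional;
* the soft stack limit is set to 20 * 4096 bytes (80 KiB, assuming
* 4 KiB pages) provided that value stays below the hard limit.
*/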
 
if (getrlimit(RLIMIT_STACK, &rl) != -1
&& (rl.rlim_cur = 20 * 4096) < rl.rlim_max)
{
if (setrlimit(RLIMIT_STACK, &rl) == -1)
{
msyslog(LOG_ERR,
"Cannot adjust stack limit for mlockall: %m");
}
}
}
# endif /* HAVE_SETRLIMIT */
/*
* lock the process into memory
*/
if (mlockall(MCL_CURRENT|MCL_FUTURE) < 0)
msyslog(LOG_ERR, "mlockall(): %m");
#else /* not (HAVE_MLOCKALL && MCL_CURRENT && MCL_FUTURE) */
# ifdef HAVE_PLOCK
# ifdef PROCLOCK
# ifdef _AIX
/*
* Set the stack limit for plock() on AIX;
* see get_aix_stack() for details.
*/
if (ulimit(SET_STACKLIM, (get_aix_stack() - 8*4096)) < 0)
{
msyslog(LOG_ERR,"Cannot adjust stack limit for plock on AIX: %m");
}
# endif /* _AIX */
/*
* lock the process into memory
*/
if (plock(PROCLOCK) < 0)
msyslog(LOG_ERR, "plock(PROCLOCK): %m");
# else /* not PROCLOCK */
# ifdef TXTLOCK
/*
* Lock text into ram
*/
if (plock(TXTLOCK) < 0)
msyslog(LOG_ERR, "plock(TXTLOCK) error: %m");
# else /* not TXTLOCK */
msyslog(LOG_ERR, "plock() - don't know what to lock!");
# endif /* not TXTLOCK */
# endif /* not PROCLOCK */
# endif /* HAVE_PLOCK */
#endif /* not (HAVE_MLOCKALL && MCL_CURRENT && MCL_FUTURE) */
 
/*
* Set up signals we pay attention to locally.
*/
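/*
* The SIGDIE1..SIGDIE4 macros are defined in ntpd's headers; they are
* normally mapped to the platform's hangup, interrupt, quit and
* terminate signals.
*/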
#ifdef SIGDIE1
(void) signal_no_reset(SIGDIE1, finish);
#endif /* SIGDIE1 */
#ifdef SIGDIE2
(void) signal_no_reset(SIGDIE2, finish);
#endif /* SIGDIE2 */
#ifdef SIGDIE3
(void) signal_no_reset(SIGDIE3, finish);
#endif /* SIGDIE3 */
#ifdef SIGDIE4
(void) signal_no_reset(SIGDIE4, finish);
#endif /* SIGDIE4 */
 
#ifdef SIGBUS
(void) signal_no_reset(SIGBUS, finish);
#endif /* SIGBUS */
 
#if !defined(SYS_WINNT) && !defined(VMS)
# ifdef DEBUG
(void) signal_no_reset(MOREDEBUGSIG, moredebug);
(void) signal_no_reset(LESSDEBUGSIG, lessdebug);
# else
(void) signal_no_reset(MOREDEBUGSIG, no_debug);
(void) signal_no_reset(LESSDEBUGSIG, no_debug);
# endif /* DEBUG */
#endif /* !SYS_WINNT && !VMS */
 
/*
* Set up signals we should never pay attention to.
*/
#if defined SIGPIPE
(void) signal_no_reset(SIGPIPE, SIG_IGN);
#endif /* SIGPIPE */
 
#if defined SYS_WINNT
if (!SetConsoleCtrlHandler(OnConsoleEvent, TRUE)) {
msyslog(LOG_ERR, "Can't set console control handler: %m");
}
#endif
 
/*
* Call the init_ routines to initialize the data structures.
*/
#if defined (HAVE_IO_COMPLETION_PORT)
init_io_completion_port();
init_winnt_time();
#endif
init_auth();
init_util();
init_restrict();
init_mon();
init_timer();
init_lib();
init_random();
init_request();
init_control();
init_peer();
#ifdef REFCLOCK
init_refclock();
#endif
set_process_priority();
init_proto(); /* Call at high priority */
init_io();
init_loopfilter();
mon_start(MON_ON); /* monitor on by default now */
/* turn off in config if unwanted */
 
/*
* Get configuration. This (including argument list parsing) is
* done in a separate module since this will definitely be different
* for the gizmo board. While at it, save the host name for later
* along with the length. The crypto needs this.
*/
#ifdef DEBUG
debug = 0;
#endif
getconfig(argc, argv);
#ifdef OPENSSL
crypto_setup();
#endif /* OPENSSL */
initializing = 0;
 
#if defined(SYS_WINNT) && !defined(NODETACH)
# if defined(DEBUG)
if(!debug)
{
# endif
if (NoWinService == FALSE) {
/* report to the service control manager that the service is running */
ssStatus.dwCurrentState = SERVICE_RUNNING;
ssStatus.dwWin32ExitCode = NO_ERROR;
if (!SetServiceStatus(sshStatusHandle, &ssStatus))
{
msyslog(LOG_ERR, "SetServiceStatus: %m");
if (ResolverThreadHandle != NULL)
CloseHandle(ResolverThreadHandle);
ssStatus.dwCurrentState = SERVICE_STOPPED;
SetServiceStatus(sshStatusHandle, &ssStatus);
return;
}
}
# if defined(DEBUG)
}
# endif
#endif
 
#ifdef HAVE_CLOCKCTL
/*
* Drop super-user privileges and chroot now if the OS supports
* non root clock control (only NetBSD for now).
*/
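/*
* "user" (and "group" below) may be given either as a numeric id or
* as a name; when strtoul() does not consume the whole string, the
* goto falls back to a name lookup.
*/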
if (user != NULL) {
if (isdigit((unsigned char)*user)) {
sw_uid = (uid_t)strtoul(user, &endp, 0);
if (*endp != '\0')
goto getuser;
} else {
getuser:
if ((pw = getpwnam(user)) != NULL) {
sw_uid = pw->pw_uid;
} else {
errno = 0;
msyslog(LOG_ERR, "Cannot find user `%s'", user);
exit (-1);
}
}
}
if (group != NULL) {
if (isdigit((unsigned char)*group)) {
sw_gid = (gid_t)strtoul(group, &endp, 0);
if (*endp != '\0')
goto getgroup;
} else {
getgroup:
if ((gr = getgrnam(group)) != NULL) {
sw_gid = gr->gr_gid;
} else {
errno = 0;
msyslog(LOG_ERR, "Cannot find group `%s'", group);
exit (-1);
}
}
}
if (chrootdir && chroot(chrootdir)) {
msyslog(LOG_ERR, "Cannot chroot to `%s': %m", chrootdir);
exit (-1);
}
if (group && setgid(sw_gid)) {
msyslog(LOG_ERR, "Cannot setgid() to group `%s': %m", group);
exit (-1);
}
if (group && setegid(sw_gid)) {
msyslog(LOG_ERR, "Cannot setegid() to group `%s': %m", group);
exit (-1);
}
if (user && setuid(sw_uid)) {
msyslog(LOG_ERR, "Cannot setuid() to user `%s': %m", user);
exit (-1);
}
if (user && seteuid(sw_uid)) {
msyslog(LOG_ERR, "Cannot seteuid() to user `%s': %m", user);
exit (-1);
}
#endif
/*
* Report that we're up to any trappers
*/
report_event(EVNT_SYSRESTART, (struct peer *)0);
 
/*
* Use select() on all input fd's for unlimited
* time. select() will terminate on SIGALRM or on the
* reception of input. Using select() means we can't do
* robust signal handling and we get a potential race
* between checking for alarms and doing the select().
* Mostly harmless, I think.
*/
/* On VMS, I suspect that select() can't be interrupted
* by a "signal" either, so I take the easy way out and
* have select() time out after one second.
* System clock updates really aren't time-critical,
* and - lacking a hardware reference clock - I have
* yet to learn about anything else that is.
*/
#if defined(HAVE_IO_COMPLETION_PORT)
WaitHandles[0] = CreateEvent(NULL, FALSE, FALSE, NULL); /* exit request */
WaitHandles[1] = get_timer_handle();
WaitHandles[2] = get_io_event();
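/*
* Wait on the exit-request event, the timer and the I/O event with a
* one-second timeout; the wait is alertable (last argument TRUE) so
* queued I/O completion routines can run.
*/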
 
for (;;) {
DWORD Index = WaitForMultipleObjectsEx(sizeof(WaitHandles)/sizeof(WaitHandles[0]), WaitHandles, FALSE, 1000, TRUE);
switch (Index) {
case WAIT_OBJECT_0 + 0 : /* exit request */
exit(0);
break;
 
case WAIT_OBJECT_0 + 1 : /* timer */
timer();
break;
 
case WAIT_OBJECT_0 + 2 : /* Io event */
# ifdef DEBUG
if ( debug > 3 )
{
printf( "IoEvent occurred\n" );
}
# endif
break;
 
case WAIT_IO_COMPLETION : /* loop */
case WAIT_TIMEOUT :
break;
case WAIT_FAILED:
msyslog(LOG_ERR, "ntpdc: WaitForMultipleObjectsEx Failed: Error: %m");
break;
 
/* For now, do nothing for unexpected wait results */
default:
break;
} /* switch */
rbuflist = getrecvbufs(); /* get received buffers */
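/*
* The buffers gathered here are handled by the shared processing
* loop that follows the #endif below.
*/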
 
#else /* normal I/O */
 
was_alarmed = 0;
rbuflist = (struct recvbuf *)0;
for (;;)
{
# if !defined(HAVE_SIGNALED_IO)
extern fd_set activefds;
extern int maxactivefd;
 
fd_set rdfdes;
int nfound;
# elif defined(HAVE_SIGNALED_IO)
block_io_and_alarm();
# endif
 
rbuflist = getrecvbufs(); /* get received buffers */
if (alarm_flag) /* alarmed? */
{
was_alarmed = 1;
alarm_flag = 0;
}
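/*
* alarm_flag is tested again after the wait below, narrowing the
* race between timer expiry and select() noted above.
*/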
 
if (!was_alarmed && rbuflist == (struct recvbuf *)0)
{
/*
* Nothing to do. Wait for something.
*/
# ifndef HAVE_SIGNALED_IO
rdfdes = activefds;
# if defined(VMS) || defined(SYS_VXWORKS)
/* make select() wake up after one second */
{
struct timeval t1;
 
t1.tv_sec = 1; t1.tv_usec = 0;
nfound = select(maxactivefd+1, &rdfdes, (fd_set *)0,
(fd_set *)0, &t1);
}
# else
nfound = select(maxactivefd+1, &rdfdes, (fd_set *)0,
(fd_set *)0, (struct timeval *)0);
# endif /* VMS */
if (nfound > 0)
{
l_fp ts;
 
get_systime(&ts);
 
(void)input_handler(&ts);
}
else if (nfound == -1 && errno != EINTR)
msyslog(LOG_ERR, "select() error: %m");
# ifdef DEBUG
else if (debug > 2)
msyslog(LOG_DEBUG, "select(): nfound=%d, error: %m", nfound);
# endif /* DEBUG */
# else /* HAVE_SIGNALED_IO */
wait_for_signal();
# endif /* HAVE_SIGNALED_IO */
if (alarm_flag) /* alarmed? */
{
was_alarmed = 1;
alarm_flag = 0;
}
rbuflist = getrecvbufs(); /* get received buffers */
}
# ifdef HAVE_SIGNALED_IO
unblock_io_and_alarm();
# endif /* HAVE_SIGNALED_IO */
 
/*
* Out here, signals are unblocked. Call timer routine
* to process expiry.
*/
if (was_alarmed)
{
timer();
was_alarmed = 0;
}
 
#endif /* HAVE_IO_COMPLETION_PORT */
/*
* Call the data procedure to handle each received
* packet.
*/
while (rbuflist != (struct recvbuf *)0)
{
rbuf = rbuflist;
rbuflist = rbuf->next;
(rbuf->receiver)(rbuf);
freerecvbuf(rbuf);
}
#if defined DEBUG && defined SYS_WINNT
if (debug > 4)
printf("getrecvbufs: %ld handler interrupts, %ld frames\n",
handler_calls, handler_pkts);
#endif
 
/*
* Go around again
*/
}
#ifndef SYS_WINNT
exit(1); /* unreachable */
return 1; /* DEC OSF cc braindamage */
#endif
}
 
 
#ifdef SIGDIE2
/*
* finish - exit gracefully
*/
static RETSIGTYPE
finish(
int sig
)
{
 
msyslog(LOG_NOTICE, "ntpd exiting on signal %d", sig);
 
switch (sig)
{
# ifdef SIGBUS
case SIGBUS:
printf("\nfinish(SIGBUS)\n");
exit(0);
# endif
case 0: /* Should never happen... */
return;
default:
exit(0);
}
}
#endif /* SIGDIE2 */
 
 
#ifdef DEBUG
#ifndef SYS_WINNT
/*
* moredebug - increase debugging verbosity
*/
static RETSIGTYPE
moredebug(
int sig
)
{
int saved_errno = errno;
 
if (debug < 255)
{
debug++;
msyslog(LOG_DEBUG, "debug raised to %d", debug);
}
errno = saved_errno;
}
 
/*
* lessdebug - decrease debugging verbosity
*/
static RETSIGTYPE
lessdebug(
int sig
)
{
int saved_errno = errno;
 
if (debug > 0)
{
debug--;
msyslog(LOG_DEBUG, "debug lowered to %d", debug);
}
errno = saved_errno;
}
#endif
#else /* not DEBUG */
#ifndef SYS_WINNT
/*
* no_debug - debugging support was not compiled in; just note the signal.
*/
static RETSIGTYPE
no_debug(
int sig
)
{
int saved_errno = errno;
 
msyslog(LOG_DEBUG, "ntpd not compiled for debugging (signal %d)", sig);
errno = saved_errno;
}
#endif /* not SYS_WINNT */
#endif /* not DEBUG */
 
#ifdef SYS_WINNT
/* service_ctrl - control handler for the NTP service.
* Relays start/stop requests from the control panel or other
* Win32 API callers to the service_main routine.
*/
void
service_ctrl(
DWORD dwCtrlCode
)
{
DWORD dwState = SERVICE_RUNNING;
 
/* Handle the requested control code */
switch(dwCtrlCode)
{
case SERVICE_CONTROL_PAUSE:
/* see no reason to support this */
break;
 
case SERVICE_CONTROL_CONTINUE:
/* see no reason to support this */
break;
 
case SERVICE_CONTROL_STOP:
dwState = SERVICE_STOP_PENDING;
/*
* Report the status, specifying the checkpoint and waithint,
* before setting the termination event.
*/
ssStatus.dwCurrentState = dwState;
ssStatus.dwWin32ExitCode = NO_ERROR;
ssStatus.dwWaitHint = 3000;
if (!SetServiceStatus(sshStatusHandle, &ssStatus))
{
msyslog(LOG_ERR, "SetServiceStatus: %m");
}
if (WaitHandles[0] != NULL) {
SetEvent(WaitHandles[0]);
}
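/*
* Signalling the exit-request event wakes the
* WaitForMultipleObjectsEx() loop in the main routine, which then
* exits.
*/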
return;
 
case SERVICE_CONTROL_INTERROGATE:
/* Update the service status */
break;
 
default:
/* invalid control code */
break;
 
}
 
ssStatus.dwCurrentState = dwState;
ssStatus.dwWin32ExitCode = NO_ERROR;
if (!SetServiceStatus(sshStatusHandle, &ssStatus))
{
msyslog(LOG_ERR, "SetServiceStatus: %m");
}
}
 
static BOOL WINAPI
OnConsoleEvent(
DWORD dwCtrlType
)
{
switch (dwCtrlType) {
case CTRL_BREAK_EVENT :
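/* Ctrl+Break steps the debug level through 1, 2, 4, 8 and back to 0. */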
if (debug > 0) {
debug <<= 1;
}
else {
debug = 1;
}
if (debug > 8) {
debug = 0;
}
printf("debug level %d\n", debug);
break ;
 
case CTRL_C_EVENT :
case CTRL_CLOSE_EVENT :
case CTRL_SHUTDOWN_EVENT :
if (WaitHandles[0] != NULL) {
SetEvent(WaitHandles[0]);
}
break;
 
default :
return FALSE;
}
return TRUE;
}
 
 
/*
* NT version of exit() - all calls to exit() should be routed to
* this function.
*/
void
service_exit(
int status
)
{
if (!debug) { /* running as a service, not in console/debug mode */
/* service mode, need to have the service_main routine
* register with the service control manager that the
* service has stopped running, before exiting
*/
ssStatus.dwCurrentState = SERVICE_STOPPED;
SetServiceStatus(sshStatusHandle, &ssStatus);
 
}
uninit_io_completion_port();
reset_winnt_time();
 
# if defined _MSC_VER
_CrtDumpMemoryLeaks();
# endif
#undef exit
exit(status);
}
 
#endif /* SYS_WINNT */
/FreeBSD/mac_settime/trunk/src/module/mac_settime.c
68,7 → 68,7
&mac_settime_enabled, 0, "Enforce settime policy");
TUNABLE_INT("security.mac.settime.enabled", &mac_settime_enabled);
 
MALLOC_DEFINE(M_SETTIME, "settime rule", "Rules for mac_settime");
static MALLOC_DEFINE(M_SETTIME, "settime rule", "Rules for mac_settime");
 
#define MAC_RULE_STRING_LEN 10240
 
112,10 → 112,8
 
while ((rule = TAILQ_FIRST(head)) != NULL) {
TAILQ_REMOVE(head, rule, r_entries);
if (rule->id != NULL)
free(rule->id, M_SETTIME);
if (rule->jailid != NULL)
free(rule->jailid, M_SETTIME);
free(rule->id, M_SETTIME);
free(rule->jailid, M_SETTIME);
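/*
* free(9) treats a NULL address as a no-op, so no NULL checks are
* needed before these calls.
*/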
free(rule, M_SETTIME);
}
}
244,6 → 242,9
enum parse_state state;
 
r = malloc(sizeof(*r), M_SETTIME, M_ZERO | M_WAITOK);
r->id = NULL; /* M_ZERO gives all-bits-zero, which need not be a null pointer on every architecture */
r->jailid = NULL;
 
error = 0;
not = 0;
state = STATE_BEFORE_ACTION;