Lines Matching defs:sec

90 time_t sec;
115 sec = ts.tv_sec;
129 else if ((sec <= last.tv_sec) && /* same or lower seconds, and */
130 ((sec != last.tv_sec) || /* either different second or */
132 ((last.tv_sec - sec) <= 5)) { /* not way back in time */
133 sec = last.tv_sec;
137 sec++;
140 last.tv_sec = sec;
144 tv->tv_sec = sec;
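The fragments at lines 115-144 are the guard that keeps the returned time-of-day from running backwards: if the freshly sampled second is the same as, or slightly behind, the one last handed out (and the clock has not been stepped back by more than five seconds, per line 132), the previous value is reused, and when even the sub-second part cannot advance the second itself is bumped (line 137). A minimal user-level sketch of the same idea, simplified to whole seconds; monotonic_sec and last_sec are illustrative names, not from the listing, and a real kernel version would also need locking around the saved value:

    #include <time.h>

    static time_t last_sec;            /* last second value handed to a caller */

    time_t
    monotonic_sec(void)
    {
        time_t sec = time(NULL);       /* raw, possibly stepped-back clock */

        if (sec <= last_sec &&         /* same or lower second, and        */
            (last_sec - sec) <= 5)     /* not way back in time             */
            sec = last_sec;            /* reuse the last value we returned */
        last_sec = sec;
        return (sec);
    }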
800 itp->it_value.tv_usec = 0; /* sec is already 0 */
898 time_t sec;
907 sec = tv->tv_sec - now.tv_sec;
911 sec--;
914 sec++;
926 if (sec < 0 || (sec == 0 && ticks < 1))
928 else if (sec > (LONG_MAX - ticks) / hz)
931 ticks += sec * hz; /* common case */
944 int64_t sec;
947 sec = tv->tv_sec;
951 sec--;
954 sec++;
966 if (sec < 0 || (sec == 0 && ticks < 1))
968 else if (sec > (((~0ULL) >> 1) - ticks) / hz)
971 ticks += sec * hz; /* common case */
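Lines 898-931 and 944-971 are two widths of the same conversion: a remaining interval, already split into whole seconds plus a sub-second part expressed in ticks, is scaled by hz with an explicit test so the multiplication cannot overflow (LONG_MAX in the first variant, the largest signed 64-bit value, (~0ULL) >> 1, in the second). A hedged sketch of the pattern; HZ and the exact return values are assumptions, since the listing omits the lines between the tests:

    #include <limits.h>

    #define HZ 100                     /* assumed tick rate, for illustration */

    static long
    sec_to_ticks(long sec, long ticks)
    {
        if (sec < 0 || (sec == 0 && ticks < 1))
            return (1);                /* interval already expired: one tick  */
        if (sec > (LONG_MAX - ticks) / HZ)
            return (LONG_MAX);         /* sec * HZ + ticks would overflow     */
        return (ticks + sec * HZ);     /* common case                         */
    }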
981 * tsp->sec = hrt / NANOSEC;
1047 * sec = hrt;
1048 * sec += (hrt << 6);
1049 * sec -= (hrt << 8);
1050 * sec += (hrt << 13);
1051 * sec += (hrt << 14);
1052 * sec -= (hrt << 20);
1053 * sec += (hrt << 23);
1054 * sec += (hrt << 24);
1055 * sec += (hrt << 27);
1056 * sec += (hrt << 31);
1057 * sec >>= (32 + 30);
1072 * sec = tmp;
1073 * sec += (tmp << 6);
1074 * sec -= (tmp << 8);
1075 * sec += (tmp << 13);
1076 * sec += (tmp << 14);
1077 * sec -= (tmp << 20);
1078 * sec += (tmp << 23);
1079 * sec += (tmp << 24);
1080 * sec += (tmp << 27);
1081 * sec += (tmp << 31);
1082 * sec >>= 32;
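The excerpt from the block comment (lines 981-1082) replaces the 64-bit division by 1,000,000,000 with a multiplication by a fixed-point reciprocal: sec is accumulated from shifted copies of the input, which is simply a multiply by a constant written out in signed binary digits, and the closing right shift removes the scaling. Adding the listed terms up shows which reciprocal they encode; the throwaway check below is not from the source, it only sums the shifts as transcribed and prints nearby values of 2^k / 10^9 for comparison:

    #include <stdio.h>
    #include <stdint.h>

    int
    main(void)
    {
        /* 1 + 2^6 - 2^8 + 2^13 + 2^14 - 2^20 + 2^23 + 2^24 + 2^27 + 2^31 */
        int64_t k = 1 + (1LL << 6) - (1LL << 8) + (1LL << 13) + (1LL << 14)
            - (1LL << 20) + (1LL << 23) + (1LL << 24) + (1LL << 27)
            + (1LL << 31);

        printf("sum of terms      = %lld\n", (long long)k);
        printf("2^61 / 1000000000 = %llu\n", (1ULL << 61) / 1000000000ULL);
        printf("2^62 / 1000000000 = %llu\n", (1ULL << 62) / 1000000000ULL);
        return (0);
    }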
1091 * sec = tmp;
1092 * tmp <<= 6; sec += tmp;
1093 * tmp <<= 2; sec -= tmp;
1094 * tmp <<= 5; sec += tmp;
1095 * tmp <<= 1; sec += tmp;
1096 * tmp <<= 6; sec -= tmp;
1097 * tmp <<= 3; sec += tmp;
1098 * tmp <<= 1; sec += tmp;
1099 * tmp <<= 3; sec += tmp;
1100 * tmp <<= 4; sec += tmp;
1101 * sec >>= 32;
1107 * tmp <<= 6; sec = sec + tmp;
1109 * tmp <<= 6; sec = (sec + tmp) >> 6;
1111 * sec = (sec >> 6) + tmp;
1113 * The final shift ("sec >>= 32") goes away.
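Lines 1091-1113 then restructure the multiply Horner-style and fold the final "sec >>= 32" into the individual steps, so each partial result is already scaled down ("sec = (sec >> n) + tmp") instead of shifting tmp ever higher. The price is that every ">>" now truncates low-order bits, which is where the error budget discussed next comes from. A toy version of the rewrite, using the small constant 13/16 = 1 - (1/4)(1 - 1/4) instead of the real reciprocal; the function names and the constant are illustrative only:

    #include <stdio.h>
    #include <stdint.h>

    /* Straight form: multiply by (16 - 4 + 1) = 13, then shift down by 4. */
    static uint32_t
    mul13_div16_straight(uint32_t x)
    {
        int64_t sec = x, tmp = x;

        tmp <<= 2; sec -= tmp;              /* sec = x - 4x        */
        tmp <<= 2; sec += tmp;              /* sec = x - 4x + 16x  */
        return ((uint32_t)(sec >> 4));      /* exact 13x / 16      */
    }

    /* Absorbed form: shift the running sum down at each step instead.
     * Writing the steps as "x - (sec >> n)" keeps every intermediate
     * positive, but each ">>" truncates, so the result can be off by one. */
    static uint32_t
    mul13_div16_absorbed(uint32_t x)
    {
        uint32_t sec;

        sec = x;
        sec = x - (sec >> 2);               /* ~ x * 3/4   */
        sec = x - (sec >> 2);               /* ~ x * 13/16 */
        return (sec);
    }

    int
    main(void)
    {
        for (uint32_t x = 0; x < 1000; x++) {
            uint32_t a = mul13_div16_straight(x);
            uint32_t b = mul13_div16_absorbed(x);

            if (a != b)
                printf("x = %u: exact %u, absorbed %u\n", x, a, b);
        }
        return (0);
    }

Running it shows the absorbed form occasionally comes out one too high (x = 7, for instance), which is the same +/- 1 behaviour the comment goes on to analyse and then biases away.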
1119 * Thus, the final result ("sec") is correct to +/- 1.
1121 * It turns out to be important to keep "sec" positive at each step, because
1124 * "sec = (sec >> n) - tmp" must be changed to "sec = tmp - (sec >> n)", and
1129 * sec = tmp + (sec >> 6);
1130 * sec = tmp - (tmp >> 2);
1131 * sec = tmp - (sec >> 5);
1132 * sec = tmp + (sec >> 1);
1133 * sec = tmp - (sec >> 6);
1134 * sec = tmp - (sec >> 3);
1135 * sec = tmp + (sec >> 1);
1136 * sec = tmp + (sec >> 3);
1137 * sec = tmp + (sec >> 4);
1139 * This yields a value for sec that is accurate to +1/-1, so we have two
1141 * the rounding toward zero, so that sec is always less than or equal to
1142 * the correct value. With this modified code, sec is accurate to +0/-2, with
1144 * deal with one case (sec too small) in the cleanup code.
1147 * ("sec = tmp + (sec >> 6);"), since it only has an effect when bit 31 is
1149 * *guaranteed* accuracy of sec to +0/-3, but speeds up the common cases.
1151 * Finally, we compute nsec = hrt - (sec * 1,000,000,000). nsec will always
1152 * be positive (since sec is never too large), and will at most be equal to
1153 * the error in sec (times 1,000,000,000) plus the low-order 30 bits of hrt.
1156 * sec * 1,000,000,000, we only need the low 32 bits, so we can just do 32-bit
1163 * sec++;
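With sec in hand, lines 1151-1163 describe the remainder step: nsec = hrt - sec * 1,000,000,000, computed in 32-bit arithmetic because only the low 32 bits of the product can matter, followed by a short loop that subtracts NANOSEC and increments sec whenever sec came out a little small. The key arithmetic fact, used by the implementation that follows but not spelled out in the excerpt, is that 1,000,000,000 = 125 * 125 * 125 * 2^9 and that multiplying by 125 is just (x << 7) - x - x - x. A tiny illustrative check; the repeated multiply-by-125 lines are my reading of the omitted code, not quotations:

    #include <assert.h>
    #include <stdint.h>

    int
    main(void)
    {
        uint32_t sec = 1234567;                         /* arbitrary sample */
        uint32_t tmp = (sec << 7) - sec - sec - sec;    /* sec * 125        */

        tmp = (tmp << 7) - tmp - tmp - tmp;     /* sec * 15625  (low 32 bits) */
        tmp = (tmp << 7) - tmp - tmp - tmp;     /* sec * 1953125 (low 32 bits) */
        assert(125u * 125u * 125u * 512u == 1000000000u);
        assert((tmp << 9) == sec * 1000000000u);        /* sec * 10^9 mod 2^32 */
        return (0);
    }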
1175 uint32_t sec, nsec, tmp;
1178 sec = tmp - (tmp >> 2);
1179 sec = tmp - (sec >> 5);
1180 sec = tmp + (sec >> 1);
1181 sec = tmp - (sec >> 6) + 7;
1182 sec = tmp - (sec >> 3);
1183 sec = tmp + (sec >> 1);
1184 sec = tmp + (sec >> 3);
1185 sec = tmp + (sec >> 4);
1186 tmp = (sec << 7) - sec - sec - sec;
1192 sec++;
1194 tsp->tv_sec = (time_t)sec;
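Because this listing only shows lines that mention sec, the body of the converter above appears with gaps (source lines 1176-1177 and 1187-1191 are filtered out). The sketch below reassembles a plausible whole: the shift sequence is verbatim from lines 1178-1186, while the initial hrt >> 30, the two further multiply-by-125 steps, the nsec subtraction and the cleanup loop are reconstructions guided by the comment, with NANOSEC assumed to be 1,000,000,000. The main() loop spot-checks the sketch against plain 64-bit division:

    #include <stdio.h>
    #include <stdint.h>
    #include <time.h>

    #define NANOSEC 1000000000u

    typedef int64_t hrtime_t;               /* stand-in for the kernel type */

    static void
    hrt2ts_sketch(hrtime_t hrt, struct timespec *tsp)
    {
        uint32_t sec, nsec, tmp;

        tmp = (uint32_t)(hrt >> 30);        /* reconstructed                 */
        sec = tmp - (tmp >> 2);
        sec = tmp - (sec >> 5);
        sec = tmp + (sec >> 1);
        sec = tmp - (sec >> 6) + 7;         /* bias so sec never overshoots  */
        sec = tmp - (sec >> 3);
        sec = tmp + (sec >> 1);
        sec = tmp + (sec >> 3);
        sec = tmp + (sec >> 4);             /* sec ~= hrt / NANOSEC, +0/-3   */
        tmp = (sec << 7) - sec - sec - sec; /* sec * 125                     */
        tmp = (tmp << 7) - tmp - tmp - tmp; /* reconstructed: sec * 15625    */
        tmp = (tmp << 7) - tmp - tmp - tmp; /* reconstructed: sec * 1953125  */
        nsec = (uint32_t)hrt - (tmp << 9);  /* reconstructed: hrt - sec*10^9 */
        while (nsec >= NANOSEC) {           /* reconstructed cleanup loop    */
            nsec -= NANOSEC;
            sec++;
        }
        tsp->tv_sec = (time_t)sec;
        tsp->tv_nsec = nsec;
    }

    int
    main(void)
    {
        /* Spot-check the sketch against the straightforward division. */
        for (hrtime_t hrt = 0; hrt < 400000000000LL; hrt += 777777777LL) {
            struct timespec ts;

            hrt2ts_sketch(hrt, &ts);
            if (ts.tv_sec != hrt / NANOSEC || ts.tv_nsec != hrt % NANOSEC)
                printf("mismatch at %lld\n", (long long)hrt);
        }
        return (0);
    }

The timeval variant excerpted at lines 1249-1269 computes sec with the identical sequence; the listing does not show how it forms the sub-second part.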
1249 uint32_t sec, nsec, tmp;
1253 sec = tmp - (tmp >> 2);
1254 sec = tmp - (sec >> 5);
1255 sec = tmp + (sec >> 1);
1256 sec = tmp - (sec >> 6) + 7;
1257 sec = tmp - (sec >> 3);
1258 sec = tmp + (sec >> 1);
1259 sec = tmp + (sec >> 3);
1260 sec = tmp + (sec >> 4);
1261 tmp = (sec << 7) - sec - sec - sec;
1267 sec++;
1269 tvp->tv_sec = (time_t)sec;
1491 utc = (utc << 6) - (utc << 2) + tod.tod_sec; /* 60 * min + sec */
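The final match, line 1491, is the same strength-reduction idea in miniature: the accumulated minutes are multiplied by 60 as (utc << 6) - (utc << 2), that is 64*utc - 4*utc, before the seconds from the time-of-day clock are added in. A one-line check of the identity, illustrative only:

    #include <assert.h>

    int
    main(void)
    {
        for (unsigned utc = 0; utc < 100000; utc++)
            assert(((utc << 6) - (utc << 2)) == 60 * utc);  /* 64*utc - 4*utc */
        return (0);
    }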