/*
* Copyright (c) 2013-2018 Joris Vink <joris@coders.se>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <sys/time.h>
#include <ctype.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <stdlib.h>
#include <time.h>
#include <limits.h>
#include <errno.h>
#include <syslog.h>
#include "kore.h"
static struct {
char *name;
int value;
} month_names[] = {
{ "Jan", 0 },
{ "Feb", 1 },
{ "Mar", 2 },
{ "Apr", 3 },
{ "May", 4 },
{ "Jun", 5 },
{ "Jul", 6 },
{ "Aug", 7 },
{ "Sep", 8 },
{ "Oct", 9 },
{ "Nov", 10 },
{ "Dec", 11 },
{ NULL, 0 },
};
static char b64table[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
#if defined(KORE_DEBUG)
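/*
 * Debug logging helper, compiled in only when KORE_DEBUG is defined.
 * Prints the process pid, source file, line and formatted message to stdout.
 */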
void
kore_debug_internal(char *file, int line, const char *fmt, ...)
{
va_list args;
char buf[2048];
va_start(args, fmt);
(void)vsnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
printf("[%d] %s:%d - %s\n", kore_pid, file, line, buf);
}
#endif
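/*
 * Initialize logging: when not running in the foreground, open syslog
 * under the program name (single binary builds) or "kore".
 */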
void
kore_log_init(void)
{
#if defined(KORE_SINGLE_BINARY)
extern const char *__progname;
const char *name = __progname;
#else
const char *name = "kore";
#endif
if (!foreground)
openlog(name, LOG_NDELAY | LOG_PID, LOG_DAEMON);
}
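/*
 * Log a formatted message prefixed with its origin (parent, keymgr or
 * worker id), to stdout in the foreground or to syslog otherwise.
 */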
void
kore_log(int prio, const char *fmt, ...)
{
va_list args;
char buf[2048], tmp[32];
va_start(args, fmt);
(void)vsnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
if (worker != NULL) {
(void)snprintf(tmp, sizeof(tmp), "wrk %d", worker->id);
#if !defined(KORE_NO_TLS)
if (worker->id == KORE_WORKER_KEYMGR)
(void)kore_strlcpy(tmp, "keymgr", sizeof(tmp));
#endif
if (foreground)
printf("[%s]: %s\n", tmp, buf);
else
syslog(prio, "[%s]: %s", tmp, buf);
} else {
if (foreground)
printf("[parent]: %s\n", buf);
else
syslog(prio, "[parent]: %s", buf);
}
}
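/*
 * Bounded string copy in the style of strlcpy(): dst is always
 * NUL-terminated (len must be non-zero) and the length of src is
 * returned so callers can detect truncation.
 */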
size_t
kore_strlcpy(char *dst, const char *src, const size_t len)
{
char *d = dst;
const char *s = src;
const char *end = dst + len - 1;
if (len == 0)
fatal("kore_strlcpy: len == 0");
while ((*d = *s) != '\0') {
if (d == end) {
*d = '\0';
break;
}
d++;
s++;
}
while (*s != '\0')
s++;
return (s - src);
}
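/*
 * snprintf() wrapper that treats truncation as an error and optionally
 * reports the formatted length via *len.
 */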
int
kore_snprintf(char *str, size_t size, int *len, const char *fmt, ...)
{
int l;
va_list args;
va_start(args, fmt);
l = vsnprintf(str, size, fmt, args);
va_end(args);
if (l == -1 || (size_t)l >= size)
return (KORE_RESULT_ERROR);
if (len != NULL)
*len = l;
return (KORE_RESULT_OK);
}
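/*
 * Convert a string to a long long within [min, max]. On any parse or
 * range failure *err is set to KORE_RESULT_ERROR and 0 is returned,
 * e.g. kore_strtonum("8080", 10, 1, 65535, &err) yields 8080.
 */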
long long
kore_strtonum(const char *str, int base, long long min, long long max, int *err)
{
long long l;
char *ep;
if (min > max) {
*err = KORE_RESULT_ERROR;
return (0);
}
errno = 0;
l = strtoll(str, &ep, base);
if (errno != 0 || str == ep || *ep != '\0') {
*err = KORE_RESULT_ERROR;
return (0);
}
if (l < min) {
*err = KORE_RESULT_ERROR;
return (0);
}
if (l > max) {
*err = KORE_RESULT_ERROR;
return (0);
}
*err = KORE_RESULT_OK;
return (l);
}
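/*
 * Convert a string to a 64-bit integer. With sign set, the value is
 * parsed as signed; otherwise it is parsed as unsigned and negative
 * input is rejected.
 */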
u_int64_t
kore_strtonum64(const char *str, int sign, int *err)
{
u_int64_t l;
long long ll;
char *ep;
int check;
l = 0;
check = 1;
errno = 0;
ll = strtoll(str, &ep, 10);
if ((errno == EINVAL || errno == ERANGE) &&
(ll == LLONG_MIN || ll == LLONG_MAX)) {
if (sign) {
*err = KORE_RESULT_ERROR;
return (0);
}
check = 0;
}
if (!sign) {
l = strtoull(str, &ep, 10);
if ((errno == EINVAL || errno == ERANGE) && l == ULLONG_MAX) {
*err = KORE_RESULT_ERROR;
return (0);
}
if (check && ll < 0) {
*err = KORE_RESULT_ERROR;
return (0);
}
}
if (str == ep || *ep != '\0') {
*err = KORE_RESULT_ERROR;
return (0);
}
*err = KORE_RESULT_OK;
return ((sign) ? (u_int64_t)ll : l);
}
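/*
 * Convert a string to a double within [min, max]. Note that a result of
 * exactly zero is also rejected by the checks below.
 */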
double
kore_strtodouble(const char *str, long double min, long double max, int *err)
{
double d;
char *ep;
if (min > max) {
*err = KORE_RESULT_ERROR;
return (0);
}
errno = 0;
d = strtod(str, &ep);
if (d == 0 || errno == ERANGE || str == ep || *ep != '\0') {
*err = KORE_RESULT_ERROR;
return (0);
}
if (d < min) {
*err = KORE_RESULT_ERROR;
return (0);
}
if (d > max) {
*err = KORE_RESULT_ERROR;
return (0);
}
*err = KORE_RESULT_OK;
return (d);
}
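/*
 * Split input in place on the delimiter characters in delim, storing up
 * to ele - 1 non-empty tokens in out followed by a NULL sentinel, and
 * return the number of tokens stored, e.g.:
 *
 *	char *parts[4];
 *	int count = kore_split_string(line, " ", parts, 4);
 */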
int
kore_split_string(char *input, const char *delim, char **out, size_t ele)
{
int count;
char **ap;
if (ele == 0)
return (0);
count = 0;
for (ap = out; ap < &out[ele - 1] &&
(*ap = strsep(&input, delim)) != NULL;) {
if (**ap != '\0') {
ap++;
count++;
}
}
*ap = NULL;
return (count);
}
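/*
 * Copy in into a freshly allocated string (*out) with every occurrence
 * of the strip character removed; the caller releases it with kore_free().
 */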
void
kore_strip_chars(char *in, const char strip, char **out)
{
u_int32_t len;
char *s, *p;
len = strlen(in);
*out = kore_malloc(len + 1);
p = *out;
for (s = in; s < (in + len); s++) {
if (*s == strip)
continue;
*p++ = *s;
}
*p = '\0';
}
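/*
 * Parse an RFC 1123 style HTTP date such as "Sun, 06 Nov 1994 08:49:37 GMT"
 * into a time_t. Returns 0 when the date cannot be parsed.
 */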
time_t
kore_date_to_time(const char *http_date)
{
time_t t;
int err, i;
struct tm tm, *ltm;
char *args[7], *tbuf[5], *sdup;
time(&t);
ltm = localtime(&t);
sdup = kore_strdup(http_date);
t = KORE_RESULT_ERROR;
if (kore_split_string(sdup, " ", args, 7) != 6) {
kore_debug("misformed http-date: '%s'", http_date);
goto out;
}
memset(&tm, 0, sizeof(tm));
tm.tm_year = kore_strtonum(args[3], 10, 1900, 2068, &err) - 1900;
if (err == KORE_RESULT_ERROR) {
kore_debug("misformed year in http-date: '%s'", http_date);
goto out;
}
for (i = 0; month_names[i].name != NULL; i++) {
if (!strcmp(month_names[i].name, args[2])) {
tm.tm_mon = month_names[i].value;
break;
}
}
if (month_names[i].name == NULL) {
kore_debug("misformed month in http-date: '%s'", http_date);
goto out;
}
tm.tm_mday = kore_strtonum(args[1], 10, 1, 31, &err);
if (err == KORE_RESULT_ERROR) {
kore_debug("misformed mday in http-date: '%s'", http_date);
goto out;
}
if (kore_split_string(args[4], ":", tbuf, 5) != 3) {
kore_debug("misformed HH:MM:SS in http-date: '%s'", http_date);
goto out;
}
tm.tm_hour = kore_strtonum(tbuf[0], 10, 0, 23, &err);
if (err == KORE_RESULT_ERROR) {
kore_debug("misformed hour in http-date: '%s'", http_date);
goto out;
}
tm.tm_min = kore_strtonum(tbuf[1], 10, 0, 59, &err);
if (err == KORE_RESULT_ERROR) {
kore_debug("misformed minutes in http-date: '%s'", http_date);
goto out;
}
tm.tm_sec = kore_strtonum(tbuf[2], 10, 0, 60, &err);
if (err == KORE_RESULT_ERROR) {
kore_debug("misformed seconds in http-date: '%s'", http_date);
goto out;
}
tm.tm_isdst = ltm->tm_isdst;
t = mktime(&tm) + ltm->tm_gmtoff;
if (t == -1) {
t = 0;
kore_debug("mktime() on '%s' failed", http_date);
}
out:
kore_free(sdup);
return (t);
}
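/*
 * Format a timestamp as an HTTP date string. The result is kept in a
 * static buffer that is reused between calls, so callers must not free
 * it or hold on to it across calls.
 */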
char *
kore_time_to_date(time_t now)
{
struct tm *tm;
static time_t last = 0;
static char tbuf[32];
if (now != last) {
last = now;
tm = gmtime(&now);
if (!strftime(tbuf, sizeof(tbuf), "%a, %d %b %Y %T GMT", tm)) {
kore_debug("strftime() gave us NULL (%ld)", now);
return (NULL);
}
}
return (tbuf);
}
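/*
 * Return the monotonic clock time in milliseconds.
 */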
u_int64_t
kore_time_ms(void)
{
struct timespec ts;
(void)clock_gettime(CLOCK_MONOTONIC, &ts);
return ((u_int64_t)(ts.tv_sec * 1000 + (ts.tv_nsec / 1000000)));
}
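/*
 * Base64-encode len bytes of data into a freshly allocated,
 * NUL-terminated string returned via *out. Zero-length input is
 * treated as an error.
 */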
int
kore_base64_encode(const void *data, size_t len, char **out)
{
u_int8_t n;
size_t nb;
const u_int8_t *ptr;
u_int32_t bytes;
struct kore_buf result;
nb = 0;
ptr = data;
kore_buf_init(&result, (len / 3) * 4);
while (len > 0) {
if (len > 2) {
nb = 3;
bytes = *ptr++ << 16;
bytes |= *ptr++ << 8;
bytes |= *ptr++;
} else if (len > 1) {
nb = 2;
bytes = *ptr++ << 16;
bytes |= *ptr++ << 8;
} else if (len == 1) {
nb = 1;
bytes = *ptr++ << 16;
} else {
kore_buf_cleanup(&result);
return (KORE_RESULT_ERROR);
}
n = (bytes >> 18) & 0x3f;
kore_buf_append(&result, &(b64table[n]), 1);
n = (bytes >> 12) & 0x3f;
kore_buf_append(&result, &(b64table[n]), 1);
if (nb > 1) {
n = (bytes >> 6) & 0x3f;
kore_buf_append(&result, &(b64table[n]), 1);
if (nb > 2) {
n = bytes & 0x3f;
kore_buf_append(&result, &(b64table[n]), 1);
}
}
len -= nb;
}
switch (nb) {
case 1:
kore_buf_appendf(&result, "==");
break;
case 2:
kore_buf_appendf(&result, "=");
break;
case 3:
break;
default:
kore_buf_cleanup(&result);
return (KORE_RESULT_ERROR);
}
/* result.data gets taken over so there is no need to clean up result. */
*out = kore_buf_stringify(&result, NULL);
return (KORE_RESULT_OK);
}
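/*
 * Decode a base64 string into a freshly allocated buffer, returning the
 * decoded bytes via *out and their length via *olen.
 */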
int
kore_base64_decode(const char *in, u_int8_t **out, size_t *olen)
{
int i, c;
struct kore_buf *res;
u_int8_t d, n, o;
u_int32_t b, len, idx;
i = 4;
b = 0;
d = 0;
c = 0;
len = strlen(in);
res = kore_buf_alloc(len);
for (idx = 0; idx < len; idx++) {
c = in[idx];
if (c == '=')
break;
for (o = 0; o < sizeof(b64table); o++) {
if (b64table[o] == c) {
d = o;
break;
}
}
if (o == sizeof(b64table)) {
*out = NULL;
kore_buf_free(res);
return (KORE_RESULT_ERROR);
}
b |= (d & 0x3f) << ((i - 1) * 6);
i--;
if (i == 0) {
for (i = 2; i >= 0; i--) {
n = (b >> (8 * i));
kore_buf_append(res, &n, 1);
}
b = 0;
i = 4;
}
}
if (c == '=') {
if (i > 2) {
*out = NULL;
kore_buf_free(res);
return (KORE_RESULT_ERROR);
}
o = i;
for (i = 2; i >= o; i--) {
n = (b >> (8 * i));
kore_buf_append(res, &n, 1);
}
}
*out = kore_buf_release(res, olen);
return (KORE_RESULT_OK);
}
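/*
 * Find the first occurrence of needle (len bytes) within src (slen bytes)
 * and return a pointer to it, or NULL when it is not present.
 */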
void *
kore_mem_find(void *src, size_t slen, const void *needle, size_t len)
{
size_t pos;
for (pos = 0; pos < slen; pos++) {
if (*((u_int8_t *)src + pos) != *(const u_int8_t *)needle)
continue;
if ((slen - pos) < len)
return (NULL);
if (!memcmp((u_int8_t *)src + pos, needle, len))
return ((u_int8_t *)src + pos);
}
return (NULL);
}
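/*
 * Trim leading and trailing whitespace from string (of length len) in
 * place and return a pointer to the first non-whitespace character.
 */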
char *
kore_text_trim(char *string, size_t len)
{
char *end;
if (len == 0)
return (string);
end = (string + len) - 1;
while (isspace(*(unsigned char *)string) && string < end)
string++;
while (isspace(*(unsigned char *)end) && end > string)
*(end)-- = '\0';
return (string);
}
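/*
 * Read one line from fp into in, stripping the trailing newline and any
 * leading whitespace and turning tabs into spaces. Comment lines starting
 * with '#' and empty lines are returned as empty strings; NULL is
 * returned on EOF or read error.
 */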
char *
kore_read_line(FILE *fp, char *in, size_t len)
{
char *p, *t;
if (fgets(in, len, fp) == NULL)
return (NULL);
p = in;
in[strcspn(in, "\n")] = '\0';
while (isspace(*(unsigned char *)p))
p++;
if (p[0] == '#' || p[0] == '\0') {
p[0] = '\0';
return (p);
}
for (t = p; *t != '\0'; t++) {
if (*t == '\t')
*t = ' ';
}
return (p);
}
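/*
 * Log a fatal error and terminate the process; the key manager worker
 * runs its cleanup handler first when TLS support is compiled in.
 */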
void
fatal(const char *fmt, ...)
{
va_list args;
char buf[2048];
extern const char *__progname;
va_start(args, fmt);
(void)vsnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
if (!foreground)
kore_log(LOG_ERR, "%s", buf);
#if !defined(KORE_NO_TLS)
if (worker != NULL && worker->id == KORE_WORKER_KEYMGR)
kore_keymgr_cleanup(1);
#endif
printf("%s: %s\n", __progname, buf);
exit(1);
}