tests: cris: remove check_time1

This test, borrowed from the GDB simulator test suite, checks that every
syscall increments the time returned by gettimeofday() by exactly 1 ms.
This is not guaranteed, or even desirable, on QEMU, so remove this test.

Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Signed-off-by: Rabin Vincent <rabinv@axis.com>
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
This commit is contained in:
Rabin Vincent 2016-08-23 16:50:18 +02:00 committed by Edgar E. Iglesias
parent d10a0102b3
commit 85b3ed1db5
2 changed files with 0 additions and 47 deletions

View File

@ -114,7 +114,6 @@ TESTCASES += check_mmap1.ctst
TESTCASES += check_mmap2.ctst
TESTCASES += check_mmap3.ctst
TESTCASES += check_sigalrm.ctst
TESTCASES += check_time1.ctst
TESTCASES += check_time2.ctst
TESTCASES += check_settls1.ctst

View File

@ -1,46 +0,0 @@
/* Basic time functionality test: check that milliseconds are
incremented for each syscall (does not work on host). */
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <string.h>
#include <stdlib.h>
/* Print the failing call named in MSG (with strerror text appended by
   perror) to stderr, then terminate the test abnormally.  Never returns.  */
void err (const char *msg)
{
  perror (msg);
  abort ();
}
/* Basic time functionality test: take an initial gettimeofday() sample,
   then verify that each subsequent gettimeofday() syscall advances the
   clock by exactly 1000 us (the GDB sim's synthetic clock behavior; does
   not hold on a real host), and finally that time() agrees with the last
   gettimeofday() second.  Returns 0 / prints "pass" on success; aborts
   on any mismatch or syscall failure.  */
int
main (void)
{
  struct timeval t_m = {0, 0};
  struct timezone t_z = {0, 0};
  struct timeval t_m1 = {0, 0};
  int i;

  if (gettimeofday (&t_m, &t_z) != 0)
    err ("gettimeofday");

  for (i = 1; i < 10000; i++)
    if (gettimeofday (&t_m1, NULL) != 0)
      err ("gettimeofday 1");
    else
      /* Widen to 64 bits before multiplying: tv_sec * 1000000 overflows
	 a 32-bit long (signed overflow is UB) for any tv_sec beyond
	 ~2147 seconds, i.e. always on targets where long is 32 bits.  */
      if ((long long) t_m1.tv_sec * 1000000 + t_m1.tv_usec
	  != ((long long) t_m.tv_sec * 1000000 + t_m.tv_usec
	      + (long long) i * 1000))
	{
	  /* Cast the timeval fields: time_t/suseconds_t widths vary by
	     platform, and %ld requires exactly a long argument.  */
	  fprintf (stderr, "t0 (%ld, %ld), i %d, t1 (%ld, %ld)\n",
		   (long) t_m.tv_sec, (long) t_m.tv_usec, i,
		   (long) t_m1.tv_sec, (long) t_m1.tv_usec);
	  abort ();
	}

  /* time() must report the same second as the last gettimeofday().  */
  if (time (NULL) != t_m1.tv_sec)
    {
      fprintf (stderr, "time != gettod\n");
      abort ();
    }
  printf ("pass\n");
  exit (0);
}