mirror of https://github.com/torvalds/linux.git
iio: common: ssp_sensors: use ktime_get_real_ns() timestamps
getnstimeofday() suffers from the y2038 overflow on 32-bit architectures and requires a conversion into the nanosecond format that we want here.

This changes ssp_parse_dataframe() to use ktime_get_real_ns() directly, which does not have that problem.

An open question is which time base should be used here. Normally timestamps should use ktime_get_ns() or ktime_get_boot_ns() to read monotonic time instead of "real" time, which is subject to jumps from settimeofday() calls or leap seconds.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 1b39eacdbd
commit a4493f227d
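For context, here is a minimal sketch contrasting the pattern being removed with its replacement; the helper name ssp_example_timestamp_ns() is hypothetical and not part of the patch. On 32-bit kernels, struct timespec carries a 32-bit tv_sec that overflows in 2038, while ktime_get_real_ns() returns a 64-bit nanosecond count directly.

#include <linux/timekeeping.h>
#include <linux/types.h>

/* Hypothetical helper, for illustration only. */
static u64 ssp_example_timestamp_ns(void)
{
	/*
	 * Old pattern (removed by this patch): y2038-unsafe on 32-bit
	 * architectures and needs a manual conversion to nanoseconds.
	 *
	 *	struct timespec ts;
	 *
	 *	getnstimeofday(&ts);
	 *	return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
	 */

	/* New pattern: one call, 64-bit nanoseconds, no overflow. */
	return ktime_get_real_ns();
}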
@@ -277,12 +277,9 @@ static int ssp_handle_big_data(struct ssp_data *data, char *dataframe, int *idx)
 static int ssp_parse_dataframe(struct ssp_data *data, char *dataframe, int len)
 {
 	int idx, sd;
-	struct timespec ts;
 	struct ssp_sensor_data *spd;
 	struct iio_dev **indio_devs = data->sensor_devs;
 
-	getnstimeofday(&ts);
-
 	for (idx = 0; idx < len;) {
 		switch (dataframe[idx++]) {
 		case SSP_MSG2AP_INST_BYPASS_DATA:
@@ -329,7 +326,7 @@ static int ssp_parse_dataframe(struct ssp_data *data, char *dataframe, int len)
 	}
 
 	if (data->time_syncing)
-		data->timestamp = ts.tv_sec * 1000000000ULL + ts.tv_nsec;
+		data->timestamp = ktime_get_real_ns();
 
 	return 0;
 }
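On the open question in the commit message: if the driver's consumers could tolerate a monotonic time base, the jump-free clocks named there would be used instead. A sketch under that assumption follows; ssp_example_monotonic_ns() is a hypothetical helper, and this patch deliberately keeps the realtime clock.

#include <linux/timekeeping.h>
#include <linux/types.h>

/* Hypothetical helper showing the monotonic alternatives. */
static u64 ssp_example_monotonic_ns(bool include_suspend)
{
	if (include_suspend)
		return ktime_get_boot_ns();	/* CLOCK_BOOTTIME: also advances in suspend */

	return ktime_get_ns();			/* CLOCK_MONOTONIC: immune to settimeofday() */
}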