iio: imu: nvi v.329 Fix ICM DMP period
[linux-3.10.git] / drivers / iio / imu / nvi_mpu / nvi.c
1 /* Copyright (c) 2014-2016, NVIDIA CORPORATION.  All rights reserved.
2  *
3  * This software is licensed under the terms of the GNU General Public
4  * License version 2, as published by the Free Software Foundation, and
5  * may be copied, distributed, and modified under those terms.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10  * GNU General Public License for more details.
11  */
12
13 /* NVS = NVidia Sensor framework */
14 /* See nvs_iio.c and nvs.h for documentation */
15
16
17 #include <linux/i2c.h>
18 #include <linux/module.h>
19 #include <linux/slab.h>
20 #include <linux/kernel.h>
21 #include <linux/err.h>
22 #include <linux/delay.h>
23 #include <linux/interrupt.h>
24 #include <linux/regulator/consumer.h>
25 #include <linux/of.h>
26 #include <linux/nvs.h>
27 #include <linux/crc32.h>
28 #include <linux/mpu_iio.h>
29
30 #include "nvi.h"
31
32 #define NVI_DRIVER_VERSION              (329)
33 #define NVI_VENDOR                      "Invensense"
34 #define NVI_NAME                        "mpu6xxx"
35 #define NVI_NAME_MPU6050                "mpu6050"
36 #define NVI_NAME_MPU6500                "mpu6500"
37 #define NVI_NAME_MPU6515                "mpu6515"
38 #define NVI_NAME_MPU9150                "mpu9150"
39 #define NVI_NAME_MPU9250                "mpu9250"
40 #define NVI_NAME_MPU9350                "mpu9350"
41 #define NVI_NAME_ICM20628               "icm20628"
42 #define NVI_NAME_ICM20630               "icm20630"
43 #define NVI_NAME_ICM20632               "icm20632"
44 #define NVI_HW_ID_AUTO                  (0xFF)
45 #define NVI_HW_ID_MPU6050               (0x68)
46 #define NVI_HW_ID_MPU6500               (0x70)
47 #define NVI_HW_ID_MPU6515               (0x74)
48 #define NVI_HW_ID_MPU9150               (0x68)
49 #define NVI_HW_ID_MPU9250               (0x71)
50 #define NVI_HW_ID_MPU9350               (0x72)
51 #define NVI_HW_ID_ICM20628              (0xA2)
52 #define NVI_HW_ID_ICM20630              (0xAB)
53 #define NVI_HW_ID_ICM20632              (0xAD)
54 /* NVI_FW_CRC_CHECK used only during development to confirm valid FW */
55 #define NVI_FW_CRC_CHECK                (0)
56
/* Per-client driver data: the device state plus probe-time context.
 * NOTE(review): fw_load_work presumably schedules the DMP firmware
 * download (nvi_dmp_fw is forward-declared below) — confirm where the
 * work is queued; not visible in this chunk.
 */
struct nvi_pdata {
	struct nvi_state st;
	struct work_struct fw_load_work;
	const struct i2c_device_id *i2c_dev_id;	/* ID matched at probe */
};
62
/* Binds a hardware ID value (see NVI_HW_ID_* above) and part name to
 * the per-part hardware abstraction layer.
 */
struct nvi_id_hal {
	u8 hw_id;			/* NVI_HW_ID_* register value */
	const char *name;		/* part name string */
	const struct nvi_hal *hal;	/* HAL for this part */
};
68 /* ARRAY_SIZE(nvi_id_hals) must match ARRAY_SIZE(nvi_i2c_device_id) - 1 */
/* Part selector; used as the driver_data index of nvi_i2c_device_id[].
 * NVI_NDX_AUTO means the part is identified at runtime from its HW ID.
 * Order must stay in sync with nvi_i2c_device_id[] below.
 */
enum NVI_NDX {
	NVI_NDX_AUTO = 0,
	NVI_NDX_MPU6050,
	NVI_NDX_MPU6500,
	NVI_NDX_MPU6515,
	NVI_NDX_MPU9150,
	NVI_NDX_MPU9250,
	NVI_NDX_MPU9350,
	NVI_NDX_ICM20628,
	NVI_NDX_ICM20630,
	NVI_NDX_ICM20632,
	NVI_NDX_N,		/* count; not a valid part index */
};
82 /* enum NVI_NDX_N must match ARRAY_SIZE(nvi_i2c_device_id) - 1 */
/* I2C device ID table; entry order must match enum NVI_NDX above
 * (driver_data carries the NVI_NDX_* value for each name).
 */
static struct i2c_device_id nvi_i2c_device_id[] = {
	{ NVI_NAME, NVI_NDX_AUTO },
	{ NVI_NAME_MPU6050, NVI_NDX_MPU6050 },
	{ NVI_NAME_MPU6500, NVI_NDX_MPU6500 },
	{ NVI_NAME_MPU6515, NVI_NDX_MPU6515 },
	{ NVI_NAME_MPU9150, NVI_NDX_MPU9150 },
	{ NVI_NAME_MPU9250, NVI_NDX_MPU9250 },
	{ NVI_NAME_MPU9350, NVI_NDX_MPU9350 },
	{ NVI_NAME_ICM20628, NVI_NDX_ICM20628 },
	{ NVI_NAME_ICM20630, NVI_NDX_ICM20630 },
	{ NVI_NAME_ICM20632, NVI_NDX_ICM20632 },
	{}
};
96
/* Debug/info command selectors.  NOTE(review): presumably written via a
 * sysfs/nvs debug attribute — the handler is not visible in this chunk.
 * Values from NVI_INFO_REG_WR up are deliberately offset (see comment).
 */
enum NVI_INFO {
	NVI_INFO_VER = 0,
	NVI_INFO_DBG,
	NVI_INFO_DBG_SPEW,
	NVI_INFO_AUX_SPEW,
	NVI_INFO_FIFO_SPEW,
	NVI_INFO_TS_SPEW,
	NVI_INFO_SNSR_SPEW,
	NVI_INFO_REG_WR = 0xC6, /* use 0xD0 on cmd line */
	NVI_INFO_MEM_RD,
	NVI_INFO_MEM_WR,
	NVI_INFO_DMP_FW,
	NVI_INFO_DMP_EN_MSK
};
111
/* regulator names in order of powering on */
/* indices correspond to st->nb_vreg[]/ts_vreg_en[] (0=vdd, 1=vlogic) */
static char *nvi_vregs[] = {
	"vdd",
	"vlogic",
};
117
118 static struct nvi_state *nvi_state_local;
119
120
121 static int nvi_dmp_fw(struct nvi_state *st);
122 static int nvi_aux_bypass_enable(struct nvi_state *st, bool enable);
123 static int nvi_read(struct nvi_state *st, bool flush);
124
125 static int nvi_nb_vreg(struct nvi_state *st,
126                        unsigned long event, unsigned int i)
127 {
128         if (event & REGULATOR_EVENT_POST_ENABLE)
129                 st->ts_vreg_en[i] = nvs_timestamp();
130         else if (event & (REGULATOR_EVENT_DISABLE |
131                           REGULATOR_EVENT_FORCE_DISABLE))
132                 st->ts_vreg_en[i] = 0;
133         if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG))
134                 dev_info(&st->i2c->dev, "%s %s event=0x%x ts=%lld\n",
135                          __func__, st->vreg[i].supply, (unsigned int)event,
136                          st->ts_vreg_en[i]);
137         return NOTIFY_OK;
138 }
139
140 static int nvi_nb_vreg_vdd(struct notifier_block *nb,
141                            unsigned long event, void *ignored)
142 {
143         struct nvi_state *st = container_of(nb, struct nvi_state, nb_vreg[0]);
144
145         return nvi_nb_vreg(st, event, 0);
146 }
147
148 static int nvi_nb_vreg_vlogic(struct notifier_block *nb,
149                               unsigned long event, void *ignored)
150 {
151         struct nvi_state *st = container_of(nb, struct nvi_state, nb_vreg[1]);
152
153         return nvi_nb_vreg(st, event, 1);
154 }
155
/* Notifier callbacks in nvi_vregs[] order (0=vdd, 1=vlogic) */
static int (* const nvi_nb_vreg_pf[])(struct notifier_block *nb,
				      unsigned long event, void *ignored) = {
	nvi_nb_vreg_vdd,
	nvi_nb_vreg_vlogic,
};
161
162 void nvi_err(struct nvi_state *st)
163 {
164         st->errs++;
165         if (!st->errs)
166                 st->errs--;
167 }
168
169 static void nvi_mutex_lock(struct nvi_state *st)
170 {
171         unsigned int i;
172
173         if (st->nvs) {
174                 for (i = 0; i < DEV_N; i++)
175                         st->nvs->nvs_mutex_lock(st->snsr[i].nvs_st);
176         }
177 }
178
179 static void nvi_mutex_unlock(struct nvi_state *st)
180 {
181         unsigned int i;
182
183         if (st->nvs) {
184                 for (i = 0; i < DEV_N; i++)
185                         st->nvs->nvs_mutex_unlock(st->snsr[i].nvs_st);
186         }
187 }
188
189 static void nvi_disable_irq(struct nvi_state *st)
190 {
191         if (st->i2c->irq && !st->irq_dis) {
192                 disable_irq_nosync(st->i2c->irq);
193                 st->irq_dis = true;
194                 if (st->sts & NVS_STS_SPEW_MSG)
195                         dev_info(&st->i2c->dev, "%s IRQ disabled\n", __func__);
196         }
197 }
198
199 static void nvi_enable_irq(struct nvi_state *st)
200 {
201         if (st->i2c->irq && st->irq_dis) {
202                 enable_irq(st->i2c->irq);
203                 st->irq_dis = false;
204                 if (st->sts & NVS_STS_SPEW_MSG)
205                         dev_info(&st->i2c->dev, "%s IRQ enabled\n", __func__);
206         }
207 }
208
209 static int nvi_i2c_w(struct nvi_state *st, u16 len, u8 *buf)
210 {
211         struct i2c_msg msg;
212
213         msg.addr = st->i2c->addr;
214         msg.flags = 0;
215         msg.len = len;
216         msg.buf = buf;
217         if (i2c_transfer(st->i2c->adapter, &msg, 1) != 1) {
218                 nvi_err(st);
219                 return -EIO;
220         }
221
222         return 0;
223 }
224
225 static int nvi_wr_reg_bank_sel(struct nvi_state *st, u8 reg_bank)
226 {
227         u8 buf[2];
228         int ret = 0;
229
230         if (!st->hal->reg->reg_bank.reg)
231                 return 0;
232
233         reg_bank <<= 4;
234         if (reg_bank != st->rc.reg_bank) {
235                 buf[0] = st->hal->reg->reg_bank.reg;
236                 buf[1] = reg_bank;
237                 ret = nvi_i2c_w(st, sizeof(buf), buf);
238                 if (ret) {
239                         dev_err(&st->i2c->dev, "%s 0x%x!->0x%x ERR=%d\n",
240                                 __func__, st->rc.reg_bank, reg_bank, ret);
241                 } else {
242                         if (st->sts & NVI_DBG_SPEW_MSG)
243                                 dev_info(&st->i2c->dev, "%s 0x%x->0x%x\n",
244                                          __func__, st->rc.reg_bank, reg_bank);
245                         st->rc.reg_bank = reg_bank;
246                 }
247         }
248         return ret;
249 }
250
251 static int nvi_i2c_write(struct nvi_state *st, u8 bank, u16 len, u8 *buf)
252 {
253         int ret;
254
255         ret = nvi_wr_reg_bank_sel(st, bank);
256         if (!ret)
257                 ret = nvi_i2c_w(st, len, buf);
258         return ret;
259 }
260
261 static int nvi_i2c_write_be(struct nvi_state *st, const struct nvi_br *br,
262                             u16 len, u32 val)
263 {
264         u8 buf[5];
265         unsigned int i;
266
267         buf[0] = br->reg;
268         for (i = len; i > 0; i--)
269                 buf[i] = (u8)(val >> (8 * (len - i)));
270         return nvi_i2c_write(st, br->bank, len + 1, buf);
271 }
272
273 static int nvi_i2c_write_le(struct nvi_state *st, const struct nvi_br *br,
274                             u16 len, u32 val)
275 {
276         u8 buf[5];
277         unsigned int i;
278
279         buf[0] = br->reg;
280         for (i = 0; i < len; i++)
281                 buf[i + 1] = (u8)(val >> (8 * i));
282         return nvi_i2c_write(st, br->bank, len + 1, buf);
283 }
284
285 int nvi_i2c_write_rc(struct nvi_state *st, const struct nvi_br *br, u32 val,
286                      const char *fn, u8 *rc, bool be)
287 {
288         bool wr = false;
289         u16 len;
290         unsigned int i;
291         int ret = 0;
292
293         len = br->len;
294         if (!len)
295                 len++;
296         val |= br->dflt;
297         if (rc != NULL) {
298                 for (i = 0; i < len; i++) {
299                         if (*(rc + i) != (u8)(val >> (8 * i))) {
300                                 wr = true;
301                                 break;
302                         }
303                 }
304         } else {
305                 wr = true;
306         }
307         if (wr || st->rc_dis) {
308                 if (be)
309                         ret = nvi_i2c_write_be(st, br, len, val);
310                 else
311                         ret = nvi_i2c_write_le(st, br, len, val);
312                 if (ret) {
313                         if (fn == NULL)
314                                 fn = __func__;
315                         dev_err(&st->i2c->dev,
316                                 "%s 0x%08x!=>0x%01x%02x ERR=%d\n",
317                                 fn, val, br->bank, br->reg, ret);
318                 } else {
319                         if (st->sts & NVI_DBG_SPEW_MSG && fn)
320                                 dev_info(&st->i2c->dev,
321                                          "%s 0x%08x=>0x%01x%02x\n",
322                                          fn, val, br->bank, br->reg);
323                         if (rc != NULL) {
324                                 for (i = 0; i < len; i++)
325                                         *(rc + i) = (u8)(val >> (8 * i));
326                         }
327                 }
328         }
329         return ret;
330 }
331
332 int nvi_i2c_wr(struct nvi_state *st, const struct nvi_br *br,
333                u8 val, const char *fn)
334 {
335         u8 buf[2];
336         int ret;
337
338         buf[0] = br->reg;
339         buf[1] = val | br->dflt;
340         ret = nvi_wr_reg_bank_sel(st, br->bank);
341         if (!ret) {
342                 ret = nvi_i2c_w(st, sizeof(buf), buf);
343                 if (ret) {
344                         if (fn == NULL)
345                                 fn = __func__;
346                         dev_err(&st->i2c->dev,
347                                 "%s 0x%02x!=>0x%01x%02x ERR=%d\n",
348                                 fn, val, br->bank, br->reg, ret);
349                 } else {
350                         if (st->sts & NVI_DBG_SPEW_MSG && fn)
351                                 dev_info(&st->i2c->dev,
352                                          "%s 0x%02x=>0x%01x%02x\n",
353                                          fn, val, br->bank, br->reg);
354                 }
355         }
356         return ret;
357 }
358
359 int nvi_i2c_wr_rc(struct nvi_state *st, const struct nvi_br *br,
360                   u8 val, const char *fn, u8 *rc)
361 {
362         int ret = 0;
363
364         val |= br->dflt;
365         if (val != *rc || st->rc_dis) {
366                 ret = nvi_i2c_wr(st, br, val, fn);
367                 if (!ret)
368                         *rc = val;
369         }
370         return ret;
371 }
372
373 int nvi_i2c_r(struct nvi_state *st, u8 bank, u8 reg, u16 len, u8 *buf)
374 {
375         struct i2c_msg msg[2];
376         int ret;
377
378         ret = nvi_wr_reg_bank_sel(st, bank);
379         if (ret)
380                 return ret;
381
382         if (!len)
383                 len++;
384         msg[0].addr = st->i2c->addr;
385         msg[0].flags = 0;
386         msg[0].len = 1;
387         msg[0].buf = &reg;
388         msg[1].addr = st->i2c->addr;
389         msg[1].flags = I2C_M_RD;
390         msg[1].len = len;
391         msg[1].buf = buf;
392         if (i2c_transfer(st->i2c->adapter, msg, 2) != 2) {
393                 nvi_err(st);
394                 return -EIO;
395         }
396
397         return 0;
398 }
399
400 int nvi_i2c_rd(struct nvi_state *st, const struct nvi_br *br, u8 *buf)
401 {
402         u16 len = br->len;
403
404         if (!len)
405                 len = 1;
406         return nvi_i2c_r(st, br->bank, br->reg, len, buf);
407 }
408
/* Write len bytes to DMP memory at addr, auto-advancing across the
 * 256-byte memory banks (bank = addr high byte, offset = low byte).
 * When validate is set, each chunk is read back and compared; the
 * nonzero memcmp() result is returned on mismatch.
 * Returns 0 on success, -EIO on I2C failure.
 */
int nvi_mem_wr(struct nvi_state *st, u16 addr, u16 len, u8 *data,
	       bool validate)
{
	struct i2c_msg msg[6];
	u8 buf_bank[2];
	u8 buf_addr[2];
	u8 buf_data[257];	/* mem_rw register + up to 256 data bytes */
	u16 bank_len;
	u16 data_len;
	unsigned int data_i;
	int ret;

	ret = nvi_wr_reg_bank_sel(st, st->hal->reg->mem_bank.bank);
	if (ret)
		return ret;

	buf_bank[0] = st->hal->reg->mem_bank.reg;
	buf_bank[1] = addr >> 8;	/* starting memory bank */
	buf_addr[0] = st->hal->reg->mem_addr.reg;
	buf_addr[1] = addr & 0xFF;	/* offset within the bank */
	buf_data[0] = st->hal->reg->mem_rw.reg;
	/* msg[0..2]: set bank, set address, burst-write the chunk */
	msg[0].addr = st->i2c->addr;
	msg[0].flags = 0;
	msg[0].len = sizeof(buf_bank);
	msg[0].buf = buf_bank;
	msg[1].addr = st->i2c->addr;
	msg[1].flags = 0;
	msg[1].len = sizeof(buf_addr);
	msg[1].buf = buf_addr;
	msg[2].addr = st->i2c->addr;
	msg[2].flags = 0;
	msg[2].buf = buf_data;
	/* msg[3..5]: re-set address, point at mem_rw, read back chunk
	 * (used only when validate is set)
	 */
	msg[3].addr = st->i2c->addr;
	msg[3].flags = 0;
	msg[3].len = sizeof(buf_addr);
	msg[3].buf = buf_addr;
	msg[4].addr = st->i2c->addr;
	msg[4].flags = 0;
	msg[4].len = 1;
	msg[4].buf = buf_data;
	msg[5].addr = st->i2c->addr;
	msg[5].flags = I2C_M_RD;
	msg[5].buf = &buf_data[1];
	data_i = 0;
	bank_len = (addr + len - 1) >> 8;	/* last bank touched */
	for (; buf_bank[1] <= bank_len; buf_bank[1]++) {
		if (buf_bank[1] == bank_len)
			data_len = len - data_i;	/* final partial chunk */
		else
			data_len = 0x0100 - buf_addr[1];  /* to end of bank */
		msg[2].len = data_len + 1;	/* +1 for the mem_rw reg */
		memcpy(&buf_data[1], data + data_i, data_len);
		if (i2c_transfer(st->i2c->adapter, msg, 3) != 3) {
			nvi_err(st);
			return -EIO;
		}

		if (validate) {
			msg[5].len = data_len;
			if (i2c_transfer(st->i2c->adapter, &msg[3], 3) != 3) {
				nvi_err(st);
				return -EIO;
			}

			ret = memcmp(&buf_data[1], data + data_i, data_len);
			if (ret)
				return ret;
		}

		data_i += data_len;
		buf_addr[1] = 0;	/* subsequent banks start at offset 0 */
	}

	return 0;
}
484
485 int nvi_mem_wr_be(struct nvi_state *st, u16 addr, u16 len, u32 val)
486 {
487         u8 buf[4];
488         unsigned int i;
489         int ret;
490
491         for (i = 0; i < len; i++)
492                 buf[i] = (u8)(val >> (8 * (len - (i + 1))));
493         ret = nvi_mem_wr(st, addr, len, buf, false);
494         if (st->sts & NVI_DBG_SPEW_MSG)
495                 dev_info(&st->i2c->dev, "%s 0x%08x=>0x%04hx err=%d\n",
496                          __func__, val, addr, ret);
497         return ret;
498 }
499
500 int nvi_mem_wr_be_mc(struct nvi_state *st, u16 addr, u16 len, u32 val, u32 *mc)
501 {
502         int ret = 0;
503
504         if (val != *mc || st->mc_dis) {
505                 ret = nvi_mem_wr_be(st, addr, len, val);
506                 if (!ret)
507                         *mc = val;
508         }
509         return ret;
510 }
511
/* Read len bytes from DMP memory at addr into data, auto-advancing
 * across the 256-byte memory banks (bank = addr high byte).
 * Returns 0 on success, -EIO on I2C failure.
 */
int nvi_mem_rd(struct nvi_state *st, u16 addr, u16 len, u8 *data)
{
	struct i2c_msg msg[4];
	u8 buf_bank[2];
	u8 buf_addr[2];
	u16 bank_len;
	u16 data_len;
	unsigned int data_i;
	int ret;

	ret = nvi_wr_reg_bank_sel(st, st->hal->reg->mem_bank.bank);
	if (ret)
		return ret;

	buf_bank[0] = st->hal->reg->mem_bank.reg;
	buf_bank[1] = addr >> 8;	/* starting memory bank */
	buf_addr[0] = st->hal->reg->mem_addr.reg;
	buf_addr[1] = addr & 0xFF;	/* offset within the bank */
	/* msg[0..3]: set bank, set address, point at mem_rw, read chunk */
	msg[0].addr = st->i2c->addr;
	msg[0].flags = 0;
	msg[0].len = sizeof(buf_bank);
	msg[0].buf = buf_bank;
	msg[1].addr = st->i2c->addr;
	msg[1].flags = 0;
	msg[1].len = sizeof(buf_addr);
	msg[1].buf = buf_addr;
	msg[2].addr = st->i2c->addr;
	msg[2].flags = 0;
	msg[2].len = 1;
	msg[2].buf = (u8 *)&st->hal->reg->mem_rw.reg;
	msg[3].addr = st->i2c->addr;
	msg[3].flags = I2C_M_RD;
	data_i = 0;
	bank_len = (addr + len - 1) >> 8;	/* last bank touched */
	for (; buf_bank[1] <= bank_len; buf_bank[1]++) {
		if (buf_bank[1] == bank_len)
			data_len = len - data_i;	/* final partial chunk */
		else
			data_len = 0x0100 - buf_addr[1];  /* to end of bank */
		msg[3].len = data_len;
		msg[3].buf = data + data_i;
		if (i2c_transfer(st->i2c->adapter, msg, 4) != 4) {
			nvi_err(st);
			return -EIO;
		}

		data_i += data_len;
		buf_addr[1] = 0;	/* subsequent banks start at offset 0 */
	}

	return 0;
}
564
565 int nvi_mem_rd_le(struct nvi_state *st, u16 addr, u16 len, u32 *val)
566 {
567         u32 buf_le = 0;
568         u8 buf_rd[4];
569         unsigned int i;
570         int ret;
571
572         ret = nvi_mem_rd(st, addr, len, buf_rd);
573         if (!ret) {
574                 /* convert to little endian */
575                 for (i = 0; i < len; i++) {
576                         buf_le <<= 8;
577                         buf_le |= buf_rd[i];
578                 }
579
580                 *val = buf_le;
581         }
582
583         return ret;
584 }
585
586 static int nvi_rd_accel_offset(struct nvi_state *st)
587 {
588         u8 buf[2];
589         unsigned int i;
590         int ret;
591
592         for (i = 0; i < AXIS_N; i++) {
593                 ret = nvi_i2c_rd(st, &st->hal->reg->a_offset_h[i], buf);
594                 if (!ret)
595                         st->rc.accel_offset[i] = be16_to_cpup((__be16 *)buf);
596         }
597         return ret;
598 }
599
600 int nvi_wr_accel_offset(struct nvi_state *st, unsigned int axis, u16 offset)
601 {
602         return nvi_i2c_write_rc(st, &st->hal->reg->a_offset_h[axis], offset,
603                              __func__, (u8 *)&st->rc.accel_offset[axis], true);
604 }
605
606 static int nvi_rd_gyro_offset(struct nvi_state *st)
607 {
608         u8 buf[2];
609         unsigned int i;
610         int ret;
611
612         for (i = 0; i < AXIS_N; i++) {
613                 ret = nvi_i2c_rd(st, &st->hal->reg->g_offset_h[i], buf);
614                 if (!ret)
615                         st->rc.gyro_offset[i] = be16_to_cpup((__be16 *)buf);
616         }
617         return ret;
618 }
619
620 int nvi_wr_gyro_offset(struct nvi_state *st, unsigned int axis, u16 offset)
621 {
622         return nvi_i2c_write_rc(st, &st->hal->reg->g_offset_h[axis], offset,
623                               __func__, (u8 *)&st->rc.gyro_offset[axis], true);
624 }
625
626 int nvi_wr_fifo_cfg(struct nvi_state *st, int fifo)
627 {
628         u8 fifo_cfg;
629
630         if (!st->hal->reg->fifo_cfg.reg)
631                 return 0;
632
633         if (fifo >= 0)
634                 fifo_cfg = (fifo << 2) | 0x01;
635         else
636                 fifo_cfg = 0;
637         return nvi_i2c_wr_rc(st, &st->hal->reg->fifo_cfg, fifo_cfg,
638                              NULL, &st->rc.fifo_cfg);
639 }
640
641 static int nvi_wr_i2c_slv4_ctrl(struct nvi_state *st, bool slv4_en)
642 {
643         u8 val;
644
645         val = st->aux.delay_hw;
646         val |= (st->aux.port[AUX_PORT_IO].nmp.ctrl & BIT_I2C_SLV_REG_DIS);
647         if (slv4_en)
648                 val |= BIT_SLV_EN;
649         return nvi_i2c_wr_rc(st, &st->hal->reg->i2c_slv4_ctrl, val,
650                              __func__, &st->rc.i2c_slv4_ctrl);
651 }
652
653 static int nvi_rd_int_sts_dmp(struct nvi_state *st)
654 {
655         int ret;
656
657         ret = nvi_i2c_rd(st, &st->hal->reg->int_dmp, &st->rc.int_dmp);
658         if (ret)
659                 dev_err(&st->i2c->dev, "%s %x=ERR %d\n",
660                         __func__, st->hal->reg->int_dmp.reg, ret);
661         return ret;
662 }
663
664 static int nvi_rd_int_status(struct nvi_state *st)
665 {
666         u8 buf[4] = {0, 0, 0, 0};
667         unsigned int i;
668         unsigned int n;
669         int ret;
670
671         ret = nvi_i2c_rd(st, &st->hal->reg->int_status, buf);
672         if (ret) {
673                 dev_err(&st->i2c->dev, "%s %x=ERR %d\n",
674                         __func__, st->hal->reg->int_status.reg, ret);
675         } else {
676                 /* convert to little endian */
677                 st->rc.int_status = 0;
678                 n = st->hal->reg->int_status.len;
679                 if (!n)
680                         n++;
681                 for (i = 0; i < n; i++) {
682                         st->rc.int_status <<= 8;
683                         st->rc.int_status |= buf[i];
684                 }
685
686                 if (st->rc.int_status & (1 << st->hal->bit->int_dmp))
687                         ret = nvi_rd_int_sts_dmp(st);
688         }
689
690         return ret;
691 }
692
/* Program the interrupt enable register(s) per the current enable mask.
 * With the DMP running only the DMP interrupt is enabled; otherwise one
 * data-ready interrupt per active FIFO (multi-FIFO, fifo_cfg bit 0 set)
 * or the single data-ready bit.  en=false disables all sources.
 */
int nvi_int_able(struct nvi_state *st, const char *fn, bool en)
{
	u32 int_en = 0;
	u32 int_msk;
	unsigned int fifo;
	int dev;
	int ret;

	if (en) {
		if (st->en_msk & (1 << DEV_DMP)) {
			/* DMP owns the interrupt when enabled */
			int_en |= 1 << st->hal->bit->int_dmp;
		} else if (st->en_msk & MSK_DEV_ALL) {
			int_msk = 1 << st->hal->bit->int_data_rdy_0;
			if (st->rc.fifo_cfg & 0x01) {
				/* multi FIFO enabled */
				fifo = 0;
				for (; fifo < st->hal->fifo_n; fifo++) {
					dev = st->hal->fifo_dev[fifo];
					if (dev < 0)
						continue;

					/* enable data-ready per used FIFO */
					if (st->rc.fifo_en & st->hal->
							 dev[dev]->fifo_en_msk)
						int_en |= int_msk << fifo;
				}
			} else {
				int_en |= int_msk;
			}
		}
	}
	ret = nvi_i2c_write_rc(st, &st->hal->reg->int_enable, int_en,
			       __func__, (u8 *)&st->rc.int_enable, false);
	if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG))
		dev_info(&st->i2c->dev, "%s-%s en=%x int_en=%x err=%d\n",
			 __func__, fn, en, int_en, ret);
	return ret;
}
730
731 static void nvi_flush_aux(struct nvi_state *st, int port)
732 {
733         struct aux_port *ap = &st->aux.port[port];
734
735         if (ap->nmp.handler)
736                 ap->nmp.handler(NULL, 0, 0, ap->nmp.ext_driver);
737 }
738
739 static void nvi_flush_push(struct nvi_state *st)
740 {
741         struct aux_port *ap;
742         unsigned int i;
743         int ret;
744
745         for (i = 0; i < DEV_N; i++) {
746                 if (st->snsr[i].flush) {
747                         ret = st->nvs->handler(st->snsr[i].nvs_st, NULL, 0LL);
748                         if (ret >= 0)
749                                 st->snsr[i].flush = false;
750                 }
751         }
752         for (i = 0; i < AUX_PORT_IO; i++) {
753                 ap = &st->aux.port[i];
754                 if (ap->flush)
755                         nvi_flush_aux(st, i);
756                 ap->flush = false;
757         }
758 }
759
/* Perform the resets requested by the user_ctrl bits, with the
 * dependencies the HW requires: SIG_COND_RST escalates to a full
 * user_ctrl reset, DMP_RST implies FIFO_RST.  On ICM parts the FIFO is
 * reset through the dedicated fifo_rst register (assert 0x1F, then
 * release to 0x1E with DMP enabled or 0 without).  After writing
 * user_ctrl, polls until the reset bits self-clear (up to
 * POWER_UP_TIME ms) and waits the DMP's post-reset delay when needed.
 * Returns 0 or an accumulated error.
 */
static int nvi_user_ctrl_rst(struct nvi_state *st, u8 user_ctrl)
{
	u8 fifo_rst;
	unsigned int msk;
	unsigned int n;
	int i;
	int ret = 0;
	int ret_t = 0;

	if (user_ctrl & BIT_SIG_COND_RST)
		user_ctrl = BITS_USER_CTRL_RST;
	if (user_ctrl & BIT_DMP_RST)
		user_ctrl |= BIT_FIFO_RST;
	if (user_ctrl & BIT_FIFO_RST) {
		st->buf_i = 0;	/* FIFO read buffer restarts empty */
		if (st->hal->reg->fifo_rst.reg) {
			/* ICM part */
			if (st->en_msk & (1 << DEV_DMP)) {
				ret = nvi_wr_fifo_cfg(st, 0);
			} else {
				/* count FIFO users to pick single vs
				 * multi FIFO configuration
				 */
				n = 0;
				for (i = 0; i < DEV_AXIS_N; i++) {
					if (st->hal->dev[i]->fifo_en_msk &&
							    st->snsr[i].enable)
						n++;
				}

				msk = st->snsr[DEV_AUX].enable;
				msk |= st->aux.dmp_en_msk;
				if (st->hal->dev[DEV_AUX]->fifo_en_msk && msk)
					n++;
				if (n > 1)
					ret = nvi_wr_fifo_cfg(st, 0);
				else
					ret = nvi_wr_fifo_cfg(st, -1);
			}
			if (st->en_msk & (1 << DEV_DMP))
				fifo_rst = 0x1E;
			else
				fifo_rst = 0;
			/* assert then release the FIFO reset bits */
			ret |= nvi_i2c_wr(st, &st->hal->reg->fifo_rst,
					  0x1F, __func__);
			ret |= nvi_i2c_wr(st, &st->hal->reg->fifo_rst,
					  fifo_rst, __func__);
			if (ret)
				ret_t |= ret;
			else
				nvi_flush_push(st);
			if (user_ctrl == BIT_FIFO_RST)
				/* then done */
				return ret_t;

			user_ctrl &= ~BIT_FIFO_RST;
		}
	}

	ret =  nvi_i2c_wr(st, &st->hal->reg->user_ctrl, user_ctrl, __func__);
	if (ret) {
		ret_t |= ret;
	} else {
		if (user_ctrl & BIT_FIFO_RST)
			nvi_flush_push(st);
		/* wait for the self-clearing reset bits to drop */
		for (i = 0; i < POWER_UP_TIME; i++) {
			user_ctrl = -1;
			ret = nvi_i2c_rd(st, &st->hal->reg->user_ctrl,
					 &user_ctrl);
			if (!(user_ctrl & BITS_USER_CTRL_RST))
				break;

			mdelay(1);
		}
		ret_t |= ret;
		st->rc.user_ctrl = user_ctrl;
		if (user_ctrl & BIT_DMP_RST && st->hal->dmp) {
			if (st->hal->dmp->dmp_reset_delay_ms)
				msleep(st->hal->dmp->dmp_reset_delay_ms);
		}
	}

	return ret_t;
}
841
/* Program fifo_en and user_ctrl according to the requested features.
 * Each flag is demoted to false when its precondition isn't met (DMP
 * not enabled, no FIFO sources, AUX not enabled, nothing to interrupt
 * on), then fifo_en is written, user_ctrl bits assembled, the IRQ
 * enabled if wanted, and user_ctrl written through the cache.
 * Also recomputes per-source FIFO sample sizes and st->fifo_src.
 */
int nvi_user_ctrl_en(struct nvi_state *st, const char *fn,
		     bool en_dmp, bool en_fifo, bool en_i2c, bool en_irq)
{
	struct aux_port *ap;
	int i;
	int ret = 0;
	u32 val = 0;

	if (en_dmp) {
		if (!(st->en_msk & (1 << DEV_DMP)))
			en_dmp = false;
	}
	if (en_fifo && !en_dmp) {
		/* rebuild FIFO byte counts per timing source */
		for (i = 0; i < st->hal->src_n; i++)
			st->src[i].fifo_data_n = 0;

		for (i = 0; i < DEV_MPU_N; i++) {
			if (st->snsr[i].enable &&
						st->hal->dev[i]->fifo_en_msk) {
				val |= st->hal->dev[i]->fifo_en_msk;
				st->src[st->hal->dev[i]->src].fifo_data_n +=
						  st->hal->dev[i]->fifo_data_n;
				st->fifo_src = st->hal->dev[i]->src;
			}
		}

		if (st->hal->dev[DEV_AUX]->fifo_en_msk &&
						    st->snsr[DEV_AUX].enable) {
			st->src[st->hal->dev[DEV_AUX]->src].fifo_data_n +=
							    st->aux.ext_data_n;
			st->fifo_src = st->hal->dev[DEV_AUX]->src;
			/* enable slave FIFO bits for enabled read ports
			 * that deliver data through a handler
			 */
			for (i = 0; i < AUX_PORT_IO; i++) {
				ap = &st->aux.port[i];
				if (st->snsr[DEV_AUX].enable & (1 << i) &&
					       (ap->nmp.addr & BIT_I2C_READ) &&
							     ap->nmp.handler) {
					val |= (1 <<
						st->hal->bit->slv_fifo_en[i]);
				}
			}
		}

		if (!val)
			en_fifo = false;
	}
	ret |= nvi_i2c_write_rc(st, &st->hal->reg->fifo_en, val,
				__func__, (u8 *)&st->rc.fifo_en, false);
	if (!ret) {
		val = 0;
		if (en_dmp)
			val |= BIT_DMP_EN;
		if (en_fifo)
			val |= BIT_FIFO_EN;
		if (en_i2c && (st->en_msk & (1 << DEV_AUX)))
			val |= BIT_I2C_MST_EN;
		else
			en_i2c = false;
		if (en_irq && val)
			ret = nvi_int_able(st, __func__, true);
		else
			en_irq = false;
		ret |= nvi_i2c_wr_rc(st, &st->hal->reg->user_ctrl, val,
				     __func__, &st->rc.user_ctrl);
	}
	if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG))
		dev_info(&st->i2c->dev,
			 "%s-%s DMP=%x FIFO=%x I2C=%x IRQ=%x err=%d\n",
			 __func__, fn, en_dmp, en_fifo, en_i2c, en_irq, ret);
	return ret;
}
912
/* Write the PWR_MGMT_1 register.  A write containing BIT_H_RESET performs a
 * full device reset: interrupts/FIFO are disabled first to avoid an IRQ
 * storm, the register cache is invalidated, the reset bit is polled until it
 * self-clears, and the factory offsets and DMP firmware are reloaded.
 * @param st
 * @param fn: caller's name for the debug message
 * @param pm1: value to write (BIT_H_RESET selects the reset path)
 * @return int: 0 for success or error code
 */
int nvi_wr_pm1(struct nvi_state *st, const char *fn, u8 pm1)
{
	u8 pm1_rd;
	unsigned int i;
	int ret = 0;

	if (pm1 & BIT_H_RESET) {
		/* must make sure FIFO is off or IRQ storm will occur */
		ret = nvi_int_able(st, __func__, false);
		ret |= nvi_user_ctrl_en(st, __func__,
					false, false, false, false);
		if (!ret) {
			nvi_user_ctrl_rst(st, BITS_USER_CTRL_RST);
			ret = nvi_i2c_wr(st, &st->hal->reg->pm1,
					 BIT_H_RESET, __func__);
		}
	} else {
		ret = nvi_i2c_wr_rc(st, &st->hal->reg->pm1, pm1,
				    __func__, &st->rc.pm1);
	}
	/* force nvi_pm to re-evaluate the power state on its next call */
	st->pm = NVI_PM_ERR;
	if (pm1 & BIT_H_RESET && !ret) {
		/* device was reset: everything is back at POR defaults */
		st->en_msk &= MSK_RST;
		memset(&st->rc, 0, sizeof(st->rc));
		if (st->hal->fn->por2rc)
			st->hal->fn->por2rc(st);
		for (i = 0; i < st->hal->src_n; i++)
			st->src[i].period_us_req = 0;

		/* poll until the reset bit self-clears or we time out */
		for (i = 0; i < (POWER_UP_TIME / REG_UP_TIME); i++) {
			mdelay(REG_UP_TIME);
			pm1_rd = -1;
			ret = nvi_i2c_rd(st, &st->hal->reg->pm1, &pm1_rd);
			if ((!ret) && (!(pm1_rd & BIT_H_RESET)))
				break;
		}

		msleep(POR_MS);
		st->rc.pm1 = pm1_rd;
		nvi_rd_accel_offset(st);
		nvi_rd_gyro_offset(st);
		nvi_dmp_fw(st);
		st->rc_dis = false;
	}
	if (st->sts & NVI_DBG_SPEW_MSG)
		dev_info(&st->i2c->dev, "%s-%s pm1=%x err=%d\n",
			 __func__, fn, pm1, ret);
	return ret;
}
962
/* Apply the power settings through the HAL.  When the regulators were not
 * already on (nonzero nvs_vregs_enable return — presumably the count of
 * regulators just enabled; verify against nvs.h), wait out the remaining
 * power-on-reset time measured from each regulator's enable timestamp and
 * then hard-reset the device before programming it.
 * @return int: 0 for success or error code
 */
static int nvi_pm_w(struct nvi_state *st, u8 pm1, u8 pm2, u8 lp)
{
	s64 por_ns;
	/* NOTE(review): delay_ms holds NANOSECONDS until the division below */
	unsigned int delay_ms;
	unsigned int i;
	int ret;

	ret = nvs_vregs_enable(&st->i2c->dev, st->vreg, ARRAY_SIZE(nvi_vregs));
	if (ret) {
		delay_ms = 0;
		for (i = 0; i < ARRAY_SIZE(nvi_vregs); i++) {
			por_ns = nvs_timestamp() - st->ts_vreg_en[i];
			if ((por_ns < 0) || (!st->ts_vreg_en[i])) {
				/* no valid enable timestamp:
				 * wait the full POR time
				 */
				delay_ms = (POR_MS * 1000000);
				break;
			}

			if (por_ns < (POR_MS * 1000000)) {
				/* remaining POR wait for this regulator */
				por_ns = (POR_MS * 1000000) - por_ns;
				if (por_ns > delay_ms)
					delay_ms = (unsigned int)por_ns;
			}
		}
		delay_ms /= 1000000; /* ns => ms */
		if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG))
			dev_info(&st->i2c->dev, "%s %ums delay\n",
				 __func__, delay_ms);
		if (delay_ms)
			msleep(delay_ms);
		ret = nvi_wr_pm1(st, __func__, BIT_H_RESET);
	}
	ret |= st->hal->fn->pm(st, pm1, pm2, lp);
	return ret;
}
997
998 int nvi_pm_wr(struct nvi_state *st, const char *fn, u8 pm1, u8 pm2, u8 lp)
999 {
1000         int ret;
1001
1002         ret = nvi_pm_w(st, pm1, pm2, lp);
1003         if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG))
1004                 dev_info(&st->i2c->dev, "%s-%s PM1=%x PM2=%x LPA=%x err=%d\n",
1005                          __func__, fn, pm1, pm2, lp, ret);
1006         st->pm = NVI_PM_ERR; /* lost st->pm status: nvi_pm is being bypassed */
1007         return ret;
1008 }
1009
/**
 * @param st
 * @param fn: caller's name for the debug message
 * @param pm_req: call with one of the following:
 *      NVI_PM_OFF_FORCE = force off state
 *      NVI_PM_ON = minimum power for device access
 *      NVI_PM_ON_FULL = power for gyro
 *      NVI_PM_AUTO = automatically sets power after
 *                    configuration.
 *      Typical use is to set needed power for configuration and
 *      then call with NVI_PM_AUTO when done. All other NVI_PM_
 *      levels are handled automatically and are for internal
 *      use.
 * @return int: returns 0 for success or error code
 */
static int nvi_pm(struct nvi_state *st, const char *fn, int pm_req)
{
	u8 pm1;
	u8 pm2;
	u8 lp;
	int i;
	int pm;
	int ret = 0;

	lp = st->rc.lp_config;
	if (pm_req == NVI_PM_AUTO) {
		/* derive the required power level from the enabled devices */
		pm2 = 0;
		if (!(st->en_msk & MSK_PM_ACC_EN))
			pm2 |= BIT_PWR_ACCEL_STBY;
		if (!st->snsr[DEV_GYR].enable)
			pm2 |= BIT_PWR_GYRO_STBY;
		if (st->en_msk & MSK_PM_ON_FULL) {
			pm = NVI_PM_ON_FULL;
		} else if (st->en_msk & MSK_PM_ON) {
			pm = NVI_PM_ON;
		} else if ((st->en_msk & ((1 << EN_LP) |
					  MSK_DEV_ALL)) == MSK_PM_LP) {
			/* only low-power-capable devices are enabled:
			 * use cycle mode when the accel period is slow enough
			 * (thresh_hi is the cycle-mode period threshold)
			 */
			if (st->snsr[DEV_ACC].period_us >=
					     st->snsr[DEV_ACC].cfg.thresh_hi) {
				/* pick the fastest low-power wake-up rate
				 * that satisfies the requested period
				 */
				for (lp = 0; lp < st->hal->lp_tbl_n; lp++) {
					if (st->snsr[DEV_ACC].period_us >=
							   st->hal->lp_tbl[lp])
						break;
				}
				pm = NVI_PM_ON_CYCLE;
			} else {
				pm = NVI_PM_ON;
			}
		} else if (st->en_msk & MSK_PM_LP) {
			pm = NVI_PM_ON;
		} else if (st->en_msk & MSK_PM_STDBY || st->aux.bypass_lock) {
			pm = NVI_PM_STDBY;
		} else {
			pm = NVI_PM_OFF;
		}
	} else {
		pm2 = st->rc.pm2;
		/* never drop below a higher power level already in force */
		if ((pm_req > NVI_PM_STDBY) && (pm_req < st->pm))
			pm = st->pm;
		else
			pm = pm_req;
	}
	if (pm == NVI_PM_OFF) {
		/* can't power off while a slave needs bypass at shutdown
		 * or while DMP firmware would be lost
		 */
		for (i = 0; i < AUX_PORT_IO; i++) {
			if (st->aux.port[i].nmp.shutdown_bypass) {
				nvi_aux_bypass_enable(st, true);
				pm = NVI_PM_STDBY;
				break;
			}
		}
		if (st->en_msk & (1 << FW_LOADED))
			pm = NVI_PM_STDBY;
	}

	switch (pm) {
	case NVI_PM_OFF_FORCE:
	case NVI_PM_OFF:
		pm = NVI_PM_OFF;
		/* fall through: OFF uses the same register values as STDBY */
	case NVI_PM_STDBY:
		pm1 = BIT_SLEEP;
		pm2 = (BIT_PWR_ACCEL_STBY | BIT_PWR_GYRO_STBY);
		break;

	case NVI_PM_ON_CYCLE:
		pm1 = BIT_CYCLE;
		/* low-power cycle mode requires the accelerometer on */
		pm2 &= ~BIT_PWR_ACCEL_STBY;
		break;

	case NVI_PM_ON:
		pm1 = INV_CLK_INTERNAL;
		if (pm2 & BIT_PWR_ACCEL_STBY) {
			/* keep the accel on if any accel-dependent device
			 * is enabled
			 */
			for (i = 0; i < DEV_N_AUX; i++) {
				if (MSK_PM_ACC_EN & (1 << i)) {
					if (st->snsr[i].enable) {
						pm2 &= ~BIT_PWR_ACCEL_STBY;
						break;
					}
				}
			}
		}

		break;

	case NVI_PM_ON_FULL:
		pm1 = INV_CLK_PLL;
		/* gyro must be turned on before going to PLL clock */
		pm2 &= ~BIT_PWR_GYRO_STBY;
		break;

	default:
		dev_err(&st->i2c->dev, "%s %d=>%d ERR=EINVAL\n",
			__func__, st->pm, pm);
		return -EINVAL;
	}

	/* only touch the HW when the effective power state would change */
	if (pm != st->pm || lp != st->rc.lp_config || pm2 != (st->rc.pm2 &
				   (BIT_PWR_ACCEL_STBY | BIT_PWR_GYRO_STBY))) {
		if (pm == NVI_PM_OFF) {
			if (st->pm > NVI_PM_OFF || st->pm == NVI_PM_ERR)
				ret |= nvi_wr_pm1(st, __func__, BIT_H_RESET);
			ret |= nvi_pm_w(st, pm1, pm2, lp);
			ret |= nvs_vregs_disable(&st->i2c->dev, st->vreg,
						 ARRAY_SIZE(nvi_vregs));
		} else {
			if (pm == NVI_PM_ON_CYCLE)
				/* last chance to write to regs before cycle */
				ret |= nvi_int_able(st, __func__, true);
			ret |= nvi_pm_w(st, pm1, pm2, lp);
			if (pm > NVI_PM_STDBY)
				mdelay(REG_UP_TIME);
		}
		if (ret < 0) {
			dev_err(&st->i2c->dev, "%s PM %d=>%d ERR=%d\n",
				__func__, st->pm, pm, ret);
			pm = NVI_PM_ERR;
		}
		if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG))
			dev_info(&st->i2c->dev,
				 "%s-%s PM %d=>%d PM1=%x PM2=%x LP=%x\n",
				 __func__, fn, st->pm, pm, pm1, pm2, lp);
		st->pm = pm;
		if (ret > 0)
			ret = 0;
	}
	return ret;
}
1155
1156 static void nvi_pm_exit(struct nvi_state *st)
1157 {
1158         if (st->hal)
1159                 nvi_pm(st, __func__, NVI_PM_OFF_FORCE);
1160         nvs_vregs_exit(&st->i2c->dev, st->vreg, ARRAY_SIZE(nvi_vregs));
1161 }
1162
1163 static int nvi_pm_init(struct nvi_state *st)
1164 {
1165         int ret;
1166
1167         ret = nvs_vregs_init(&st->i2c->dev,
1168                              st->vreg, ARRAY_SIZE(nvi_vregs), nvi_vregs);
1169         st->pm = NVI_PM_ERR;
1170         return ret;
1171 }
1172
1173 static int nvi_dmp_fw(struct nvi_state *st)
1174 {
1175 #if NVI_FW_CRC_CHECK
1176         u32 crc32;
1177 #endif /* NVI_FW_CRC_CHECK */
1178         int ret;
1179
1180         if (!st->hal->dmp)
1181                 return -EINVAL;
1182
1183 #if NVI_FW_CRC_CHECK
1184         crc32 = crc32(0, st->hal->dmp->fw, st->hal->dmp->fw_len);
1185         if (crc32 != st->hal->dmp->fw_crc32) {
1186                 dev_err(&st->i2c->dev, "%s FW CRC FAIL %x != %x\n",
1187                          __func__, crc32, st->hal->dmp->fw_crc32);
1188                 return -EINVAL;
1189         }
1190 #endif /* NVI_FW_CRC_CHECK */
1191
1192         ret = nvi_user_ctrl_en(st, __func__, false, false, false, false);
1193         if (ret)
1194                 return ret;
1195
1196         ret = nvi_mem_wr(st, st->hal->dmp->fw_mem_addr,
1197                          st->hal->dmp->fw_len,
1198                          (u8 *)st->hal->dmp->fw, true);
1199         if (ret) {
1200                 dev_err(&st->i2c->dev, "%s ERR: nvi_mem_wr\n", __func__);
1201                 return ret;
1202         }
1203
1204         ret = nvi_i2c_write_rc(st, &st->hal->reg->fw_start,
1205                                st->hal->dmp->fw_start,
1206                                __func__, NULL, true);
1207         if (ret)
1208                 return ret;
1209
1210         ret = st->hal->dmp->fn_init(st); /* nvi_dmp_init */
1211         if (ret) {
1212                 dev_err(&st->i2c->dev, "%s ERR: nvi_dmp_init\n", __func__);
1213                 return ret;
1214         }
1215
1216         nvi_user_ctrl_en(st, __func__, false, false, false, false);
1217         st->en_msk |= (1 << FW_LOADED);
1218         return 0;
1219 }
1220
1221 void nvi_push_delay(struct nvi_state *st)
1222 {
1223         unsigned int i;
1224
1225         for (i = 0; i < DEV_MPU_N; i++) {
1226                 if (st->snsr[i].enable) {
1227                         if (st->snsr[i].push_delay_ns &&
1228                                                     !st->snsr[i].ts_push_delay)
1229                                 st->snsr[i].ts_push_delay = nvs_timestamp() +
1230                                                      st->snsr[i].push_delay_ns;
1231                 } else {
1232                         st->snsr[i].ts_push_delay = 0;
1233                 }
1234         }
1235 }
1236
/* Program the I2C master delay so slow auxiliary slaves are accessed only
 * every N samples of the AUX source rate, and enable the per-port HW delay
 * for every port that requests one.
 * @return int: 0 for success or error code
 */
int nvi_aux_delay(struct nvi_state *st, const char *fn)
{
	u8 val;
	unsigned int msk_en;
	unsigned int src_us;
	unsigned int delay;
	unsigned int i;
	int ret;

	/* determine valid delays by ports enabled */
	delay = 0;
	msk_en = st->snsr[DEV_AUX].enable | st->aux.dmp_en_msk;
	for (i = 0; msk_en; i++) {
		if (msk_en & (1 << i)) {
			msk_en &= ~(1 << i);
			/* slowest (largest) requested delay wins */
			if (delay < st->aux.port[i].nmp.delay_ms)
				delay = st->aux.port[i].nmp.delay_ms;
		}
	}
	src_us = st->src[st->hal->dev[DEV_AUX]->src].period_us_src;
	if (src_us) {
		/* convert the delay into I2C_MST_DLY sample counts.
		 * NOTE(review): the HW presumably accesses slaves every
		 * 1 + I2C_MST_DLY samples, hence the decrement on an exact
		 * multiple (and round-down otherwise) — confirm against the
		 * part's register map.
		 */
		delay *= 1000; /* ms => us */
		if (delay % src_us) {
			delay /= src_us;
		} else {
			delay /= src_us;
			if (delay)
				delay--;
		}
	} else {
		delay = 0;
	}
	if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG))
		dev_info(&st->i2c->dev, "%s-%s aux.delay_hw=%u=>%u\n",
			 __func__, fn, st->aux.delay_hw, delay);
	st->aux.delay_hw = delay;
	/* rewrite SLV4_CTRL so the new delay takes effect */
	ret = nvi_wr_i2c_slv4_ctrl(st, (bool)
				   (st->rc.i2c_slv4_ctrl & BIT_SLV_EN));
	/* HW port delay enable */
	val = BIT_DELAY_ES_SHADOW;
	for (i = 0; i < AUX_PORT_MAX; i++) {
		if (st->aux.port[i].nmp.delay_ms)
			val |= (1 << i);
	}
	ret |= nvi_i2c_wr_rc(st, &st->hal->reg->i2c_mst_delay_ctrl, val,
			     __func__, &st->rc.i2c_mst_delay_ctrl);
	return ret;
}
1285
/* Compute the effective batch timeout from all enabled devices and update
 * st->bm_timeout_us.
 * @return int: 1 when the batch timeout changed (HW must be reprogrammed),
 *      0 otherwise
 */
static int nvi_timeout(struct nvi_state *st)
{
	bool disabled = true;
	unsigned int timeout_us = -1;
	unsigned int i;

	/* find the fastest batch timeout of all the enabled devices */
	for (i = 0; i < DEV_N_AUX; i++) {
		if (st->snsr[i].enable) {
			if (st->snsr[i].timeout_us < timeout_us)
				timeout_us = st->snsr[i].timeout_us;
			disabled = false;
		}
	}

	/* NOTE: the scan above is deliberately overridden — batch mode is
	 * not supported in this driver version, so the timeout is forced
	 * to 0.  Remove the override to re-enable the computed timeout.
	 */
	disabled = true; /* batch mode is currently disabled */
	if (disabled)
		timeout_us = 0; /* batch mode disabled */
	if (timeout_us != st->bm_timeout_us) {
		st->bm_timeout_us = timeout_us;
		return 1;
	}

	return 0;
}
1311
1312 static int nvi_period_src(struct nvi_state *st, int src)
1313 {
1314         bool enabled = false;
1315         unsigned int period_us = -1;
1316         unsigned int dev_msk;
1317         unsigned int i;
1318
1319         if (src < 0)
1320                 return 0;
1321
1322         /* find the fastest period of all the enabled devices */
1323         dev_msk = st->hal->src[src].dev_msk;
1324         for (i = 0; dev_msk; i++) {
1325                 if (dev_msk & (1 << i)) {
1326                         dev_msk &= ~(1 << i);
1327                         if (st->snsr[i].enable && st->snsr[i].period_us) {
1328                                 if (st->snsr[i].period_us < period_us)
1329                                         period_us = st->snsr[i].period_us;
1330                                 enabled = true;
1331                         }
1332                 }
1333         }
1334
1335         if (enabled) {
1336                 if (period_us < st->hal->src[src].period_us_min)
1337                         period_us = st->hal->src[src].period_us_min;
1338                 if (period_us > st->hal->src[src].period_us_max)
1339                         period_us = st->hal->src[src].period_us_max;
1340                 if (period_us != st->src[src].period_us_req) {
1341                         st->src[src].period_us_req = period_us;
1342                         return 1;
1343                 }
1344         }
1345
1346         return 0;
1347 }
1348
1349 int nvi_period_aux(struct nvi_state *st)
1350 {
1351         bool enabled = false;
1352         unsigned int period_us = -1;
1353         unsigned int timeout_us = -1;
1354         unsigned int msk_en;
1355         unsigned int i;
1356         int ret;
1357
1358         msk_en = st->snsr[DEV_AUX].enable | st->aux.dmp_en_msk;
1359         for (i = 0; msk_en; i++) {
1360                 if (msk_en & (1 << i)) {
1361                         msk_en &= ~(1 << i);
1362                         if (st->aux.port[i].period_us) {
1363                                 if (st->aux.port[i].period_us < period_us)
1364                                         period_us = st->aux.port[i].period_us;
1365                                 if (st->aux.port[i].timeout_us < timeout_us)
1366                                         timeout_us =
1367                                                     st->aux.port[i].timeout_us;
1368                                 enabled = true;
1369                         }
1370                 }
1371         }
1372
1373         if (enabled) {
1374                 st->snsr[DEV_AUX].period_us = period_us;
1375                 st->snsr[DEV_AUX].timeout_us = timeout_us;
1376         }
1377         ret = nvi_period_src(st, st->hal->dev[DEV_AUX]->src);
1378         ret |= nvi_timeout(st);
1379         return ret;
1380 }
1381
1382 static int nvi_period_all(struct nvi_state *st)
1383 {
1384         unsigned int src;
1385         int ret = 0;
1386
1387         for (src = 0; src < st->hal->src_n; src++) {
1388                 if (st->hal->src[src].dev_msk & (1 << DEV_AUX))
1389                         continue; /* run nvi_period_aux last for timeout */
1390                 else
1391                         ret |= nvi_period_src(st, src);
1392         }
1393
1394         ret |= nvi_period_aux(st);
1395         return ret;
1396 }
1397
/* Master enable sequence: raise power for the enabled devices, quiesce the
 * chip, then either hand control to the DMP (when firmware is loaded and a
 * DMP sensor is active) or program each source/device directly, and finally
 * drop to the automatic power level and reset/flush.
 * @return int: 0 for success or error code
 */
static int nvi_en(struct nvi_state *st)
{
	bool dmp_en = false;
	unsigned int i;
	int ret;
	int ret_t = 0;

	/* single-pass "loop" used as a structured goto to pick the
	 * power level; exits early when nothing is enabled
	 */
	while (1) {
		if (st->snsr[DEV_GYR].enable) {
			/* gyro needs full power (PLL clock) */
			ret_t = nvi_pm(st, __func__, NVI_PM_ON_FULL);
			break;
		}

		for (i = 0; i < DEV_N_AUX; i++) {
			if (st->snsr[i].enable) {
				ret_t = nvi_pm(st, __func__, NVI_PM_ON);
				break;
			}
		}
		if (i < DEV_N_AUX)
			break;

		/* nothing is enabled: just drop to automatic power */
		return nvi_pm(st, __func__, NVI_PM_AUTO);
	}

	/* quiesce the chip before reprogramming */
	ret_t |= nvi_int_able(st, __func__, false);
	ret_t |= nvi_user_ctrl_en(st, __func__, false, false, false, false);
	if (ret_t) {
		if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG))
			dev_err(&st->i2c->dev, "%s en_msk=%x ERR=%d\n",
				__func__, st->en_msk, ret_t);
		return ret_t;
	}

	if (st->en_msk & (1 << FW_LOADED)) {
		/* test if batch is needed or more specifically that an
		 * enabled sensor doesn't support batch.  The DMP can't
		 * do batch and non-batch at the same time.
		 */
		if (st->bm_timeout_us) {
			dmp_en = true;
		} else {
			/* batch disabled - test if a DMP sensor is enabled */
			for (i = 0; i < DEV_N_AUX; i++) {
				if (st->dmp_en_msk & (1 << i)) {
					if (st->snsr[i].enable) {
						dmp_en = true;
						break;
					}
				}
			}
		}

		if (dmp_en) {
			ret_t |= st->hal->dmp->fn_en(st); /* nvi_dmp_en */
			st->en_msk |= (1 << DEV_DMP);
			if (ret_t) {
				/* reprogram for non-DMP mode below */
				dmp_en = false;
				if (st->sts & (NVS_STS_SPEW_MSG |
					       NVI_DBG_SPEW_MSG))
					dev_err(&st->i2c->dev,
						"%s DMP ERR=%d\n",
						__func__, ret_t);
			} else {
				if (st->sts & (NVS_STS_SPEW_MSG |
					       NVI_DBG_SPEW_MSG))
					dev_info(&st->i2c->dev,
						 "%s DMP enabled\n", __func__);
			}
		}
	}
	if (!dmp_en) {
		/* non-DMP path: tear down DMP state if it was active */
		if (st->en_msk & (1 << DEV_DMP)) {
			st->en_msk &= ~(MSK_DEV_SNSR | (1 << DEV_DMP));
			if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG))
				dev_info(&st->i2c->dev,
					 "%s DMP disabled\n", __func__);
			if (st->aux.dmp_en_msk) {
				st->aux.dmp_en_msk = 0;
				nvi_aux_enable(st, __func__, true, true);
			}
			for (i = 0; i < DEV_N_AUX; i++)
				st->snsr[i].odr = 0;

			for (i = 0; i < AUX_PORT_MAX; i++)
				st->aux.port[i].odr = 0;
		}

		/* program each rate source, then each enabled device */
		for (i = 0; i < st->hal->src_n; i++)
			ret_t |= st->hal->src[i].fn_period(st);

		if (st->snsr[DEV_ACC].enable) {
			ret = st->hal->fn->en_acc(st);
			if (ret) {
				ret_t |= ret;
				st->en_msk &= ~(1 << DEV_ACC);
			} else {
				st->en_msk |= (1 << DEV_ACC);
			}
		}
		if (st->snsr[DEV_GYR].enable) {
			ret = st->hal->fn->en_gyr(st);
			if (ret) {
				ret_t |= ret;
				st->en_msk &= ~(1 << DEV_GYR);
			} else {
				st->en_msk |= (1 << DEV_GYR);
			}
		}
		nvi_push_delay(st);
		/* NVI_PM_AUTO to go to NVI_PM_ON_CYCLE if need be */
		/* this also restores correct PM mode if error */
		ret_t |= nvi_pm(st, __func__, NVI_PM_AUTO);
		if (st->pm > NVI_PM_ON_CYCLE)
			ret_t |= nvi_reset(st, __func__, true, false, true);
	}
	if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG))
		dev_info(&st->i2c->dev, "%s en_msk=%x err=%d\n",
			 __func__, st->en_msk, ret_t);
	return ret_t;
}
1520
/* Dump the auxiliary I2C master state for debugging: for each I/O port the
 * hardware registers (HW), the driver's register cache (HC), the client's
 * nvi_mpu_port request (NS), and the port bookkeeping (PS), followed by a
 * global AUX summary.  No-op unless NVI_DBG_SPEW_AUX is set.
 */
static void nvi_aux_dbg(struct nvi_state *st, char *tag, int val)
{
	struct nvi_mpu_port *n;
	struct aux_port *p;
	struct aux_ports *a;
	u8 data[4];
	unsigned int i;
	int ret;

	if (!(st->sts & NVI_DBG_SPEW_AUX))
		return;

	dev_info(&st->i2c->dev, "%s %s %d\n", __func__, tag, val);
	a = &st->aux;
	for (i = 0; i < AUX_PORT_IO; i++) {
		/* read the live slave registers to compare against cache */
		ret = nvi_i2c_rd(st, &st->hal->reg->i2c_slv_addr[i], &data[0]);
		ret |= nvi_i2c_rd(st, &st->hal->reg->i2c_slv_reg[i], &data[1]);
		ret |= nvi_i2c_rd(st, &st->hal->reg->i2c_slv_ctrl[i],
				  &data[2]);
		ret |= nvi_i2c_rd(st, &st->hal->reg->i2c_slv_do[i], &data[3]);
		/* HW = hardware */
		if (ret)
			pr_info("HW: ERR=%d\n", ret);
		else
			pr_info("HW: P%d AD=%x RG=%x CL=%x DO=%x\n",
				i, data[0], data[1], data[2], data[3]);
		/* RC = hardware register cache */
		pr_info("HC: P%d AD=%x RG=%x CL=%x DO=%x\n",
			i, st->rc.i2c_slv_addr[i], st->rc.i2c_slv_reg[i],
			st->rc.i2c_slv_ctrl[i], st->rc.i2c_slv_do[i]);
		n = &st->aux.port[i].nmp;
		/* NS = nmp structure */
		pr_info("NS: P%d AD=%x RG=%x CL=%x DO=%x MS=%u US=%u SB=%x\n",
			i, n->addr, n->reg, n->ctrl, n->data_out, n->delay_ms,
			st->aux.port[i].period_us, n->shutdown_bypass);
		p = &st->aux.port[i];
		/* PS = port structure */
		pr_info("PS: P%d OFFSET=%u DMP_CTRL=%x EN=%x HWDOUT=%x\n",
			i, p->ext_data_offset, !!(a->dmp_ctrl_msk & (1 << i)),
			!!(st->snsr[DEV_AUX].enable & (1 << i)), p->hw_do);
	}

	pr_info("AUX: EN=%x MEN=%x DEN=%x DLY=%x SRC=%u DN=%u BEN=%x BLK=%d\n",
		!!(st->en_msk & (1 << DEV_AUX)),
		!!(st->rc.user_ctrl & BIT_I2C_MST_EN), st->aux.dmp_en_msk,
		(st->rc.i2c_slv4_ctrl & BITS_I2C_MST_DLY),
		st->src[st->hal->dev[DEV_AUX]->src].period_us_src,
		a->ext_data_n, (st->rc.int_pin_cfg & BIT_BYPASS_EN),
		a->bypass_lock);
}
1571
1572 static void nvi_aux_ext_data_offset(struct nvi_state *st)
1573 {
1574         unsigned int i;
1575         unsigned int offset = 0;
1576
1577         for (i = 0; i < AUX_PORT_IO; i++) {
1578                 if (st->aux.port[i].nmp.addr & BIT_I2C_READ) {
1579                         st->aux.port[i].ext_data_offset = offset;
1580                         offset += (st->rc.i2c_slv_ctrl[i] &
1581                                    BITS_I2C_SLV_CTRL_LEN);
1582                 }
1583         }
1584         if (offset > AUX_EXT_DATA_REG_MAX) {
1585                 offset = AUX_EXT_DATA_REG_MAX;
1586                 dev_err(&st->i2c->dev,
1587                         "%s ERR MPU slaves exceed data storage\n", __func__);
1588         }
1589         st->aux.ext_data_n = offset;
1590         return;
1591 }
1592
1593 static int nvi_aux_port_data_out(struct nvi_state *st,
1594                                  int port, u8 data_out)
1595 {
1596         int ret;
1597
1598         ret = nvi_i2c_wr_rc(st, &st->hal->reg->i2c_slv_do[port], data_out,
1599                             NULL, &st->rc.i2c_slv_do[port]);
1600         if (!ret) {
1601                 st->aux.port[port].nmp.data_out = data_out;
1602                 st->aux.port[port].hw_do = true;
1603         } else {
1604                 st->aux.port[port].hw_do = false;
1605         }
1606         return ret;
1607 }
1608
1609 static int nvi_aux_port_wr(struct nvi_state *st, int port)
1610 {
1611         struct aux_port *ap;
1612         int ret;
1613
1614         ap = &st->aux.port[port];
1615         ret = nvi_i2c_wr_rc(st, &st->hal->reg->i2c_slv_addr[port],
1616                            ap->nmp.addr, __func__, &st->rc.i2c_slv_addr[port]);
1617         ret |= nvi_i2c_wr_rc(st, &st->hal->reg->i2c_slv_reg[port], ap->nmp.reg,
1618                              __func__, &st->rc.i2c_slv_reg[port]);
1619         ret |= nvi_i2c_wr_rc(st, &st->hal->reg->i2c_slv_do[port],
1620                          ap->nmp.data_out, __func__, &st->rc.i2c_slv_do[port]);
1621         return ret;
1622 }
1623
/* Enable or disable one auxiliary I2C slave port.  Lazily programs the
 * port's address/register/data-out on first enable, selects between the
 * normal and DMP control value, and flags an AUX reset when the control
 * value changes while switching DMP modes.  AUX_PORT_IO is the special
 * single-transfer port handled through SLV4_CTRL.
 * @return int: 0 for success or error code
 */
static int nvi_aux_port_en(struct nvi_state *st, int port, bool en)
{
	struct aux_port *ap;
	u8 slv_ctrl;
	u8 val;
	unsigned int dmp_ctrl_msk;
	int ret = 0;

	ap = &st->aux.port[port];
	/* first enable: HW registers were never programmed for this port */
	if (en && !st->rc.i2c_slv_addr[port]) {
		ret = nvi_aux_port_wr(st, port);
		if (!ret)
			ap->hw_do = true;
	}
	if (en && !ap->hw_do)
		/* HW data-out is stale; rewrite it */
		nvi_aux_port_data_out(st, port, ap->nmp.data_out);
	if (port == AUX_PORT_IO) {
		ret = nvi_wr_i2c_slv4_ctrl(st, en);
	} else {
		slv_ctrl = st->rc.i2c_slv_ctrl[port];
		if (en) {
			dmp_ctrl_msk = st->aux.dmp_ctrl_msk;
			if (st->en_msk & (1 << DEV_DMP)) {
				val = ap->nmp.dmp_ctrl | BIT_SLV_EN;
				st->aux.dmp_ctrl_msk |= (1 << port);
			} else {
				val = ap->nmp.ctrl | BIT_SLV_EN;
				st->aux.dmp_ctrl_msk &= ~(1 << port);
			}
			if (ap->nmp.dmp_ctrl != ap->nmp.ctrl && dmp_ctrl_msk !=
							  st->aux.dmp_ctrl_msk)
				/* AUX HW needs to be reset if slv_ctrl values
				 * change other than enable bit.
				 */
				st->aux.reset_i2c = true;
		} else {
			val = 0;
			st->aux.dmp_ctrl_msk &= ~(1 << port);
		}
		ret = nvi_i2c_wr_rc(st, &st->hal->reg->i2c_slv_ctrl[port], val,
				    __func__, &st->rc.i2c_slv_ctrl[port]);
		/* read lengths may have changed: recompute data offsets */
		if (slv_ctrl != st->rc.i2c_slv_ctrl[port])
			nvi_aux_ext_data_offset(st);
	}
	return ret;
}
1670
/* Apply the global AUX I2C master enable state.
 * en_req is the requested state; it is forced off while bypass mode is
 * active or when no port is enabled.  force re-applies the per-port
 * enables even if the global state did not change.  fn is the caller's
 * name for debug spew.
 */
int nvi_aux_enable(struct nvi_state *st, const char *fn,
		   bool en_req, bool force)
{
	bool enable = en_req;
	bool enabled = false;
	bool en;
	unsigned int msk_en;
	unsigned int i;
	int ret = 0;

	/* bypass hands the bus to the host: AUX master must stay off */
	if (st->rc.int_pin_cfg & BIT_BYPASS_EN)
		enable = false;
	/* global enable is honored only if a port is enabled */
	msk_en = st->snsr[DEV_AUX].enable | st->aux.dmp_en_msk;
	if (!msk_en)
		enable = false;
	if (st->en_msk & (1 << DEV_AUX))
		enabled = true;
	if (force || enable != enabled) {
		if (enable) {
			st->en_msk |= (1 << DEV_AUX);
			for (i = 0; i < AUX_PORT_MAX; i++) {
				if (msk_en & (1 << i))
					en = true;
				else
					en = false;
				ret |= nvi_aux_port_en(st, i, en);
			}
		} else {
			st->en_msk &= ~(1 << DEV_AUX);
			/* only touch ports that were programmed in HW */
			for (i = 0; i < AUX_PORT_MAX; i++) {
				if (st->rc.i2c_slv_addr[i])
					nvi_aux_port_en(st, i, false);
			}
		}
		if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG |
			       NVI_DBG_SPEW_AUX))
			dev_info(&st->i2c->dev,
				 "%s-%s en_req=%x enabled: %x->%x err=%d\n",
				 __func__, fn, en_req, enabled, enable, ret);
	}
	return ret;
}
1714
/* Enable/disable the AUX ports selected by port_mask and propagate the
 * change: mark the FIFO for reset when needed, update the global AUX
 * enable, recalculate periods and re-run the device enable sequence.
 */
static int nvi_aux_port_enable(struct nvi_state *st,
			       unsigned int port_mask, bool en)
{
	unsigned int enabled;
	unsigned int i;
	int ret;

	enabled = st->snsr[DEV_AUX].enable;
	if (en)
		st->snsr[DEV_AUX].enable |= port_mask;
	else
		st->snsr[DEV_AUX].enable &= ~port_mask;
	/* nothing to do if the enable mask didn't actually change */
	if (enabled == st->snsr[DEV_AUX].enable)
		return 0;

	if (st->hal->dev[DEV_AUX]->fifo_en_msk) {
		/* AUX uses FIFO */
		for (i = 0; i < AUX_PORT_IO; i++) {
			if (port_mask & (1 << i)) {
				/* read ports feed the FIFO: layout changes */
				if (st->aux.port[i].nmp.addr & BIT_I2C_READ)
					st->aux.reset_fifo = true;
			}
		}
	}
	/* in bypass mode only record the enable; AUX master is off the bus */
	if (en && (st->rc.int_pin_cfg & BIT_BYPASS_EN))
		return 0;

	ret = 0;
	for (i = 0; i < AUX_PORT_MAX; i++) {
		if (port_mask & (1 << i))
			ret |= nvi_aux_port_en(st, i, en);
	}
	ret |= nvi_aux_enable(st, __func__, true, false);
	nvi_period_aux(st);
	/* only the general-purpose ports require the full re-enable path */
	if (port_mask & ((1 << AUX_PORT_IO) - 1))
		ret |= nvi_en(st);
	return ret;
}
1753
/* Release an AUX port: clear its SW state and, if it had been
 * programmed into HW, write the zeroed config, disable it and cycle
 * the AUX master so the remaining ports keep running.
 */
static int nvi_aux_port_free(struct nvi_state *st, int port)
{
	memset(&st->aux.port[port], 0, sizeof(struct aux_port));
	st->snsr[DEV_AUX].enable &= ~(1 << port);
	st->aux.dmp_en_msk &= ~(1 << port);
	if (st->rc.i2c_slv_addr[port]) {
		/* push the now-zeroed port config to HW before disabling */
		nvi_aux_port_wr(st, port);
		nvi_aux_port_en(st, port, false);
		nvi_aux_enable(st, __func__, false, false);
		nvi_user_ctrl_en(st, __func__, false, false, false, false);
		nvi_aux_enable(st, __func__, true, false);
		/* freeing a data port changes slv_ctrl: schedule I2C reset */
		if (port != AUX_PORT_IO)
			st->aux.reset_i2c = true;
		nvi_period_aux(st);
		nvi_en(st);
	}
	return 0;
}
1772
1773 static int nvi_aux_port_alloc(struct nvi_state *st,
1774                               struct nvi_mpu_port *nmp, int port)
1775 {
1776         int i;
1777
1778         if (st->aux.reset_i2c)
1779                 nvi_reset(st, __func__, false, true, true);
1780         if (port < 0) {
1781                 for (i = 0; i < AUX_PORT_IO; i++) {
1782                         if (st->aux.port[i].nmp.addr == 0)
1783                                 break;
1784                 }
1785                 if (i == AUX_PORT_IO)
1786                         return -ENODEV;
1787         } else {
1788                 if (st->aux.port[port].nmp.addr == 0)
1789                         i = port;
1790                 else
1791                         return -ENODEV;
1792         }
1793
1794         memset(&st->aux.port[i], 0, sizeof(struct aux_port));
1795         memcpy(&st->aux.port[i].nmp, nmp, sizeof(struct nvi_mpu_port));
1796         if (!st->aux.port[i].nmp.dmp_ctrl)
1797                 st->aux.port[i].nmp.dmp_ctrl = st->aux.port[i].nmp.ctrl;
1798         st->aux.port[i].period_us = st->aux.port[i].nmp.delay_us;
1799         return i;
1800 }
1801
/* Switch the I2C bypass mux.  Enabling bypass requires the AUX master
 * to be shut down first; disabling bypass re-enables the AUX master.
 */
static int nvi_aux_bypass_enable(struct nvi_state *st, bool en)
{
	u8 val;
	int ret;

	/* already in bypass mode: nothing to do */
	if (en && (st->rc.int_pin_cfg & BIT_BYPASS_EN))
		return 0;

	val = st->rc.int_pin_cfg;
	if (en) {
		/* AUX master must be off before handing the bus to the host */
		ret = nvi_aux_enable(st, __func__, false, false);
		ret |= nvi_user_ctrl_en(st, __func__,
					false, false, false, false);
		if (!ret) {
			val |= BIT_BYPASS_EN;
			ret = nvi_i2c_wr_rc(st, &st->hal->reg->int_pin_cfg,
					   val, __func__, &st->rc.int_pin_cfg);
		}
	} else {
		val &= ~BIT_BYPASS_EN;
		ret = nvi_i2c_wr_rc(st, &st->hal->reg->int_pin_cfg, val,
				    __func__, &st->rc.int_pin_cfg);
		if (!ret)
			nvi_aux_enable(st, __func__, true, false);
	}
	/* timings and enables may have changed either way */
	nvi_period_aux(st);
	nvi_en(st);
	return ret;
}
1831
1832 static int nvi_aux_bypass_request(struct nvi_state *st, bool enable)
1833 {
1834         s64 ns;
1835         s64 to;
1836         int ret = 0;
1837
1838         if ((bool)(st->rc.int_pin_cfg & BIT_BYPASS_EN) == enable) {
1839                 st->aux.bypass_timeout_ns = nvs_timestamp();
1840                 st->aux.bypass_lock++;
1841                 if (!st->aux.bypass_lock)
1842                         dev_err(&st->i2c->dev, "%s rollover ERR\n", __func__);
1843         } else {
1844                 if (st->aux.bypass_lock) {
1845                         ns = nvs_timestamp() - st->aux.bypass_timeout_ns;
1846                         to = st->bypass_timeout_ms * 1000000;
1847                         if (ns > to)
1848                                 st->aux.bypass_lock = 0;
1849                         else
1850                                 ret = -EBUSY;
1851                 }
1852                 if (!st->aux.bypass_lock) {
1853                         ret = nvi_aux_bypass_enable(st, enable);
1854                         if (ret)
1855                                 dev_err(&st->i2c->dev, "%s ERR=%d\n",
1856                                         __func__, ret);
1857                         else
1858                                 st->aux.bypass_lock++;
1859                 }
1860         }
1861         return ret;
1862 }
1863
1864 static int nvi_aux_bypass_release(struct nvi_state *st)
1865 {
1866         int ret = 0;
1867
1868         if (st->aux.bypass_lock)
1869                 st->aux.bypass_lock--;
1870         if (!st->aux.bypass_lock) {
1871                 ret = nvi_aux_bypass_enable(st, false);
1872                 if (ret)
1873                         dev_err(&st->i2c->dev, "%s ERR=%d\n", __func__, ret);
1874         }
1875         return ret;
1876 }
1877
/* Probe for a device on the AUX I2C bus using the special slave 4 port
 * (AUX_PORT_IO).  For a read transaction the byte read is returned in
 * *data.  Returns 0 on success, -EBUSY on setup/lock failure, -ENODEV
 * when the transaction never completed and -EIO when the device NACKed.
 */
static int nvi_aux_dev_valid(struct nvi_state *st,
			     struct nvi_mpu_port *nmp, u8 *data)
{
	u8 val;
	int i;
	int ret;

	/* turn off bypass */
	ret = nvi_aux_bypass_request(st, false);
	if (ret)
		return -EBUSY;

	/* grab the special port */
	ret = nvi_aux_port_alloc(st, nmp, AUX_PORT_IO);
	if (ret != AUX_PORT_IO) {
		nvi_aux_bypass_release(st);
		return -EBUSY;
	}

	/* enable it at fastest speed */
	st->aux.port[AUX_PORT_IO].nmp.delay_ms = 0;
	st->aux.port[AUX_PORT_IO].period_us =
			st->hal->src[st->hal->dev[DEV_AUX]->src].period_us_min;
	ret = nvi_user_ctrl_en(st, __func__, false, false, false, false);
	ret |= nvi_aux_port_enable(st, 1 << AUX_PORT_IO, true);
	ret |= nvi_user_ctrl_en(st, __func__, false, false, true, false);
	if (ret) {
		nvi_aux_port_free(st, AUX_PORT_IO);
		nvi_aux_bypass_release(st);
		return -EBUSY;
	}

	/* now turn off all the other ports for fastest response */
	for (i = 0; i < AUX_PORT_IO; i++) {
		if (st->rc.i2c_slv_addr[i])
			nvi_aux_port_en(st, i, false);
	}
	/* start reading the results */
	for (i = 0; i < AUX_DEV_VALID_READ_LOOP_MAX; i++) {
		mdelay(AUX_DEV_VALID_READ_DELAY_MS);
		val = 0;
		ret = nvi_i2c_rd(st, &st->hal->reg->i2c_mst_status, &val);
		if (ret)
			continue;

		/* 0x50 = I2C_SLV4_DONE (0x40) | I2C_SLV4_NACK (0x10):
		 * the slave 4 transaction finished one way or the other
		 */
		if (val & 0x50)
			break;
	}
	/* these will restore all previously disabled ports */
	nvi_aux_bypass_release(st);
	nvi_aux_port_free(st, AUX_PORT_IO);
	if (i >= AUX_DEV_VALID_READ_LOOP_MAX)
		return -ENODEV;

	if (val & 0x10) /* NACK */
		return -EIO;

	if (nmp->addr & BIT_I2C_READ) {
		/* fetch the byte the slave 4 transaction read back */
		ret = nvi_i2c_rd(st, &st->hal->reg->i2c_slv4_di, &val);
		if (ret)
			return -EBUSY;

		*data = (u8)val;
		dev_info(&st->i2c->dev, "%s MPU read 0x%x from device 0x%x\n",
			__func__, val, (nmp->addr & ~BIT_I2C_READ));
	} else {
		dev_info(&st->i2c->dev, "%s MPU found device 0x%x\n",
			__func__, (nmp->addr & ~BIT_I2C_READ));
	}
	return 0;
}
1949
1950 static int nvi_aux_mpu_call_pre(struct nvi_state *st, int port)
1951 {
1952         if ((port < 0) || (port >= AUX_PORT_IO))
1953                 return -EINVAL;
1954
1955         if (st->sts & (NVS_STS_SHUTDOWN | NVS_STS_SUSPEND))
1956                 return -EPERM;
1957
1958         if (!st->aux.port[port].nmp.addr)
1959                 return -EINVAL;
1960
1961         return 0;
1962 }
1963
1964 static int nvi_aux_mpu_call_post(struct nvi_state *st,
1965                                  char *tag, int ret)
1966 {
1967         if (ret < 0)
1968                 ret = -EBUSY;
1969         nvi_aux_dbg(st, tag, ret);
1970         return ret;
1971 }
1972
1973 /* See the mpu.h file for details on the nvi_mpu_ calls.
1974  */
/* Exported API: validate that a slave device responds on the AUX bus.
 * Returns -EAGAIN until the driver is probed, -EINVAL on bad arguments,
 * -EPERM during shutdown/suspend, else the nvi_aux_dev_valid() result.
 */
int nvi_mpu_dev_valid(struct nvi_mpu_port *nmp, u8 *data)
{
	struct nvi_state *st = nvi_state_local;
	int ret = -EPERM;

	if (st != NULL) {
		if (st->sts & NVI_DBG_SPEW_AUX)
			pr_info("%s\n", __func__);
	} else {
		/* driver not (yet) probed */
		pr_debug("%s ERR -EAGAIN\n", __func__);
		return -EAGAIN;
	}

	if (nmp == NULL)
		return -EINVAL;

	/* a read transaction needs somewhere to return the data */
	if ((nmp->addr & BIT_I2C_READ) && (data == NULL))
		return -EINVAL;

	nvi_mutex_lock(st);
	if (!(st->sts & (NVS_STS_SHUTDOWN | NVS_STS_SUSPEND))) {
		nvi_pm(st, __func__, NVI_PM_ON);
		ret = nvi_aux_dev_valid(st, nmp, data);
		nvi_pm(st, __func__, NVI_PM_AUTO);
		nvi_aux_dbg(st, "nvi_mpu_dev_valid=", ret);
	}
	nvi_mutex_unlock(st);
	return ret;
}
EXPORT_SYMBOL(nvi_mpu_dev_valid);
2005
/* Exported API: allocate an AUX port for a slave device.
 * port < 0 selects any free general-purpose port; otherwise the
 * requested port must be free.  Returns the allocated port number,
 * -EAGAIN until probed, -EINVAL on bad arguments, -EBUSY on failure.
 */
int nvi_mpu_port_alloc(struct nvi_mpu_port *nmp, int port)
{
	struct nvi_state *st = nvi_state_local;
	int ret = -EPERM;

	if (st != NULL) {
		if (st->sts & NVI_DBG_SPEW_AUX)
			pr_info("%s\n", __func__);
	} else {
		pr_debug("%s ERR -EAGAIN\n", __func__);
		return -EAGAIN;
	}

	/* the transfer length field in ctrl must be non-zero */
	if (nmp == NULL || !(nmp->ctrl & BITS_I2C_SLV_CTRL_LEN))
		return -EINVAL;

	if (port >= AUX_PORT_IO)
		return -EINVAL;

	nvi_mutex_lock(st);
	if (!(st->sts & (NVS_STS_SHUTDOWN | NVS_STS_SUSPEND))) {
		nvi_pm(st, __func__, NVI_PM_ON);
		ret = nvi_aux_port_alloc(st, nmp, port);
		if (ret >= 0 && st->hal->dmp)
			/* need to reinitialize DMP for new device */
			st->hal->dmp->fn_init(st);
		nvi_pm(st, __func__, NVI_PM_AUTO);
		ret = nvi_aux_mpu_call_post(st, "nvi_mpu_port_alloc=", ret);
	}
	nvi_mutex_unlock(st);
	return ret;
}
EXPORT_SYMBOL(nvi_mpu_port_alloc);
2039
2040 int nvi_mpu_port_free(int port)
2041 {
2042         struct nvi_state *st = nvi_state_local;
2043         int ret;
2044
2045         if (st != NULL) {
2046                 if (st->sts & NVI_DBG_SPEW_AUX)
2047                         pr_info("%s port %d\n", __func__, port);
2048         } else {
2049                 pr_debug("%s port %d ERR -EAGAIN\n", __func__, port);
2050                 return -EAGAIN;
2051         }
2052
2053         nvi_mutex_lock(st);
2054         ret = nvi_aux_mpu_call_pre(st, port);
2055         if (!ret) {
2056                 nvi_pm(st, __func__, NVI_PM_ON);
2057                 ret = nvi_aux_port_free(st, port);
2058                 nvi_pm(st, __func__, NVI_PM_AUTO);
2059                 ret = nvi_aux_mpu_call_post(st, "nvi_mpu_port_free=", ret);
2060         }
2061         nvi_mutex_unlock(st);
2062         return ret;
2063 }
2064 EXPORT_SYMBOL(nvi_mpu_port_free);
2065
/* Exported API: enable/disable the set of allocated AUX ports given by
 * port_mask.  All ports in the mask must have been allocated first.
 */
int nvi_mpu_enable(unsigned int port_mask, bool enable)
{
	struct nvi_state *st = nvi_state_local;
	unsigned int i;
	int ret;

	if (st != NULL) {
		if (st->sts & NVI_DBG_SPEW_AUX)
			pr_info("%s port_mask %x: %x\n",
				__func__, port_mask, enable);
	} else {
		pr_debug("%s port_mask %x: %x ERR -EAGAIN\n",
			 __func__, port_mask, enable);
		return -EAGAIN;
	}

	/* only the general-purpose ports may be controlled externally */
	if (port_mask >= (1 << AUX_PORT_IO) || !port_mask)
		return -EINVAL;

	/* every port in the mask must have been allocated */
	for (i = 0; i < AUX_PORT_IO; i++) {
		if (port_mask & (1 << i)) {
			if (!st->aux.port[i].nmp.addr)
				return -EINVAL;
		}
	}

	nvi_mutex_lock(st);
	if (st->sts & (NVS_STS_SHUTDOWN | NVS_STS_SUSPEND)) {
		ret = -EPERM;
	} else {
		nvi_pm(st, __func__, NVI_PM_ON);
		ret = nvi_aux_port_enable(st, port_mask, enable);
		ret = nvi_aux_mpu_call_post(st, "nvi_mpu_enable=", ret);
	}
	nvi_mutex_unlock(st);
	return ret;
}
EXPORT_SYMBOL(nvi_mpu_enable);
2104
/* Exported API: set the sampling delay (in ms) for an AUX port.
 * The value is cached; the HW delay is only reprogrammed when the port
 * is currently enabled.
 */
int nvi_mpu_delay_ms(int port, u8 delay_ms)
{
	struct nvi_state *st = nvi_state_local;
	int ret;

	if (st != NULL) {
		if (st->sts & NVI_DBG_SPEW_AUX)
			pr_info("%s port %d: %u\n", __func__, port, delay_ms);
	} else {
		pr_debug("%s port %d: %u ERR -EAGAIN\n",
			 __func__, port, delay_ms);
		return -EAGAIN;
	}

	nvi_mutex_lock(st);
	ret = nvi_aux_mpu_call_pre(st, port);
	if (!ret) {
		st->aux.port[port].nmp.delay_ms = delay_ms;
		/* only touch HW when the port is live */
		if (st->rc.i2c_slv_ctrl[port] & BIT_SLV_EN)
			ret = nvi_aux_delay(st, __func__);
		ret = nvi_aux_mpu_call_post(st, "nvi_mpu_delay_ms=", ret);
	}
	nvi_mutex_unlock(st);
	return ret;
}
EXPORT_SYMBOL(nvi_mpu_delay_ms);
2131
2132 int nvi_mpu_data_out(int port, u8 data_out)
2133 {
2134         struct nvi_state *st = nvi_state_local;
2135         int ret;
2136
2137         if (st == NULL)
2138                 return -EAGAIN;
2139
2140         ret = nvi_aux_mpu_call_pre(st, port);
2141         if (!ret) {
2142                 if (st->rc.i2c_slv_ctrl[port] & BIT_SLV_EN) {
2143                         ret = nvi_aux_port_data_out(st, port, data_out);
2144                 } else {
2145                         st->aux.port[port].nmp.data_out = data_out;
2146                         st->aux.port[port].hw_do = false;
2147                 }
2148                 if (ret < 0)
2149                         ret = -EBUSY;
2150         }
2151         return ret;
2152 }
2153 EXPORT_SYMBOL(nvi_mpu_data_out);
2154
/* Exported API: set batch parameters (period/timeout) for an AUX port.
 * A non-zero timeout requires a port whose device is known to the DMP
 * (valid nmp.id).  With the DMP running the batch change is applied
 * live through fn_dev_batch; otherwise the device is re-enabled when
 * the timing actually changed.
 */
int nvi_mpu_batch(int port, unsigned int period_us, unsigned int timeout_us)
{
	struct nvi_state *st = nvi_state_local;
	int ret;

	if (st != NULL) {
		if (st->sts & NVI_DBG_SPEW_AUX)
			pr_info("%s port %d: p=%u t=%u\n",
				__func__, port, period_us, timeout_us);
	} else {
		pr_debug("%s port %d: p=%u t=%u ERR -EAGAIN\n",
			__func__, port, period_us, timeout_us);
		return -EAGAIN;
	}

	nvi_mutex_lock(st);
	ret = nvi_aux_mpu_call_pre(st, port);
	if (!ret) {
		if (timeout_us && ((st->aux.port[port].nmp.id == ID_INVALID) ||
			      (st->aux.port[port].nmp.id >= ID_INVALID_END))) {
			/* sensor not supported by DMP */
			ret = -EINVAL;
		} else {
			st->aux.port[port].period_us = period_us;
			st->aux.port[port].timeout_us = timeout_us;
			ret = nvi_period_aux(st);
			if (st->en_msk & (1 << DEV_DMP) &&
						  st->hal->dmp->fn_dev_batch) {
				/* batch can be done real-time with DMP on */
				/* nvi_dd_batch */
				ret = st->hal->dmp->fn_dev_batch(st, DEV_AUX,
								 port);
			} else {
				if (ret > 0)
					/* timings changed */
					ret = nvi_en(st);
			}
			ret = nvi_aux_mpu_call_post(st, "nvi_mpu_batch=", ret);
		}
	}
	nvi_mutex_unlock(st);
	return ret;
}
EXPORT_SYMBOL(nvi_mpu_batch);
2199
/* Exported API: flush pending data for an AUX port.
 * With FIFO-backed AUX a HW read is triggered to drain the FIFO;
 * otherwise a SW flush event is pushed directly.
 */
int nvi_mpu_flush(int port)
{
	struct nvi_state *st = nvi_state_local;
	int ret;

	if (st != NULL) {
		if (st->sts & NVI_DBG_SPEW_AUX)
			pr_info("%s port %d\n", __func__, port);
	} else {
		pr_debug("%s port %d ERR -EAGAIN\n", __func__, port);
		return -EAGAIN;
	}

	nvi_mutex_lock(st);
	ret = nvi_aux_mpu_call_pre(st, port);
	if (!ret) {
		if (st->hal->dev[DEV_AUX]->fifo_en_msk) {
			/* HW flush only when FIFO is used for AUX */
			st->aux.port[port].flush = true;
			ret = nvi_read(st, true);
		} else {
			nvi_flush_aux(st, port);
		}
		ret = nvi_aux_mpu_call_post(st, "nvi_mpu_flush=", ret);
	}
	nvi_mutex_unlock(st);
	return ret;
}
EXPORT_SYMBOL(nvi_mpu_flush);
2229
2230 int nvi_mpu_fifo(int port, unsigned int *reserve, unsigned int *max)
2231 {
2232         struct nvi_state *st = nvi_state_local;
2233         int ret;
2234
2235         if (st != NULL) {
2236                 if (st->sts & NVI_DBG_SPEW_AUX)
2237                         pr_info("%s port %d\n", __func__, port);
2238         } else {
2239                 pr_debug("%s port %d ERR -EAGAIN\n", __func__, port);
2240                 return -EAGAIN;
2241         }
2242
2243         nvi_mutex_lock(st);
2244         ret = nvi_aux_mpu_call_pre(st, port);
2245         if (!ret) {
2246                 if ((st->aux.port[port].nmp.id != ID_INVALID) &&
2247                         (st->aux.port[port].nmp.id < ID_INVALID_END)) {
2248                         if (reserve)
2249                                 /* batch not supported at this time */
2250                                 *reserve = 0;
2251                         if (max)
2252                                 /* batch not supported at this time */
2253                                 *max = 0;
2254                         ret = nvi_aux_mpu_call_post(st, "nvi_mpu_fifo=", 0);
2255                 } else {
2256                         ret = -EINVAL;
2257                 }
2258         }
2259         nvi_mutex_unlock(st);
2260         return ret;
2261 }
2262 EXPORT_SYMBOL(nvi_mpu_fifo);
2263
/* Exported API: request the bypass mux state (reference counted).
 * Returns -EAGAIN until the driver is probed, -EPERM during
 * shutdown/suspend, else the nvi_aux_bypass_request() result.
 */
int nvi_mpu_bypass_request(bool enable)
{
	struct nvi_state *st = nvi_state_local;
	int ret = -EPERM;

	if (st != NULL) {
		if (st->sts & NVI_DBG_SPEW_AUX)
			pr_info("%s enable=%x\n", __func__, enable);
	} else {
		pr_debug("%s ERR -EAGAIN\n", __func__);
		return -EAGAIN;
	}

	nvi_mutex_lock(st);
	if (!(st->sts & (NVS_STS_SHUTDOWN | NVS_STS_SUSPEND))) {
		nvi_pm(st, __func__, NVI_PM_ON);
		ret = nvi_aux_bypass_request(st, enable);
		nvi_pm(st, __func__, NVI_PM_AUTO);
		ret = nvi_aux_mpu_call_post(st, "nvi_mpu_bypass_request=",
					    ret);
	}
	nvi_mutex_unlock(st);
	return ret;
}
EXPORT_SYMBOL(nvi_mpu_bypass_request);
2289
/* Exported API: drop one reference on the bypass lock.
 * Always returns 0; the actual mux switch happens when the last
 * reference is released.
 */
int nvi_mpu_bypass_release(void)
{
	struct nvi_state *st = nvi_state_local;

	if (st != NULL) {
		if (st->sts & NVI_DBG_SPEW_AUX)
			pr_info("%s\n", __func__);
	} else {
		pr_debug("%s\n", __func__);
		return 0;
	}

	nvi_mutex_lock(st);
	if (!(st->sts & (NVS_STS_SHUTDOWN | NVS_STS_SUSPEND))) {
		nvi_pm(st, __func__, NVI_PM_ON);
		nvi_aux_bypass_release(st);
		nvi_pm(st, __func__, NVI_PM_AUTO);
		nvi_aux_mpu_call_post(st, "nvi_mpu_bypass_release", 0);
	}
	nvi_mutex_unlock(st);
	return 0;
}
EXPORT_SYMBOL(nvi_mpu_bypass_release);
2313
2314
/* Reset the FIFO and/or the AUX I2C master HW.
 * The AUX master (and the DMP, when on) is disabled around the reset
 * and re-enabled afterwards.  A FIFO reset also restarts all timestamp
 * bookkeeping for the rate sources, devices and AUX ports.  Interrupts
 * are re-enabled per en_irq via nvi_user_ctrl_en().
 */
int nvi_reset(struct nvi_state *st, const char *fn,
	      bool rst_fifo, bool rst_i2c, bool en_irq)
{
	s64 ts;
	u8 val;
	bool rst_dmp = false;
	unsigned int i;
	int ret;

	ret = nvi_int_able(st, __func__, false);
	val = 0;
	/* honor a reset that was deferred via aux.reset_i2c */
	if (rst_i2c || st->aux.reset_i2c) {
		st->aux.reset_i2c = false;
		rst_i2c = true;
		ret |= nvi_aux_enable(st, __func__, false, false);
		val |= BIT_I2C_MST_RST;
	}
	if (rst_fifo) {
		st->aux.reset_fifo = false;
		val |= BIT_FIFO_RST;
		/* a FIFO reset with the DMP running needs a DMP reset too */
		if (st->en_msk & (1 << DEV_DMP)) {
			val |= BIT_DMP_RST;
			rst_dmp = true;
			ret |= nvi_aux_enable(st, __func__, false, false);
		}
	}
	ret |= nvi_user_ctrl_en(st, __func__,
				!rst_fifo, !rst_fifo, !rst_i2c, false);
	val |= st->rc.user_ctrl;
	ret |= nvi_user_ctrl_rst(st, val);
	if (rst_i2c || rst_dmp)
		ret |= nvi_aux_enable(st, __func__, true, false);
	ts = nvs_timestamp();
	if (rst_fifo) {
		/* restart timestamping for every sample rate source */
		for (i = 0; i < st->hal->src_n; i++) {
			st->src[i].ts_reset = true;
			st->src[i].ts_1st = ts;
			st->src[i].ts_end = ts;
			st->src[i].ts_period = st->src[i].period_us_src * 1000;
		}

		for (i = 0; i < DEV_N_AUX; i++) {
			st->snsr[i].ts_reset = true;
			st->snsr[i].ts_last = ts;
			st->snsr[i].ts_n = 0;
		}

		for (i = 0; i < AUX_PORT_MAX; i++) {
			st->aux.port[i].ts_reset = true;
			st->aux.port[i].ts_last = ts;
		}

		if (st->hal->dmp) {
			/* nvi_dmp_clk_n */
			ret |= st->hal->dmp->fn_clk_n(st, &st->dmp_clk_n);
			st->src[SRC_DMP].ts_reset = true;
			st->src[SRC_DMP].ts_1st = ts;
			st->src[SRC_DMP].ts_end = ts;
			st->src[SRC_DMP].ts_period =
					 st->src[SRC_DMP].period_us_src * 1000;
		}
	}

	ret |= nvi_user_ctrl_en(st, __func__, true, true, true, en_irq);
	if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG |
		       NVI_DBG_SPEW_FIFO | NVI_DBG_SPEW_TS))
		dev_info(&st->i2c->dev,
			 "%s-%s DMP=%x FIFO=%x I2C=%x ts=%lld err=%d\n",
			 __func__, fn, rst_dmp, rst_fifo, rst_i2c, ts, ret);
	return ret;
}
2386
/* Compute the timestamp for the next sample of a device (or AUX port).
 * ts_now == 0 forces the current time (no source extrapolation);
 * otherwise the time is extrapolated from the source's period and the
 * device's/port's last timestamp, clamped to ts_now.  Returns -1 when
 * the sample should be dropped (time went backward or within the push
 * delay window).  Note: callers pass aux_port = -1 for non-AUX use;
 * the unsigned compare against AUX_PORT_MAX then intentionally fails.
 */
s64 nvi_ts_dev(struct nvi_state *st, s64 ts_now,
	       unsigned int dev, unsigned int aux_port)
{
	s64 ts;
	int src;

	if (ts_now) {
		/* with the DMP on, all devices are clocked by SRC_DMP */
		if (st->en_msk & (1 << DEV_DMP))
			src = SRC_DMP;
		else
			src = st->hal->dev[dev]->src;
	} else {
		src = -1;
	}
	if (src < 0) {
		ts = nvs_timestamp();
	} else {
		if (dev == DEV_AUX && aux_port < AUX_PORT_MAX) {
			if (st->aux.port[aux_port].ts_reset) {
				/* first sample after reset: source start */
				st->aux.port[aux_port].ts_reset = false;
				ts = st->src[src].ts_1st;
			} else {
				ts = st->src[src].ts_period;
				/* odr is the sample-skip count vs the src */
				if (st->aux.port[aux_port].odr)
					ts *= (st->aux.port[aux_port].odr + 1);
				ts += st->aux.port[aux_port].ts_last;
			}
		} else {
			if (st->snsr[dev].ts_reset) {
				st->snsr[dev].ts_reset = false;
				ts = st->src[src].ts_1st;
			} else {
				ts = st->src[src].ts_period;
				if (st->snsr[dev].odr)
					ts *= (st->snsr[dev].odr + 1);
				ts += st->snsr[dev].ts_last;
			}
		}
		/* never report a time in the future */
		if (ts > ts_now) {
			if (st->sts & (NVI_DBG_SPEW_FIFO | NVI_DBG_SPEW_TS))
				dev_info(&st->i2c->dev,
					 "%s ts > ts_now (%lld > %lld)\n",
					 __func__, ts, ts_now);
			ts = ts_now;
		}
	}
	/* monotonicity check: -1 tells the caller to drop the sample */
	if (dev == DEV_AUX && aux_port < AUX_PORT_MAX) {
		if (ts < st->aux.port[aux_port].ts_last)
			ts = -1;
		else
			st->aux.port[aux_port].ts_last = ts;
	} else {
		if (ts < st->snsr[dev].ts_last)
			ts = -1;
		else
			st->snsr[dev].ts_last = ts;
	}
	if (ts < st->snsr[dev].ts_push_delay)
		ts = -1;
	if (st->sts & NVI_DBG_SPEW_FIFO && src >= 0)
		dev_info(&st->i2c->dev,
			 "src[%d] ts_period=%lld ts_end=%lld %s ts[%u]=%lld\n",
			 src, st->src[src].ts_period, st->src[src].ts_end,
			 st->snsr[dev].cfg.name, st->snsr[dev].ts_n, ts);
	st->snsr[dev].ts_n++;
	return ts;
}
2454
2455 static void nvi_aux_rd(struct nvi_state *st)
2456 {
2457         s64 ts;
2458         u8 *p;
2459         struct aux_port *ap;
2460         unsigned int len;
2461         unsigned int i;
2462         int ret;
2463
2464         if ((!st->aux.ext_data_n) || (!(st->rc.user_ctrl & BIT_I2C_MST_EN)))
2465                 return;
2466
2467         ret = nvi_i2c_r(st, st->hal->reg->ext_sens_data_00.bank,
2468                         st->hal->reg->ext_sens_data_00.reg,
2469                         st->aux.ext_data_n, (u8 *)&st->aux.ext_data);
2470         if (ret)
2471                 return;
2472
2473         ts = nvi_ts_dev(st, 0, DEV_AUX, -1);
2474         for (i = 0; i < AUX_PORT_IO; i++) {
2475                 ap = &st->aux.port[i];
2476                 if ((st->rc.i2c_slv_ctrl[i] & BIT_SLV_EN) &&
2477                                                (ap->nmp.addr & BIT_I2C_READ) &&
2478                                                    (ap->nmp.handler != NULL)) {
2479                         p = &st->aux.ext_data[ap->ext_data_offset];
2480                         len = ap->nmp.ctrl & BITS_I2C_SLV_CTRL_LEN;
2481                         ap->nmp.handler(p, len, ts, ap->nmp.ext_driver);
2482                 }
2483         }
2484 }
2485
2486 static s32 nvi_matrix(struct nvi_state *st, signed char *matrix,
2487                       s32 x, s32 y, s32 z, unsigned int axis)
2488 {
2489         return ((matrix[0 + axis] == 1 ? x :
2490                  (matrix[0 + axis] == -1 ? -x : 0)) +
2491                 (matrix[3 + axis] == 1 ? y :
2492                  (matrix[3 + axis] == -1 ? -y : 0)) +
2493                 (matrix[6 + axis] == 1 ? z :
2494                  (matrix[6 + axis] == -1 ? -z : 0)));
2495 }
2496
/* Assemble a sensor sample from the HW byte stream and push it to NVS.
 * @st: driver state
 * @dev: device index (DEV_*)
 * @buf: raw big endian byte stream from the HW/FIFO
 * @ts: sample timestamp; a negative value suppresses the push (sample
 *      dropped by nvi_ts_dev) but the data size is still returned
 * Returns the number of channel data bytes assembled (buf_le_i), which
 * callers use to advance through the FIFO stream.
 */
int nvi_push(struct nvi_state *st, unsigned int dev, u8 *buf, s64 ts)
{
	u8 buf_le[20];
	s32 val_le[4];
	s32 val[AXIS_N];
	u32 u_val;
	unsigned int sts;
	unsigned int buf_le_i;
	unsigned int ch;
	unsigned int ch_sz;
	unsigned int m;
	unsigned int n;
	int i;

	ch_sz = abs(st->snsr[dev].cfg.ch_sz);
	m = 0;
	if (st->snsr[dev].buf_n) {
		/* explicit HW buffer size: n = bytes per channel, m = the
		 * remainder, i.e. a shorter (misaligned) last channel
		 */
		n = st->snsr[dev].buf_n / st->snsr[dev].cfg.ch_n;
		m = st->snsr[dev].buf_n % st->snsr[dev].cfg.ch_n;
		if (m)
			n++;
	} else {
		/* default: HW bytes per channel == channel size */
		n = ch_sz;
	}
	/* convert big endian byte stream to little endian channel data */
	for (ch = 0; ch < st->snsr[dev].cfg.ch_n; ch++) {
		val_le[ch] = 0;
		if (st->snsr[dev].enable & (1 << ch)) {
			if (m && ch == (st->snsr[dev].cfg.ch_n - 1)) {
				/* handle last channel misalignment */
				for (i = 0; i < m; i++) {
					val_le[ch] <<= 8;
					val_le[ch] |= (u8)*buf++;
				}
				/* extend sign bit */
				i = (sizeof(val_le[ch]) - m) * 8;
				val_le[ch] <<= i;
				val_le[ch] >>= i;
			} else {
				for (i = 0; i < n; i++) {
					val_le[ch] <<= 8;
					val_le[ch] |= (u8)*buf++;
				}
				/* extend sign bit */
				i = (sizeof(val_le[ch]) - n) * 8;
				if (i) {
					val_le[ch] <<= i;
					val_le[ch] >>= i;
				}
			}
		}
	}

	/* shift HW data size to channel size if needed */
	if (st->snsr[dev].buf_shft) {
		if (st->snsr[dev].buf_shft < 0) {
			n = abs(st->snsr[dev].buf_shft);
			for (ch = 0; ch < st->snsr[dev].cfg.ch_n; ch++)
				val_le[ch] >>= n;
		} else {
			for (ch = 0; ch < st->snsr[dev].cfg.ch_n; ch++)
				val_le[ch] <<= st->snsr[dev].buf_shft;
		}
	}

	/* apply matrix if needed */
	if (st->snsr[dev].matrix) {
		/* snapshot first: nvi_matrix needs all three original axis
		 * values while val_le[] is being overwritten
		 */
		for (ch = 0; ch < AXIS_N; ch++)
			val[ch] = val_le[ch];

		for (ch = 0; ch < AXIS_N; ch++)
			val_le[ch] = nvi_matrix(st, st->snsr[dev].cfg.matrix,
						val[AXIS_X], val[AXIS_Y],
						val[AXIS_Z], ch);
	}

	/* convert little endian channel data to little endian byte stream */
	buf_le_i = 0;
	for (ch = 0; ch < st->snsr[dev].cfg.ch_n; ch++) {
		u_val = (u32)val_le[ch];
		for (i = 0; i < ch_sz; i++) {
			buf_le[buf_le_i + i] = (u8)(u_val & 0xFF);
			u_val >>= 8;
		}
		buf_le_i += ch_sz;
	}

	/* add status if needed (no endian conversion) */
	if (buf_le_i < st->snsr[dev].cfg.snsr_data_n) {
		n = st->snsr[dev].cfg.snsr_data_n - buf_le_i;
		u_val = st->snsr[dev].sts;
		for (i = 0; i < n; i++) {
			buf_le[buf_le_i + i] = (u8)(u_val & 0xFF);
			u_val >>= 8;
		}
	}

	if (ts >= 0) {
		if (st->sts & (NVI_DBG_SPEW_SNSR << dev)) {
			/* temporarily force data spew for this device's
			 * debug, then restore the previous spew state
			 */
			sts = st->sts;
			st->sts |= NVS_STS_SPEW_DATA;
			st->nvs->handler(st->snsr[dev].nvs_st, buf_le, ts);
			if (!(sts & NVS_STS_SPEW_DATA))
				st->sts &= ~NVS_STS_SPEW_DATA;
		} else {
			st->nvs->handler(st->snsr[dev].nvs_st, buf_le, ts);
		}
	}
	return buf_le_i;
}
2607
2608 static int nvi_push_event(struct nvi_state *st, unsigned int dev)
2609 {
2610         s64 ts = nvs_timestamp();
2611         u8 val = 1;
2612         unsigned int sts;
2613         int ret;
2614
2615         if (st->sts & (NVI_DBG_SPEW_SNSR << dev)) {
2616                 sts = st->sts;
2617                 st->sts |= NVS_STS_SPEW_DATA;
2618                 ret = st->nvs->handler(st->snsr[dev].nvs_st, &val, ts);
2619                 if (!(sts & NVS_STS_SPEW_DATA))
2620                         st->sts &= ~NVS_STS_SPEW_DATA;
2621         } else {
2622                 ret = st->nvs->handler(st->snsr[dev].nvs_st, &val, ts);
2623         }
2624         return ret;
2625 }
2626
2627 static int nvi_push_oneshot(struct nvi_state *st, unsigned int dev)
2628 {
2629         /* disable now to avoid reinitialization on handler's disable */
2630         st->snsr[dev].enable = 0;
2631         st->en_msk &= ~(1 << dev);
2632         return nvi_push_event(st, dev);
2633 }
2634
2635 static int nvi_dev_rd(struct nvi_state *st, unsigned int dev)
2636 {
2637         u8 buf[AXIS_N * 2];
2638         u16 len;
2639         int ret;
2640
2641         if (!st->snsr[dev].enable)
2642                 return 0;
2643
2644         len = st->snsr[dev].cfg.ch_n << 1;
2645         ret = nvi_i2c_r(st, st->hal->reg->out_h[dev].bank,
2646                         st->hal->reg->out_h[dev].reg, len, buf);
2647         if (!ret)
2648                 ret = nvi_push(st, dev, buf, nvi_ts_dev(st, 0, dev, 0));
2649         return ret;
2650 }
2651
2652 static int nvi_fifo_aux(struct nvi_state *st, s64 ts, unsigned int n)
2653 {
2654         struct aux_port *ap;
2655         unsigned int fifo_data_n;
2656         unsigned int port;
2657
2658         ts = nvi_ts_dev(st, ts, DEV_AUX, -1);
2659         for (port = 0; port < AUX_PORT_IO; port++) {
2660                 ap = &st->aux.port[port];
2661                 if (st->rc.fifo_en & (1 << st->hal->bit->slv_fifo_en[port])) {
2662                         fifo_data_n = ap->nmp.ctrl & BITS_I2C_SLV_CTRL_LEN;
2663                         if (fifo_data_n > n)
2664                                 return 0;
2665
2666                         ap->nmp.handler(&st->buf[st->buf_i], fifo_data_n, ts,
2667                                         ap->nmp.ext_driver);
2668                         st->buf_i += fifo_data_n;
2669                         n -= fifo_data_n;
2670                 }
2671                 if (st->sts & (NVS_STS_SUSPEND | NVS_STS_SHUTDOWN))
2672                         return -1;
2673         }
2674
2675         return 1;
2676 }
2677
2678 static int nvi_fifo_dev_rd(struct nvi_state *st, s64 ts, unsigned int n,
2679                            unsigned int dev)
2680 {
2681         if (st->sts & (NVS_STS_SUSPEND | NVS_STS_SHUTDOWN))
2682                 return -1;
2683
2684         if (st->hal->dev[dev]->fifo_data_n > n)
2685                 return 0;
2686
2687         nvi_push(st, dev, &st->buf[st->buf_i], nvi_ts_dev(st, ts, dev, 0));
2688         st->buf_i += st->hal->dev[dev]->fifo_data_n;
2689         return 1;
2690 }
2691
2692 static int nvi_fifo_dev(struct nvi_state *st, s64 ts, unsigned int n)
2693 {
2694         unsigned int dev;
2695         int ret;
2696
2697         dev = st->hal->fifo_dev[(st->rc.fifo_cfg >> 2) & 0x07];
2698         if (dev == DEV_AUX)
2699                 ret = nvi_fifo_aux(st, ts, n);
2700         else
2701                 ret = nvi_fifo_dev_rd(st, ts, n, dev);
2702         return ret;
2703 }
2704
2705 static int nvi_fifo_devs(struct nvi_state *st, s64 ts, unsigned int n)
2706 {
2707         unsigned int dev;
2708         int ret = 0;
2709
2710         for (dev = 0; dev < DEV_MPU_N; dev++) {
2711                 if (st->rc.fifo_en & st->hal->dev[dev]->fifo_en_msk) {
2712                         ret = nvi_fifo_dev_rd(st, ts, n, dev);
2713                         if (ret <= 0)
2714                                 return ret;
2715                 }
2716         }
2717
2718         if (st->rc.fifo_en & st->hal->dev[DEV_AUX]->fifo_en_msk)
2719                 ret = nvi_fifo_aux(st, ts, n);
2720         return ret;
2721 }
2722
/* fifo_n_max can be used if we want to round-robin FIFOs */
/* Drain the HW FIFO and dispatch its contents through fn.
 * @st: driver state
 * @src: timing source index, or < 0 for DMP timing (DMP clock counter)
 * @fifo_n_max: optional cap on bytes processed (FIFO timing only)
 * @fn: per-chunk dispatcher (nvi_fifo_dev/nvi_fifo_devs/DMP fn_rd) that
 *      advances st->buf_i and returns <0 error, 0 need-more-data, >0 done
 * Returns <= 0; -1 signals the caller to reset the FIFO.
 * Also maintains the source's ts_period/ts_end timing model.
 */
static int nvi_fifo_rd(struct nvi_state *st, int src, unsigned int fifo_n_max,
		       int (*fn)(struct nvi_state *st, s64 ts, unsigned int n))
{
	u16 fifo_count;
	u32 dmp_clk_n = 0;
	s64 ts_period;
	s64 ts_now;
	s64 ts_end;
	bool sync;
	unsigned int ts_n;
	unsigned int fifo_n;
	unsigned int buf_n;
	int ret = 0;

	ts_end = nvs_timestamp();
	if (src < 0)
		/* nvi_dmp_clk_n */
		ret = st->hal->dmp->fn_clk_n(st, &dmp_clk_n);
	ret |= nvi_i2c_rd(st, &st->hal->reg->fifo_count_h, (u8 *)&fifo_count);
	if (ret || !fifo_count)
		return 0;

	/* if the two reads above completed within 5ms we consider the
	 * timestamps "synchronized" with the HW state
	 */
	ts_now = nvs_timestamp();
	if (ts_now < (ts_end + 5000000))
		sync = true;
	else
		sync = false;
	ts_end = atomic64_read(&st->ts_irq);
	fifo_n = (unsigned int)be16_to_cpu(fifo_count);
	if (st->sts & NVS_STS_SPEW_IRQ)
		dev_info(&st->i2c->dev,
			 "src=%d sync=%x fifo_n=%u ts_clk_n=%u ts_diff=%lld\n",
			 src, sync, fifo_n, dmp_clk_n, ts_now - st->ts_now);
	st->ts_now = ts_now;
	if (src < 0) {
		/* DMP timing */
		if (dmp_clk_n > st->dmp_clk_n)
			ts_n = dmp_clk_n - st->dmp_clk_n;
		else
			/* counter rolled over */
			ts_n = (~st->dmp_clk_n + 1) + dmp_clk_n;
		/* ts_n is the number of DMP clock ticks since last time */
		st->dmp_clk_n = dmp_clk_n;
		src = SRC_DMP;
		fifo_n_max = 0; /* DMP disables round-robin FIFOs */
	} else {
		/* FIFO timing */
		ts_n = fifo_n / st->src[src].fifo_data_n; /* TS's needed */
		if ((fifo_n % st->src[src].fifo_data_n) || !ts_n)
			/* reset FIFO if doesn't divide cleanly */
			return -1;
	}

	if (ts_n) {
		ts_period = st->src[src].period_us_src * 1000;
		if (sync && ts_end > st->src[src].ts_end && ts_end < ts_now &&
					  ts_end > (ts_now - (ts_period >> 2)))
			/* ts_irq is within the rate so sync to IRQ */
			ts_now = ts_end;
		if (st->src[src].ts_reset) {
			/* restart the timing model from this batch */
			st->src[src].ts_reset = false;
			ts_end = st->src[src].ts_period * (ts_n - 1);
			if (sync) {
				st->src[src].ts_1st = ts_now - ts_end;
				st->src[src].ts_end = st->src[src].ts_1st;
			}
		} else {
			ts_end = st->src[src].ts_period * ts_n;
		}
		ts_end += st->src[src].ts_end;
		/* ts_now will be sent to nvi_ts_dev where the timestamp is
		 * prevented from going into the future which allows some
		 * tolerance here for ts_end being a little more than ts_now.
		 * The more tolerance we have the less recalculating the period
		 * to avoid swing around the true period.  Plus, the clamp on
		 * ts_now in nvi_ts_dev has the benefit of "syncing" with the
		 * current calculations per device.
		 */
		if (ts_end > (ts_now + (ts_period >> 3)) || (sync && (ts_end <
					       (ts_now - (ts_period >> 1))))) {
			if (st->sts & (NVI_DBG_SPEW_FIFO | NVI_DBG_SPEW_TS)) {
				dev_info(&st->i2c->dev,
					 "sync=%x now=%lld end=%lld ts_n=%u\n",
					 sync, ts_now, ts_end, ts_n);
				dev_info(&st->i2c->dev,
					 "src=%d old period=%lld end=%lld\n",
					 src, st->src[src].ts_period,
					 st->src[src].ts_end);
			}
			/* st->src[src].ts_period needs to be adjusted */
			ts_period = ts_now - st->src[src].ts_end;
			do_div(ts_period, ts_n);
			st->src[src].ts_period = ts_period;
			ts_end = ts_period * ts_n;
			ts_end += st->src[src].ts_end;
			if (st->sts & (NVI_DBG_SPEW_FIFO | NVI_DBG_SPEW_TS))
				dev_info(&st->i2c->dev,
					 "src=%d new period=%lld end=%lld\n",
					 src, ts_period, ts_end);
		}
		if (fifo_n_max) {
			/* would only apply to FIFO timing (non-DMP) */
			if (fifo_n_max < fifo_n) {
				fifo_n = fifo_n_max;
				ts_n = fifo_n / st->src[src].fifo_data_n;
				ts_end = st->src[src].ts_period * ts_n;
				ts_end += st->src[src].ts_end;
			}
		}
		st->src[src].ts_end = ts_end;
	} else {
		/* wasn't able to calculate TS */
		ts_now = 0;
	}

	/* drain the FIFO in st->buf sized chunks */
	while (fifo_n) {
		buf_n = sizeof(st->buf) - st->buf_i;
		if (buf_n > fifo_n)
			buf_n = fifo_n;
		ret = nvi_i2c_r(st, st->hal->reg->fifo_rw.bank,
				st->hal->reg->fifo_rw.reg,
				buf_n, &st->buf[st->buf_i]);
		if (ret)
			return 0;

		fifo_n -= buf_n;
		buf_n += st->buf_i;
		st->buf_i = 0;
		/* fn updates st->buf_i */
		while (st->buf_i < buf_n) {
			ret = fn(st, ts_now, buf_n - st->buf_i);
			/* ret < 0: error to exit
			 * ret = 0: not enough data to process
			 * ret > 0: all done processing data
			 */
			if (ret <= 0)
				break;
		}

		/* carry any partial sample to the front of the buffer */
		buf_n -= st->buf_i;
		if (buf_n) {
			memcpy(st->buf, &st->buf[st->buf_i], buf_n);
			st->buf_i = buf_n;
		} else {
			st->buf_i = 0;
		}
		if (ret < 0)
			break;
	}

	return ret;
}
2876
/* Top-level data read, dispatching to the active acquisition mode:
 * DMP, low-power accelerometer cycle, multi-FIFO (ICM), or single FIFO.
 * Returns 0 on success, -1 when the FIFO is misaligned and the caller
 * should reset the HW.
 */
static int nvi_rd(struct nvi_state *st)
{
	u8 val;
	u32 int_msk;
	unsigned int fifo;
	int src;
	int ret;

	if (st->en_msk & (1 << DEV_DMP)) {
		/* service DMP event interrupts (significant motion / step) */
		if (st->en_msk & ((1 << DEV_SM) | (1 << DEV_STP))) {
			ret = nvi_i2c_rd(st, &st->hal->reg->int_dmp, &val);
			if (val & (1 << st->hal->bit->dmp_int_sm))
				nvi_push_oneshot(st, DEV_SM);
			if (val & (1 << st->hal->bit->dmp_int_stp))
				nvi_push_event(st, DEV_STP);
		}
		if (st->en_msk & st->dmp_en_msk)
			/* nvi_dmp_rd */
			return nvi_fifo_rd(st, -1, 0, st->hal->dmp->fn_rd);

		/* no DMP devices left enabled - reconfigure */
		nvi_en(st);
		return 0;
	}

	if (st->pm == NVI_PM_ON_CYCLE) {
		/* only low power accelerometer data */
		nvi_pm(st, __func__, NVI_PM_ON);
		ret = nvi_dev_rd(st, DEV_ACC);
		nvi_pm(st, __func__, NVI_PM_AUTO);
		return 0;
	}

	/* temperature is register-read only (no FIFO) */
	nvi_dev_rd(st, DEV_TMP);
	if (!(st->rc.fifo_en & st->hal->dev[DEV_AUX]->fifo_en_msk))
		nvi_aux_rd(st);
	/* handle FIFO enabled data */
	if (st->rc.fifo_cfg & 0x01) {
		/* multi FIFO enabled */
		int_msk = 1 << st->hal->bit->int_data_rdy_0;
		for (fifo = 0; fifo < st->hal->fifo_n; fifo++) {
			if (st->rc.int_enable & (int_msk << fifo)) {
				ret = nvi_wr_fifo_cfg(st, fifo);
				if (ret)
					return 0;

				src = st->hal->dev[st->hal->
						   fifo_dev[fifo]]->src;
				ret = nvi_fifo_rd(st, src, 0, nvi_fifo_dev);
				if (st->buf_i || (ret < 0)) {
					/* HW FIFO misalignment - reset */
					nvi_err(st);
					return -1;
				}
			}
		}
	} else {
		/* st->fifo_src is either SRC_MPU or the source for the single
		 * device enabled for the single FIFO in ICM.
		 */
		ret = nvi_fifo_rd(st, st->fifo_src, 0, nvi_fifo_devs);
		if (st->buf_i || (ret < 0)) {
			/* HW FIFO misalignment - reset */
			nvi_err(st);
			return -1;
		}
	}

	return 0;
}
2946
2947 static int nvi_read(struct nvi_state *st, bool flush)
2948 {
2949         int ret;
2950
2951         if (st->irq_dis && !(st->sts & NVS_STS_SHUTDOWN)) {
2952                 dev_err(&st->i2c->dev, "%s ERR: IRQ storm reset. n=%u\n",
2953                         __func__, st->irq_storm_n);
2954                 st->irq_storm_n = 0;
2955                 nvi_pm(st, __func__, NVI_PM_ON);
2956                 nvi_wr_pm1(st, __func__, BIT_H_RESET);
2957                 nvi_enable_irq(st);
2958                 nvi_en(st);
2959         } else if (!(st->sts & (NVS_STS_SUSPEND | NVS_STS_SHUTDOWN))) {
2960                 ret = nvi_rd(st);
2961                 if (ret < 0)
2962                         nvi_en(st); /* a little harder reset for ICM DMP */
2963                 else if (flush)
2964                         nvi_reset(st, __func__, true, false, true);
2965         } else if (flush) {
2966                 nvi_flush_push(st);
2967         }
2968         return 0;
2969 }
2970
2971 static irqreturn_t nvi_thread(int irq, void *dev_id)
2972 {
2973         struct nvi_state *st = (struct nvi_state *)dev_id;
2974
2975         nvi_mutex_lock(st);
2976         nvi_read(st, false);
2977         nvi_mutex_unlock(st);
2978         return IRQ_HANDLED;
2979 }
2980
2981 static irqreturn_t nvi_handler(int irq, void *dev_id)
2982 {
2983         struct nvi_state *st = (struct nvi_state *)dev_id;
2984         u64 ts = nvs_timestamp();
2985         u64 ts_old = atomic64_xchg(&st->ts_irq, ts);
2986         u64 ts_diff = ts - ts_old;
2987
2988         /* test for MPU IRQ storm problem */
2989         if (ts_diff < NVI_IRQ_STORM_MIN_NS) {
2990                 st->irq_storm_n++;
2991                 if (st->irq_storm_n > NVI_IRQ_STORM_MAX_N)
2992                         nvi_disable_irq(st);
2993         } else {
2994                 st->irq_storm_n = 0;
2995         }
2996
2997         if (st->sts & NVS_STS_SPEW_IRQ)
2998                 dev_info(&st->i2c->dev, "%s ts=%llu ts_diff=%llu irq_dis=%x\n",
2999                          __func__, ts, ts_diff, st->irq_dis);
3000         return IRQ_WAKE_THREAD;
3001 }
3002
/* NVS enable callback for a sensor.
 * @client: driver state (struct nvi_state *)
 * @snsr_id: device index (DEV_*)
 * @enable: channel enable mask; < 0 queries the current enable state
 * Returns the current enable state for a query, otherwise the result of
 * reprogramming the HW (nvi_en) or 0 when no HW work is needed.
 */
static int nvi_enable(void *client, int snsr_id, int enable)
{
	struct nvi_state *st = (struct nvi_state *)client;

	if (enable < 0)
		/* return current enable request status */
		return st->snsr[snsr_id].enable;

	if (st->snsr[snsr_id].enable == enable)
		/* nothing has changed with enable request */
		return 0;

	st->snsr[snsr_id].enable = enable;
	if (!enable)
		/* officially flagged as off here */
		st->en_msk &= ~(1 << snsr_id);
	if (st->sts & NVS_STS_SUSPEND)
		/* speed up suspend/resume by not doing nvi_en for every dev */
		return 0;

	if (snsr_id == DEV_TMP)
		/* this is a static sensor that will be read when gyro is on */
		return 0;

	if (st->en_msk & (1 << DEV_DMP)) {
		/* DMP is currently on */
		if (!(st->en_msk & st->dmp_en_msk))
			/* DMP may get turned off (may stay on due to batch) so
			 * we update timings that may have changed while DMP
			 * was on.
			 */
			nvi_period_all(st);
	} else {
		/* recompute the timing source and watchdog for this device */
		nvi_period_src(st, st->hal->dev[snsr_id]->src);
		nvi_timeout(st);
	}
	return nvi_en(st);
}
3041
/* NVS batch callback: set a sensor's period and batch timeout.
 * @client: driver state (struct nvi_state *)
 * @snsr_id: device index (DEV_*)
 * @flags: batch flags (unused here)
 * @period: sampling period in us
 * @timeout: batch timeout in us (requires FIFO batching support)
 * Returns 0 when nothing needs reprogramming, otherwise the HW result.
 */
static int nvi_batch(void *client, int snsr_id, int flags,
		     unsigned int period, unsigned int timeout)
{
	struct nvi_state *st = (struct nvi_state *)client;
	int ret;

	/* a timeout requires HW batching (FIFO event count) support */
	if (timeout && !st->snsr[snsr_id].cfg.fifo_max_evnt_cnt)
		return -EINVAL;

	if (snsr_id == DEV_TMP)
		return 0;

	if (period == st->snsr[snsr_id].period_us &&
				       timeout == st->snsr[snsr_id].timeout_us)
		return 0;

	st->snsr[snsr_id].period_us = period;
	st->snsr[snsr_id].timeout_us = timeout;
	if (!st->snsr[snsr_id].enable)
		/* stored for when the device is enabled */
		return 0;

	ret = nvi_timeout(st);
	if (st->en_msk & (1 << DEV_DMP)) {
		if (st->hal->dmp->fn_dev_batch)
			/* batch can be done in real-time with the DMP on */
			/* nvi_dd_batch */
			ret = st->hal->dmp->fn_dev_batch(st, snsr_id, -1);
		else
			ret = nvi_en(st);
	} else {
		/* ret > 0 from nvi_period_src means the HW period changed
		 * and the device needs reprogramming
		 */
		ret |= nvi_period_src(st, st->hal->dev[snsr_id]->src);
		if (ret > 0)
			ret = nvi_en(st);
	}

	return ret;
}
3079
3080 static int nvi_flush(void *client, int snsr_id)
3081 {
3082         struct nvi_state *st = (struct nvi_state *)client;
3083         int ret = -EINVAL;
3084
3085         if (st->snsr[snsr_id].enable) {
3086                 st->snsr[snsr_id].flush = true;
3087                 ret = nvi_read(st, true);
3088         }
3089         return ret;
3090 }
3091
/* NVS max_range callback: select a resolution/range setting for a sensor
 * and propagate the HAL's values for that setting into the sensor config.
 * @client: driver state (struct nvi_state *)
 * @snsr_id: device index (DEV_*)
 * @max_range: index into the HAL's resolution/range table (clamped)
 * Returns 0 on success, -EINVAL for a bad device, -EPERM while enabled.
 */
static int nvi_max_range(void *client, int snsr_id, int max_range)
{
	struct nvi_state *st = (struct nvi_state *)client;
	unsigned int i = max_range;
	unsigned int ch;

	if (snsr_id < 0 || snsr_id >= DEV_N)
		return -EINVAL;

	if (st->snsr[snsr_id].enable)
		/* can't change settings on the fly (disable device first) */
		return -EPERM;

	if (i > st->hal->dev[snsr_id]->rr_0n)
		/* clamp to highest setting */
		i = st->hal->dev[snsr_id]->rr_0n;
	/* copy the selected resolution/range entry into the sensor config */
	st->snsr[snsr_id].usr_cfg = i;
	st->snsr[snsr_id].cfg.resolution.ival =
				  st->hal->dev[snsr_id]->rr[i].resolution.ival;
	st->snsr[snsr_id].cfg.resolution.fval =
				  st->hal->dev[snsr_id]->rr[i].resolution.fval;
	st->snsr[snsr_id].cfg.max_range.ival =
				   st->hal->dev[snsr_id]->rr[i].max_range.ival;
	st->snsr[snsr_id].cfg.max_range.fval =
				   st->hal->dev[snsr_id]->rr[i].max_range.fval;
	st->snsr[snsr_id].cfg.offset.ival = st->hal->dev[snsr_id]->offset.ival;
	st->snsr[snsr_id].cfg.offset.fval = st->hal->dev[snsr_id]->offset.fval;
	st->snsr[snsr_id].cfg.scale.ival = st->hal->dev[snsr_id]->scale.ival;
	st->snsr[snsr_id].cfg.scale.fval = st->hal->dev[snsr_id]->scale.fval;
	/* AXIS sensors need resolution put in the scales */
	if (st->snsr[snsr_id].cfg.ch_n_max) {
		for (ch = 0; ch < st->snsr[snsr_id].cfg.ch_n_max; ch++) {
			st->snsr[snsr_id].cfg.scales[ch].ival =
					 st->snsr[snsr_id].cfg.resolution.ival;
			st->snsr[snsr_id].cfg.scales[ch].fval =
					 st->snsr[snsr_id].cfg.resolution.fval;
		}
	}

	return 0;
}
3133
3134 static int nvi_offset(void *client, int snsr_id, int channel, int offset)
3135 {
3136         struct nvi_state *st = (struct nvi_state *)client;
3137         int old;
3138         int ret;
3139
3140         if (snsr_id >= DEV_AXIS_N || channel >= AXIS_N)
3141                 return -EINVAL;
3142
3143         old = st->dev_offset[snsr_id][channel];
3144         st->dev_offset[snsr_id][channel] = offset;
3145         if (st->en_msk & (1 << snsr_id)) {
3146                 ret = nvi_en(st);
3147                 if (ret) {
3148                         st->dev_offset[snsr_id][channel] = old;
3149                         return -EINVAL;
3150                 }
3151         }
3152
3153         return 0;
3154 }
3155
3156 static int nvi_thresh_lo(void *client, int snsr_id, int thresh_lo)
3157 {
3158         struct nvi_state *st = (struct nvi_state *)client;
3159         int ret = 1;
3160
3161         switch (snsr_id) {
3162         case DEV_ACC:
3163                 return 0;
3164
3165         case DEV_SM:
3166                 st->snsr[DEV_SM].cfg.thresh_lo = thresh_lo;
3167                 if (st->en_msk & (1 << DEV_DMP))
3168                         ret = st->hal->dmp->fn_dev_init(st, snsr_id);
3169                 return ret;
3170
3171         default:
3172                 return -EINVAL;
3173         }
3174
3175         return ret;
3176 }
3177
3178 static int nvi_thresh_hi(void *client, int snsr_id, int thresh_hi)
3179 {
3180         struct nvi_state *st = (struct nvi_state *)client;
3181         int ret = 1;
3182
3183         switch (snsr_id) {
3184         case DEV_ACC:
3185                 if (thresh_hi > 0)
3186                         st->en_msk |= (1 << EN_LP);
3187                 else
3188                         st->en_msk &= ~(1 << EN_LP);
3189                 return 1;
3190
3191         case DEV_SM:
3192                 st->snsr[DEV_SM].cfg.thresh_hi = thresh_hi;
3193                 if (st->en_msk & (1 << DEV_DMP))
3194                         ret = st->hal->dmp->fn_dev_init(st, snsr_id);
3195                 return ret;
3196
3197         default:
3198                 return -EINVAL;
3199         }
3200
3201         return ret;
3202 }
3203
/* NVS reset callback: hard-reset the device.
 * Powers the device fully on, writes the host reset bit (BIT_H_RESET)
 * to PWR_MGMT_1, then reprograms all sensor periods and re-applies
 * the current enable state.  Errors from each step are OR'd together.
 */
static int nvi_reset_dev(void *client, int snsr_id)
{
	struct nvi_state *st = (struct nvi_state *)client;
	int ret;

	ret = nvi_pm(st, __func__, NVI_PM_ON);
	ret |= nvi_wr_pm1(st, __func__, BIT_H_RESET);
	nvi_period_all(st);
	ret |= nvi_en(st);
	return ret;
}
3215
/* NVS self_test callback: run the hardware self-test for DEV_ACC or
 * DEV_GYR via the HAL st_acc()/st_gyr() hooks; any other sensor
 * trivially passes (ret = 0).  The device is powered on and the aux
 * I2C master and user-control paths are disabled for the duration of
 * the test, then normal operation is restored via nvi_aux_enable()/
 * nvi_period_all()/nvi_en().
 * Writes "<ret>   PASS|FAIL" to buf and returns the string length.
 */
static int nvi_self_test(void *client, int snsr_id, char *buf)
{
	struct nvi_state *st = (struct nvi_state *)client;
	int ret;

	nvi_pm(st, __func__, NVI_PM_ON);
	nvi_aux_enable(st, __func__, false, false);
	nvi_user_ctrl_en(st, __func__, false, false, false, false);
	if (snsr_id == DEV_ACC)
		ret = st->hal->fn->st_acc(st);
	else if (snsr_id == DEV_GYR)
		ret = st->hal->fn->st_gyr(st);
	else
		ret = 0;
	nvi_aux_enable(st, __func__, true, false);
	nvi_period_all(st);
	nvi_en(st);
	if (ret)
		return sprintf(buf, "%d   FAIL\n", ret);

	return sprintf(buf, "%d   PASS\n", ret);
}
3238
/* NVS regs callback: dump every register of every register bank that
 * reads back non-zero (or "ERR" on read failure) into buf.
 * The FIFO read/write register is skipped - presumably because
 * reading it has side effects on the FIFO contents (TODO confirm).
 * Returns the number of bytes written to buf.
 */
static int nvi_regs(void *client, int snsr_id, char *buf)
{
	struct nvi_state *st = (struct nvi_state *)client;
	ssize_t t;
	u8 data;
	unsigned int i;
	unsigned int j;
	int ret;

	t = sprintf(buf, "registers: (only data != 0 shown)\n");
	for (j = 0; j < st->hal->reg_bank_n; j++) {
		t += sprintf(buf + t, "bank %u:\n", j);
		for (i = 0; i < st->hal->regs_n; i++) {
			if ((j == st->hal->reg->fifo_rw.bank) &&
					      (i == st->hal->reg->fifo_rw.reg))
				continue;

			ret = nvi_i2c_r(st, j, i, 1, &data);
			if (ret)
				t += sprintf(buf + t, "0x%02x=ERR\n", i);
			else if (data)
				t += sprintf(buf + t,
					     "0x%02x=0x%02x\n", i, data);
		}
	}
	return t;
}
3266
/* NVS nvs_write callback: debug/info command entry point.
 * The low byte of nvs selects an NVI_INFO_* command; commands that
 * carry data (REG_WR, MEM_RD, MEM_WR, DMP_EN_MSK) encode it in the
 * upper bytes and are executed by the subsequent nvi_nvs_read() using
 * the value stored in st->info.  The *_SPEW commands toggle their
 * st->sts debug bit immediately.
 */
static int nvi_nvs_write(void *client, int snsr_id, unsigned int nvs)
{
	struct nvi_state *st = (struct nvi_state *)client;

	switch (nvs & 0xFF) {
	case NVI_INFO_VER:
	case NVI_INFO_DBG:
	case NVI_INFO_REG_WR:
	case NVI_INFO_MEM_RD:
	case NVI_INFO_MEM_WR:
	case NVI_INFO_DMP_FW:
	case NVI_INFO_DMP_EN_MSK:
		/* deferred: acted on in nvi_nvs_read() via st->info */
		break;

	case NVI_INFO_DBG_SPEW:
		st->sts ^= NVI_DBG_SPEW_MSG;
		break;

	case NVI_INFO_AUX_SPEW:
		st->sts ^= NVI_DBG_SPEW_AUX;
		nvi_aux_dbg(st, "SNAPSHOT", 0);
		break;

	case NVI_INFO_FIFO_SPEW:
		st->sts ^= NVI_DBG_SPEW_FIFO;
		break;

	case NVI_INFO_TS_SPEW:
		st->sts ^= NVI_DBG_SPEW_TS;
		break;

	default:
		/* per-sensor spew toggle.
		 * NOTE(review): the switch masks nvs with 0xFF but this
		 * branch compares/shifts the unmasked value - confirm
		 * callers never set upper bits for these commands.
		 */
		if (nvs < (NVI_INFO_SNSR_SPEW + DEV_N))
			st->sts ^= (NVI_DBG_SPEW_SNSR <<
				    (nvs - NVI_INFO_SNSR_SPEW));
		else
			return -EINVAL;
	}

	st->info = nvs;
	return 0;
}
3309
/* NVS nvs_read callback: report the debug/info data selected by the
 * last nvi_nvs_write() command (stored in st->info).  st->info is
 * consumed and reset to NVI_INFO_VER so the next read reverts to the
 * default version report.
 * Returns the number of bytes written to buf or a negative error.
 */
static int nvi_nvs_read(void *client, int snsr_id, char *buf)
{
	struct nvi_state *st = (struct nvi_state *)client;
	u8 buf_rw[256];
	unsigned int n;
	unsigned int i;
	unsigned int info;
	int ret;
	ssize_t t;

	info = st->info;
	st->info = NVI_INFO_VER; /* one-shot command */
	switch (info & 0xFF) {
	case NVI_INFO_VER:
		/* driver/DMP versions plus a summary of key settings */
		t = sprintf(buf, "NVI driver v. %u\n", NVI_DRIVER_VERSION);
		if (st->en_msk & (1 << FW_LOADED)) {
			t += sprintf(buf + t, "DMP FW v. %u\n",
				     st->hal->dmp->fw_ver);
			t += sprintf(buf + t, "DMP enabled=%u\n",
				     !!(st->en_msk & (1 << DEV_DMP)));
		}
		t += sprintf(buf + t, "standby_en=%x\n",
			     !!(st->en_msk & (1 << EN_STDBY)));
		t += sprintf(buf + t, "bypass_timeout_ms=%u\n",
			     st->bypass_timeout_ms);
		for (i = 0; i < DEV_N_AUX; i++) {
			if (st->snsr[i].push_delay_ns)
				t += sprintf(buf + t,
					     "%s_push_delay_ns=%lld\n",
					     st->snsr[i].cfg.name,
					     st->snsr[i].push_delay_ns);
		}

		/* per-sensor DMP enable state */
		for (i = 0; i < DEV_N_AUX; i++) {
			if ((st->dmp_dev_msk | MSK_DEV_MPU_AUX) & (1 << i)) {
				if (st->dmp_en_msk & (1 << i))
					t += sprintf(buf + t, "%s_dmp_en=1\n",
						     st->snsr[i].cfg.name);
				else
					t += sprintf(buf + t, "%s_dmp_en=0\n",
						     st->snsr[i].cfg.name);
			}
		}

		return t;

	case NVI_INFO_DBG:
		/* full driver state dump: global, per-sensor, per-source */
		t = sprintf(buf, "en_msk=%x\n", st->en_msk);
		t += sprintf(buf + t, "sts=%x\n", st->sts);
		t += sprintf(buf + t, "pm=%d\n", st->pm);
		t += sprintf(buf + t, "bm_timeout_us=%u\n", st->bm_timeout_us);
		t += sprintf(buf + t, "fifo_src=%d\n", st->fifo_src);
		for (i = 0; i < DEV_N_AUX; i++) {
			t += sprintf(buf + t, "snsr[%u] %s:\n",
				     i, st->snsr[i].cfg.name);
			t += sprintf(buf + t, "enable=%x\n",
				     st->snsr[i].enable);
			t += sprintf(buf + t, "period_us=%u\n",
				     st->snsr[i].period_us);
			t += sprintf(buf + t, "timeout_us=%u\n",
				     st->snsr[i].timeout_us);
			t += sprintf(buf + t, "odr=%u\n",
				     st->snsr[i].odr);
			t += sprintf(buf + t, "ts_last=%lld\n",
				     st->snsr[i].ts_last);
			t += sprintf(buf + t, "ts_reset=%x\n",
				     st->snsr[i].ts_reset);
			t += sprintf(buf + t, "flush=%x\n",
				     st->snsr[i].flush);
			t += sprintf(buf + t, "matrix=%x\n",
				     st->snsr[i].matrix);
			t += sprintf(buf + t, "buf_shft=%d\n",
				     st->snsr[i].buf_shft);
			t += sprintf(buf + t, "buf_n=%u\n",
				     st->snsr[i].buf_n);
		}

		if (st->hal->dmp) {
			/* nvi_dmp_clk_n */
			st->hal->dmp->fn_clk_n(st, &n);
			t += sprintf(buf + t, "nvi_dmp_clk_n=%u\n", n);
			t += sprintf(buf + t, "st->dmp_clk_n=%u\n",
				     st->dmp_clk_n);
			n = SRC_DMP; /* include the DMP source below */
		} else {
			n = 0;
		}
		for (i = 0; i < SRC_N; i++) {
			if (i >= st->hal->src_n && i != SRC_DMP)
				continue;

			t += sprintf(buf + t, "src[%u]:\n", i);
			t += sprintf(buf + t, "ts_reset=%x\n",
				     st->src[i].ts_reset);
			t += sprintf(buf + t, "ts_end=%lld\n",
				     st->src[i].ts_end);
			t += sprintf(buf + t, "ts_period=%lld\n",
				     st->src[i].ts_period);
			t += sprintf(buf + t, "period_us_src=%u\n",
				     st->src[i].period_us_src);
			t += sprintf(buf + t, "period_us_req=%u\n",
				     st->src[i].period_us_req);
			t += sprintf(buf + t, "fifo_data_n=%u\n",
				     st->src[i].fifo_data_n);
			t += sprintf(buf + t, "base_t=%u\n",
				     st->src[i].base_t);
		}
		return t;

	case NVI_INFO_DBG_SPEW:
		return sprintf(buf, "DBG spew=%x\n",
			       !!(st->sts & NVI_DBG_SPEW_MSG));

	case NVI_INFO_AUX_SPEW:
		return sprintf(buf, "AUX spew=%x\n",
			       !!(st->sts & NVI_DBG_SPEW_AUX));

	case NVI_INFO_FIFO_SPEW:
		return sprintf(buf, "FIFO spew=%x\n",
			       !!(st->sts & NVI_DBG_SPEW_FIFO));

	case NVI_INFO_TS_SPEW:
		return sprintf(buf, "TS spew=%x\n",
			       !!(st->sts & NVI_DBG_SPEW_TS));

	case NVI_INFO_REG_WR:
		/* info layout: bank<<24 | reg<<16 | data<<8 | cmd */
		st->rc_dis = true; /* disable the register cache */
		buf_rw[0] = (u8)(info >> 16);
		buf_rw[1] = (u8)(info >> 8);
		ret = nvi_i2c_write(st, info >> 24, 2, buf_rw);
		return sprintf(buf, "REG WR: b=%02x r=%02x d=%02x ERR=%d\n",
			       info >> 24, buf_rw[0], buf_rw[1], ret);

	case NVI_INFO_MEM_RD:
		/* info layout: addr<<16 | len<<8 | cmd; len 0 => 256 */
		n = (info >> 8) & 0xFF;
		if (!n)
			n = sizeof(buf_rw);
		ret = nvi_mem_rd(st, info >> 16, n, buf_rw);
		if (ret)
			return sprintf(buf, "MEM RD: ERR=%d\n", ret);

		/* hex dump, 8 bytes per line, prefixed with the address */
		t = sprintf(buf, "MEM RD:\n");
		for (i = 0; i < n; i++) {
			if (!(i % 8))
				t += sprintf(buf + t, "%04x: ",
					     (info >> 16) + i);
			t += sprintf(buf + t, "%02x ", buf_rw[i]);
			if (!((i + 1) % 8))
				t += sprintf(buf + t, "\n");
		}
		t += sprintf(buf + t, "\n");
		return t;

	case NVI_INFO_MEM_WR:
		/* info layout: addr<<16 | data<<8 | cmd */
		st->mc_dis = true; /* disable the memory cache */
		buf_rw[0] = (u8)(info >> 8);
		ret = nvi_mem_wr(st, info >> 16, 1, buf_rw, true);
		return sprintf(buf, "MEM WR: a=%04x d=%02x ERR=%d\n",
			       info >> 16, buf_rw[0], ret);

	case NVI_INFO_DMP_FW:
		ret = nvi_dmp_fw(st);
		return sprintf(buf, "DMP FW: ERR=%d\n", ret);

	case NVI_INFO_DMP_EN_MSK:
		st->dmp_en_msk = (info >> 8) & MSK_DEV_ALL;
		return sprintf(buf, "st->dmp_en_msk=%x\n", st->dmp_en_msk);

	default:
		/* per-sensor spew status */
		i = info - NVI_INFO_SNSR_SPEW;
		if (i < DEV_N)
			return sprintf(buf, "%s spew=%x\n",
				       st->snsr[i].cfg.name,
				       !!(st->sts & (NVI_DBG_SPEW_SNSR << i)));
		break;
	}

	return -EINVAL;
}
3489
/* NVS framework device callbacks (see nvs.h for the contract) */
static struct nvs_fn_dev nvi_nvs_fn = {
	.enable				= nvi_enable,
	.batch				= nvi_batch,
	.flush				= nvi_flush,
	.max_range			= nvi_max_range,
	.offset				= nvi_offset,
	.thresh_lo			= nvi_thresh_lo,
	.thresh_hi			= nvi_thresh_hi,
	.reset				= nvi_reset_dev,
	.self_test			= nvi_self_test,
	.regs				= nvi_regs,
	.nvs_write			= nvi_nvs_write,
	.nvs_read			= nvi_nvs_read,
};
3504
3505
/* PM suspend: flag suspend in st->sts, suspend every NVS sensor
 * device, reconfigure the HW via nvi_en() (with NVS_STS_SUSPEND set),
 * and arm the device IRQ as a system wake source if any enabled
 * sensor is wake-capable.  Errors are OR'd into ret_t and returned.
 */
static int nvi_suspend(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct nvi_state *st = i2c_get_clientdata(client);
	unsigned int i;
	int ret;
	int ret_t = 0;
	s64 ts = 0; /* = 0 to fix compile */

	if (st->sts & NVS_STS_SPEW_MSG)
		ts = nvs_timestamp();
	st->sts |= NVS_STS_SUSPEND;
	if (st->nvs) {
		for (i = 0; i < DEV_N; i++)
			ret_t |= st->nvs->suspend(st->snsr[i].nvs_st);
	}

	nvi_mutex_lock(st);
	/* reprogram the HW now that NVS_STS_SUSPEND is visible */
	ret_t |= nvi_en(st);
	for (i = 0; i < DEV_N; i++) {
		if (st->snsr[i].enable && (st->snsr[i].cfg.flags &
					   SENSOR_FLAG_WAKE_UP)) {
			/* one enabled wake-capable sensor is enough */
			ret = irq_set_irq_wake(st->i2c->irq, 1);
			if (!ret) {
				st->irq_set_irq_wake = true;
				break;
			}
		}
	}
	if (st->sts & NVS_STS_SPEW_MSG)
		dev_info(&client->dev,
			 "%s WAKE_ON=%x elapsed_t=%lldns err=%d\n", __func__,
			 st->irq_set_irq_wake, nvs_timestamp() - ts, ret_t);
	nvi_mutex_unlock(st);
	return ret_t;
}
3542
/* PM resume: if the IRQ was armed for wake, read the interrupt status
 * to see whether the DMP significant-motion event was the wake source
 * (pushing it if so), then disarm the wake IRQ.  Resume all NVS
 * sensor devices, restore any aux bypass that was shut down, clear
 * the suspend flag and reprogram periods/enables.
 * NOTE(review): errors collected from st->nvs->resume() in ret are
 * overwritten by nvi_en() below - confirm this is intended.
 */
static int nvi_resume(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct nvi_state *st = i2c_get_clientdata(client);
	s64 ts = 0; /* = 0 to fix compile */
	unsigned int i;
	int ret;

	if (st->sts & NVS_STS_SPEW_MSG)
		ts = nvs_timestamp();
	nvi_mutex_lock(st);
	if (st->irq_set_irq_wake) {
		/* determine if wake source */
		ret = nvi_rd_int_status(st);
		if (ret) {
			dev_err(&client->dev, "%s IRQ STS ERR=%d\n",
				__func__, ret);
		} else {
			if (st->sts & NVS_STS_SPEW_MSG)
				dev_info(&client->dev,
					 "%s IRQ STS=%#x DMP=%#x\n", __func__,
					 st->rc.int_status, st->rc.int_dmp);
			if (st->rc.int_status & (1 << st->hal->bit->int_dmp)) {
				if (st->rc.int_dmp &
					       (1 << st->hal->bit->dmp_int_sm))
					nvi_push_oneshot(st, DEV_SM);
			}
		}
		ret = irq_set_irq_wake(st->i2c->irq, 0);
		if (!ret)
			st->irq_set_irq_wake = false;
	}
	nvi_mutex_unlock(st);
	ret = 0;
	if (st->nvs) {
		for (i = 0; i < DEV_N; i++)
			ret |= st->nvs->resume(st->snsr[i].nvs_st);
	}

	nvi_mutex_lock(st);
	/* if any aux port's bypass was shut down, power on and restore */
	for (i = 0; i < AUX_PORT_MAX; i++) {
		if (st->aux.port[i].nmp.shutdown_bypass)
			break;
	}
	if (i < AUX_PORT_MAX) {
		nvi_pm(st, __func__, NVI_PM_ON);
		nvi_aux_bypass_enable(st, false);
	}
	st->sts &= ~NVS_STS_SUSPEND;
	nvi_period_all(st);
	ret = nvi_en(st);
	if (st->sts & NVS_STS_SPEW_MSG)
		dev_info(&client->dev, "%s elapsed_t=%lldns err=%d\n",
			 __func__, nvs_timestamp() - ts, ret);
	nvi_mutex_unlock(st);
	return ret;
}
3600
/* system suspend/resume hooks */
static const struct dev_pm_ops nvi_pm_ops = {
	.suspend = nvi_suspend,
	.resume = nvi_resume,
};
3605
/* I2C shutdown: flag shutdown in st->sts, propagate to every NVS
 * sensor device, mask the device IRQ, and - if the HAL was ever
 * initialized - disable the user-control paths and power the device
 * off.  Also called from nvi_remove().
 */
static void nvi_shutdown(struct i2c_client *client)
{
	struct nvi_state *st = i2c_get_clientdata(client);
	unsigned int i;

	st->sts |= NVS_STS_SHUTDOWN;
	if (st->nvs) {
		for (i = 0; i < DEV_N; i++)
			st->nvs->shutdown(st->snsr[i].nvs_st);
	}
	nvi_disable_irq(st);
	if (st->hal) {
		nvi_user_ctrl_en(st, __func__, false, false, false, false);
		nvi_pm(st, __func__, NVI_PM_OFF);
	}
	if (st->sts & NVS_STS_SPEW_MSG)
		dev_info(&client->dev, "%s\n", __func__);
}
3624
3625 static int nvi_remove(struct i2c_client *client)
3626 {
3627         struct nvi_state *st = i2c_get_clientdata(client);
3628         unsigned int i;
3629
3630         if (st != NULL) {
3631                 nvi_shutdown(client);
3632                 if (st->nvs) {
3633                         for (i = 0; i < DEV_N; i++)
3634                                 st->nvs->remove(st->snsr[i].nvs_st);
3635                 }
3636                 nvi_pm_exit(st);
3637         }
3638         dev_info(&client->dev, "%s\n", __func__);
3639         return 0;
3640 }
3641
/* hw_id -> part-name/HAL map.  Entry order must line up with the
 * nvi_i2c_device_id table (enforced by the BUG_ONs in nvi_id_dev()).
 * Entry 0 is the NVI_HW_ID_AUTO wildcard used for auto-detection.
 * Note several parts share a hw_id (e.g. MPU6050/MPU9150 are both
 * 0x68), so lookup order matters in nvi_id2hal().
 */
static struct nvi_id_hal nvi_id_hals[] = {
	{ NVI_HW_ID_AUTO, NVI_NAME, &nvi_hal_6050 },
	{ NVI_HW_ID_MPU6050, NVI_NAME_MPU6050, &nvi_hal_6050 },
	{ NVI_HW_ID_MPU6500, NVI_NAME_MPU6500, &nvi_hal_6500 },
	{ NVI_HW_ID_MPU6515, NVI_NAME_MPU6515, &nvi_hal_6515 },
	{ NVI_HW_ID_MPU9150, NVI_NAME_MPU9150, &nvi_hal_6050 },
	{ NVI_HW_ID_MPU9250, NVI_NAME_MPU9250, &nvi_hal_6500 },
	{ NVI_HW_ID_MPU9350, NVI_NAME_MPU9350, &nvi_hal_6515 },
	{ NVI_HW_ID_ICM20628, NVI_NAME_ICM20628, &nvi_hal_20628 },
	{ NVI_HW_ID_ICM20630, NVI_NAME_ICM20630, &nvi_hal_20628 },
	{ NVI_HW_ID_ICM20632, NVI_NAME_ICM20632, &nvi_hal_20628 },
};
3654
3655 static int nvi_id2hal(struct nvi_state *st, u8 hw_id)
3656 {
3657         int i;
3658
3659         for (i = 1; i < (int)ARRAY_SIZE(nvi_id_hals); i++) {
3660                 if (nvi_id_hals[i].hw_id == hw_id) {
3661                         st->hal = nvi_id_hals[i].hal;
3662                         return i;
3663                 }
3664         }
3665
3666         return -ENODEV;
3667 }
3668
/* Identify the device and bind the matching HAL.
 * With an explicit i2c_device_id the HAL is taken straight from
 * nvi_id_hals[]; with NVI_NDX_AUTO the WHO_AM_I register is read and
 * matched, retrying once after a regulator-driven master reset (the
 * second attempt probes with the ICM20628 HAL's register map).
 * Afterwards the per-device sensor_cfg fields and delay limits are
 * populated from the HAL and the ROM/dev offsets are captured.
 * Returns 0 (or the HAL init result) on success, -ENODEV on failure.
 */
static int nvi_id_dev(struct nvi_state *st,
		      const struct i2c_device_id *i2c_dev_id)
{
	u8 hw_id = NVI_HW_ID_AUTO;
	unsigned int i = i2c_dev_id->driver_data;
	unsigned int dev;
	int src;
	int ret;

	/* the ID tables must stay index-aligned */
	BUG_ON(NVI_NDX_N != ARRAY_SIZE(nvi_i2c_device_id) - 1);
	BUG_ON(NVI_NDX_N != ARRAY_SIZE(nvi_id_hals));
	st->hal = nvi_id_hals[i].hal;
	if (i == NVI_NDX_AUTO) {
		nvi_pm_wr(st, __func__, 0, 0, 0);
		ret = nvi_i2c_rd(st, &st->hal->reg->who_am_i, &hw_id);
		if (ret) {
			dev_err(&st->i2c->dev, "%s AUTO ID FAILED\n",
				__func__);
			return -ENODEV;
		}

		ret = nvi_id2hal(st, hw_id);
		if (ret < 0) {
			/* retry with the ICM register map after a reset */
			st->hal = &nvi_hal_20628;
			/* cause a master reset by disabling regulators */
			nvs_vregs_disable(&st->i2c->dev, st->vreg,
					  ARRAY_SIZE(nvi_vregs));
			ret = nvi_pm_wr(st, __func__, 0, 0, 0);
			ret = nvi_i2c_rd(st, &st->hal->reg->who_am_i, &hw_id);
			if (ret) {
				dev_err(&st->i2c->dev, "%s AUTO ID FAILED\n",
					__func__);
				return -ENODEV;
			}

			ret = nvi_id2hal(st, hw_id);
			if (ret < 0) {
				dev_err(&st->i2c->dev,
					"%s hw_id=%x AUTO ID FAILED\n",
					__func__, hw_id);
				return -ENODEV;
			}
		}

		i = ret; /* index of the identified part */
	} else {
		/* cause a master reset by disabling regulators */
		nvs_vregs_disable(&st->i2c->dev, st->vreg,
				  ARRAY_SIZE(nvi_vregs));
		nvi_pm_wr(st, __func__, 0, 0, 0);
	}

	/* populate the rest of st->snsr[dev].cfg */
	for (dev = 0; dev < DEV_N; dev++) {
		st->snsr[dev].cfg.part = nvi_id_hals[i].name;
		st->snsr[dev].cfg.version = st->hal->dev[dev]->version;
		st->snsr[dev].cfg.milliamp.ival =
					      st->hal->dev[dev]->milliamp.ival;
		st->snsr[dev].cfg.milliamp.fval =
					      st->hal->dev[dev]->milliamp.fval;
	}

#define SRM				(SENSOR_FLAG_SPECIAL_REPORTING_MODE)
#define OSM				(SENSOR_FLAG_ONE_SHOT_MODE)
	BUG_ON(SRC_N < st->hal->src_n);
	/* delay limits come from the device's rate source, except for
	 * one-shot sensors which keep their defaults
	 */
	for (dev = 0; dev < DEV_N; dev++) {
		src = st->hal->dev[dev]->src;
		if (src < 0)
			continue;

		BUG_ON(src >= st->hal->src_n);
		if ((st->snsr[dev].cfg.flags & SRM) != OSM) {
			st->snsr[dev].cfg.delay_us_min =
					       st->hal->src[src].period_us_min;
			st->snsr[dev].cfg.delay_us_max =
					       st->hal->src[src].period_us_max;
		}
	}

	ret = nvs_vregs_sts(st->vreg, ARRAY_SIZE(nvi_vregs));
	if (ret < 0)
		/* regulators aren't supported so manually do master reset */
		nvi_wr_pm1(st, __func__, BIT_H_RESET);
	/* capture factory (ROM) offsets and clear the runtime offsets */
	for (i = 0; i < AXIS_N; i++) {
		st->rom_offset[DEV_ACC][i] = (s16)st->rc.accel_offset[i];
		st->rom_offset[DEV_GYR][i] = (s16)st->rc.gyro_offset[i];
		st->dev_offset[DEV_ACC][i] = 0;
		st->dev_offset[DEV_GYR][i] = 0;
	}
	if (st->hal->fn->init)
		ret = st->hal->fn->init(st);
	else
		ret = 0;
	if (hw_id == NVI_HW_ID_AUTO)
		dev_info(&st->i2c->dev, "%s: USING DEVICE TREE: %s\n",
			 __func__, i2c_dev_id->name);
	else
		dev_info(&st->i2c->dev, "%s: FOUND HW ID=%x  USING: %s\n",
			 __func__, hw_id, st->snsr[0].cfg.part);
	return ret;
}
3770
/* Default per-sensor configurations.  Copied into st->snsr[].cfg by
 * nvi_of_dt_pre() and then selectively overridden by device tree
 * properties in nvi_of_dt_pre()/nvi_of_dt_post().
 */
static struct sensor_cfg nvi_cfg_dflt[] = {
	{
		.name			= "accelerometer",
		.snsr_id		= DEV_ACC,
		.kbuf_sz		= KBUF_SZ,
		.snsr_data_n		= 14,
		.ch_n			= AXIS_N,
		.ch_sz			= -4,
		.vendor			= NVI_VENDOR,
		.float_significance	= NVS_FLOAT_NANO,
		.ch_n_max		= AXIS_N,
		.thresh_hi		= -1, /* LP */
	},
	{
		.name			= "gyroscope",
		.snsr_id		= DEV_GYR,
		.kbuf_sz		= KBUF_SZ,
		.snsr_data_n		= 14,
		.ch_n			= AXIS_N,
		.ch_sz			= -4,
		.vendor			= NVI_VENDOR,
		.max_range		= {
			.ival		= 3,
		},
		.float_significance	= NVS_FLOAT_NANO,
		.ch_n_max		= AXIS_N,
	},
	{
		.name			= "gyro_temp",
		.snsr_id		= SENSOR_TYPE_TEMPERATURE,
		.ch_n			= 1,
		.ch_sz			= -2,
		.vendor			= NVI_VENDOR,
		.flags			= SENSOR_FLAG_ON_CHANGE_MODE,
		.float_significance	= NVS_FLOAT_NANO,
	},
	{
		.name			= "significant_motion",
		.snsr_id		= DEV_SM,
		.ch_n			= 1,
		.ch_sz			= 1,
		.vendor			= NVI_VENDOR,
		.delay_us_min		= -1,
		/* delay_us_max is ignored by NVS since this is a one-shot
		 * sensor so we use it as a third threshold parameter
		 */
		.delay_us_max		= 200, /* SMD_DELAY2_THLD */
		.flags			= SENSOR_FLAG_ONE_SHOT_MODE |
					  SENSOR_FLAG_WAKE_UP,
		.thresh_lo		= 1500, /* SMD_MOT_THLD */
		.thresh_hi		= 600, /* SMD_DELAY_THLD */
	},
	{
		.name			= "step_detector",
		.snsr_id		= DEV_STP,
		.ch_n			= 1,
		.ch_sz			= 1,
		.vendor			= NVI_VENDOR,
		.delay_us_min		= -1,
		.flags			= SENSOR_FLAG_ONE_SHOT_MODE,
	},
	{
		.name			= "quaternion",
		.snsr_id		= SENSOR_TYPE_ORIENTATION,
		.kbuf_sz		= KBUF_SZ,
		.ch_n			= AXIS_N,
		.ch_sz			= -4,
		.vendor			= NVI_VENDOR,
		.delay_us_min		= 10000,
		.delay_us_max		= 255000,
	},
	{
		.name			= "geomagnetic_rotation_vector",
		.snsr_id		= DEV_GMR,
		.kbuf_sz		= KBUF_SZ,
		.ch_n			= 4,
		.ch_sz			= -4,
		.vendor			= NVI_VENDOR,
		.delay_us_min		= 10000,
		.delay_us_max		= 255000,
	},
	{
		.name			= "gyroscope_uncalibrated",
		.snsr_id		= DEV_GYU,
		.kbuf_sz		= KBUF_SZ,
		.ch_n			= AXIS_N,
		.ch_sz			= -2,
		.vendor			= NVI_VENDOR,
		.delay_us_min		= 10000,
		.delay_us_max		= 255000,
	},
};
3863
3864 /* device tree parameters before HAL initialized */
3865 static int nvi_of_dt_pre(struct nvi_state *st, struct device_node *dn)
3866 {
3867         u32 tmp;
3868         char str[64];
3869         unsigned int i;
3870
3871         for (i = 0; i < ARRAY_SIZE(nvi_cfg_dflt); i++)
3872                 memcpy(&st->snsr[i].cfg, &nvi_cfg_dflt[i],
3873                        sizeof(st->snsr[i].cfg));
3874         st->snsr[DEV_AUX].cfg.name = "auxiliary";
3875         st->en_msk = (1 << EN_STDBY);
3876         st->bypass_timeout_ms = NVI_BYPASS_TIMEOUT_MS;
3877         if (!dn)
3878                 return -EINVAL;
3879
3880         /* driver specific parameters */
3881         if (!of_property_read_u32(dn, "standby_en", &tmp)) {
3882                 if (tmp)
3883                         st->en_msk |= (1 << EN_STDBY);
3884                 else
3885                         st->en_msk &= ~(1 << EN_STDBY);
3886         }
3887         of_property_read_u32(dn, "bypass_timeout_ms", &st->bypass_timeout_ms);
3888         for (i = 0; i < DEV_N_AUX; i++) {
3889                 sprintf(str, "%s_push_delay_ns", st->snsr[i].cfg.name);
3890                 of_property_read_u32(dn, str,
3891                                      (u32 *)&st->snsr[i].push_delay_ns);
3892         }
3893
3894         return 0;
3895 }
3896
/* Parse device tree parameters that require the HAL to be initialized
 * (per-sensor NVS overrides, matrix handling, and DMP enable masks).
 */
static void nvi_of_dt_post(struct nvi_state *st, struct device_node *dn)
{
	u32 tmp;
	char str[64];
	unsigned int msk;
	unsigned int i;
	unsigned int j;

	/* sensor specific parameters */
	for (i = 0; i < DEV_N; i++)
		nvs_of_dt(dn, &st->snsr[i].cfg, NULL);

	for (i = 0; i < DEV_N; i++) {
		/* OR all 9 matrix entries to detect a non-zero matrix */
		tmp = 0;
		for (j = 0; j < 9; j++)
			tmp |= st->snsr[i].cfg.matrix[j];
		if (tmp) {
			/* sensor has a matrix */
			sprintf(str, "%s_matrix_enable", st->snsr[i].cfg.name);
			if (!of_property_read_u32(dn, str, &tmp)) {
				/* matrix override */
				if (tmp)
					/* apply matrix within kernel */
					st->snsr[i].matrix = true;
				else
					/* HAL/fusion will handle matrix */
					st->snsr[i].matrix = false;
			}
		}
	}

	/* sensor overrides that enable the DMP.
	 * If the sensor is specific to the DMP and this override is
	 * disable, then the virtual sensor is removed.
	 */
	if (st->hal->dmp) {
		st->dmp_dev_msk = st->hal->dmp->dev_msk;
		st->dmp_en_msk = st->hal->dmp->en_msk;
		for (i = 0; i < DEV_N_AUX; i++) {
			sprintf(str, "%s_dmp_en",
				st->snsr[i].cfg.name);
			if (!of_property_read_u32(dn, str, &tmp)) {
				if (tmp) {
					msk = 1 << i;
					if (MSK_DEV_DMP & msk)
						st->dmp_dev_msk |= msk;
					st->dmp_en_msk |= msk;
				} else {
					msk = ~(1 << i);
					if (MSK_DEV_DMP & (1 << i))
						st->dmp_dev_msk &= msk;
					st->dmp_en_msk &= msk;
				}
			}
		}
	}
}
3955
3956 static int nvi_init(struct nvi_state *st,
3957                     const struct i2c_device_id *i2c_dev_id)
3958 {
3959         struct mpu_platform_data *pdata;
3960         signed char matrix[9];
3961         unsigned int i;
3962         unsigned int n;
3963         int ret;
3964
3965         nvi_of_dt_pre(st, st->i2c->dev.of_node);
3966         nvi_pm_init(st);
3967         ret = nvi_id_dev(st, i2c_dev_id);
3968         if (ret)
3969                 return ret;
3970
3971         if (st->i2c->dev.of_node) {
3972                 nvi_of_dt_post(st, st->i2c->dev.of_node);
3973         } else {
3974                 pdata = dev_get_platdata(&st->i2c->dev);
3975                 if (pdata) {
3976                         memcpy(&st->snsr[DEV_ACC].cfg.matrix,
3977                                &pdata->orientation,
3978                                sizeof(st->snsr[DEV_ACC].cfg.matrix));
3979                         memcpy(&st->snsr[DEV_GYR].cfg.matrix,
3980                                &pdata->orientation,
3981                                sizeof(st->snsr[DEV_GYR].cfg.matrix));
3982                 } else {
3983                         dev_err(&st->i2c->dev, "%s dev_get_platdata ERR\n",
3984                                 __func__);
3985                         return -ENODEV;
3986                 }
3987         }
3988
3989         if (st->en_msk & (1 << FW_LOADED))
3990                 ret = 0;
3991         else
3992                 ret = nvi_dmp_fw(st);
3993         if (ret) {
3994                 /* remove DMP dependent sensors */
3995                 n = MSK_DEV_DMP;
3996         } else {
3997                 dev_info(&st->i2c->dev, "%s DMP FW loaded\n", __func__);
3998                 /* remove DMP dependent sensors not supported by this DMP */
3999                 n = MSK_DEV_DMP ^ st->dmp_dev_msk;
4000         }
4001         if (n) {
4002                 for (i = 0; i < DEV_N; i++) {
4003                         if (n & (1 << i))
4004                                 st->snsr[i].cfg.snsr_id = -1;
4005                 }
4006         }
4007
4008         nvi_nvs_fn.sts = &st->sts;
4009         nvi_nvs_fn.errs = &st->errs;
4010         st->nvs = nvs_iio();
4011         if (st->nvs == NULL)
4012                 return -ENODEV;
4013
4014         n = 0;
4015         for (i = 0; i < DEV_N; i++) {
4016                 if (st->snsr[i].matrix) {
4017                         /* matrix handled at kernel so remove from NVS */
4018                         memcpy(matrix, st->snsr[i].cfg.matrix, sizeof(matrix));
4019                         memset(st->snsr[i].cfg.matrix, 0,
4020                                sizeof(st->snsr[i].cfg.matrix));
4021                 }
4022                 ret = st->nvs->probe(&st->snsr[i].nvs_st, st, &st->i2c->dev,
4023                                      &nvi_nvs_fn, &st->snsr[i].cfg);
4024                 if (!ret) {
4025                         st->snsr[i].cfg.snsr_id = i;
4026                         if (st->snsr[i].matrix)
4027                                 memcpy(st->snsr[i].cfg.matrix, matrix,
4028                                        sizeof(st->snsr[i].cfg.matrix));
4029                         nvi_max_range(st, i, st->snsr[i].cfg.max_range.ival);
4030                         n++;
4031                 }
4032         }
4033         if (!n)
4034                 return -ENODEV;
4035
4036         ret = request_threaded_irq(st->i2c->irq, nvi_handler, nvi_thread,
4037                                    IRQF_TRIGGER_RISING, NVI_NAME, st);
4038         if (ret) {
4039                 dev_err(&st->i2c->dev, "%s req_threaded_irq ERR %d\n",
4040                         __func__, ret);
4041                 return -ENOMEM;
4042         }
4043
4044         nvi_pm(st, __func__, NVI_PM_AUTO);
4045         nvi_state_local = st;
4046         return 0;
4047 }
4048
4049 static void nvi_dmp_fw_load_worker(struct work_struct *work)
4050 {
4051         struct nvi_pdata *pd = container_of(work, struct nvi_pdata,
4052                                             fw_load_work);
4053         struct nvi_state *st = &pd->st;
4054         int ret;
4055
4056         ret = nvi_init(st, pd->i2c_dev_id);
4057         if (ret) {
4058                 dev_err(&st->i2c->dev, "%s ERR %d\n", __func__, ret);
4059                 nvi_remove(st->i2c);
4060         }
4061         dev_info(&st->i2c->dev, "%s done\n", __func__);
4062 }
4063
4064 static int nvi_probe(struct i2c_client *client,
4065                      const struct i2c_device_id *i2c_dev_id)
4066 {
4067         struct nvi_pdata *pd;
4068         struct nvi_state *st;
4069         int ret;
4070
4071         dev_info(&client->dev, "%s %s\n", __func__, i2c_dev_id->name);
4072         if (!client->irq) {
4073                 dev_err(&client->dev, "%s ERR: no interrupt\n", __func__);
4074                 return -ENODEV;
4075         }
4076
4077         /* just test if global disable */
4078         ret = nvs_of_dt(client->dev.of_node, NULL, NULL);
4079         if (ret == -ENODEV) {
4080                 dev_info(&client->dev, "%s DT disabled\n", __func__);
4081                 return -ENODEV;
4082         }
4083
4084         pd = devm_kzalloc(&client->dev, sizeof(*pd), GFP_KERNEL);
4085         if (pd == NULL)
4086                 return -ENOMEM;
4087
4088         st = &pd->st;
4089         i2c_set_clientdata(client, pd);
4090         st->i2c = client;
4091         pd->i2c_dev_id = i2c_dev_id;
4092         /* Init fw load worker thread */
4093         INIT_WORK(&pd->fw_load_work, nvi_dmp_fw_load_worker);
4094         schedule_work(&pd->fw_load_work);
4095         return 0;
4096 }
4097
/* export the i2c id table for module autoloading */
MODULE_DEVICE_TABLE(i2c, nvi_i2c_device_id);

/* device tree match table: one entry per supported Invensense part plus
 * the generic "mpu6xxx" compatible (hardware auto-detection)
 */
static const struct of_device_id nvi_of_match[] = {
	{ .compatible = "invensense,mpu6xxx", },
	{ .compatible = "invensense,mpu6050", },
	{ .compatible = "invensense,mpu6500", },
	{ .compatible = "invensense,mpu6515", },
	{ .compatible = "invensense,mpu9150", },
	{ .compatible = "invensense,mpu9250", },
	{ .compatible = "invensense,mpu9350", },
	{ .compatible = "invensense,icm20628", },
	{ .compatible = "invensense,icm20630", },
	{ .compatible = "invensense,icm20632", },
	{}
};

/* export the OF table for module autoloading on DT platforms */
MODULE_DEVICE_TABLE(of, nvi_of_match);
4115
/* i2c driver registration data; nvi_probe defers the heavy device
 * initialization to a workqueue (see nvi_dmp_fw_load_worker)
 */
static struct i2c_driver nvi_i2c_driver = {
	.class				= I2C_CLASS_HWMON,
	.probe				= nvi_probe,
	.remove				= nvi_remove,
	.shutdown			= nvi_shutdown,
	.driver				= {
		.name			= NVI_NAME,
		.owner			= THIS_MODULE,
		.of_match_table		= of_match_ptr(nvi_of_match),
		.pm			= &nvi_pm_ops,
	},
	.id_table			= nvi_i2c_device_id,
};
4129
4130 module_i2c_driver(nvi_i2c_driver);
4131
4132 MODULE_LICENSE("GPL");
4133 MODULE_DESCRIPTION("NVidiaInvensense driver");
4134 MODULE_AUTHOR("NVIDIA Corporation");
4135