1 /* drivers/input/touchscreen/maxim_sti.c
2  *
3  * Maxim SmartTouch Imager Touchscreen Driver
4  *
5  * Copyright (c)2013 Maxim Integrated Products, Inc.
6  * Copyright (C) 2013, NVIDIA Corporation.  All Rights Reserved.
7  *
8  * This software is licensed under the terms of the GNU General Public
9  * License version 2, as published by the Free Software Foundation, and
10  * may be copied, distributed, and modified under those terms.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  */
18
19 #include <linux/module.h>
20 #include <linux/kmod.h>
21 #include <linux/kthread.h>
22 #include <linux/spi/spi.h>
23 #include <linux/firmware.h>
24 #include <linux/crc16.h>
25 #include <linux/interrupt.h>
26 #include <linux/input.h>
27 #include <linux/regulator/consumer.h>
28 #include <linux/maxim_sti.h>
29 #include <asm/byteorder.h>  /* provides the __LITTLE_ENDIAN/__BIG_ENDIAN byte-order macros */
30
31 #define CREATE_TRACE_POINTS
32 #include <trace/events/touchscreen_maxim.h>
33
34 /****************************************************************************\
35 * Custom features                                                            *
36 \****************************************************************************/
37
38 #define INPUT_DEVICES               2
39 #define INPUT_ENABLE_DISABLE        1
40 #define SUSPEND_POWER_OFF           1
41 #define NV_ENABLE_CPU_BOOST         1
42 #define NV_STYLUS_FINGER_EXCLUSION  1
43
44 #define ID_FINGER   0
45 #define ID_STYLUS   (INPUT_DEVICES - 1)
46
47 #if NV_ENABLE_CPU_BOOST
48 #define INPUT_IDLE_PERIOD     (msecs_to_jiffies(50))
49 #endif
50
51 /****************************************************************************\
52 * Device context structure, globals, and macros                              *
53 \****************************************************************************/
54
55 struct dev_data;
56
57 struct chip_access_method {
58         int (*read)(struct dev_data *dd, u16 address, u8 *buf, u16 len);
59         int (*write)(struct dev_data *dd, u16 address, u8 *buf, u16 len);
60 };
61
62 struct dev_data {
63         u8                           *tx_buf;
64         u8                           *rx_buf;
65         u8                           send_fail_count;
66         u32                          nl_seq;
67         u8                           nl_mc_group_count;
68         bool                         nl_enabled;
69         bool                         start_fusion;
70         bool                         suspend_in_progress;
71         bool                         resume_in_progress;
72         bool                         expect_resume_ack;
73         bool                         eraser_active;
74 #if (INPUT_DEVICES > 1)
75         bool                         last_finger_active;
76         bool                         last_stylus_active;
77 #endif
78         bool                         legacy_acceleration;
79 #if INPUT_ENABLE_DISABLE
80         bool                         input_no_deconfig;
81 #endif
82         bool                         irq_registered;
83         u16                          irq_param[MAX_IRQ_PARAMS];
84         pid_t                        fusion_process;
85         char                         input_phys[128];
86         struct input_dev             *input_dev[INPUT_DEVICES];
87         struct completion            suspend_resume;
88         struct chip_access_method    chip;
89         struct spi_device            *spi;
90         struct genl_family           nl_family;
91         struct genl_ops              *nl_ops;
92         struct genl_multicast_group  *nl_mc_groups;
93         struct sk_buff               *outgoing_skb;
94         struct sk_buff_head          incoming_skb_queue;
95         struct task_struct           *thread;
96         struct sched_param           thread_sched;
97         struct list_head             dev_list;
98         struct regulator             *reg_avdd;
99         struct regulator             *reg_dvdd;
100         void                         (*service_irq)(struct dev_data *dd);
101 #if NV_ENABLE_CPU_BOOST
102         unsigned long                last_irq_jiffies;
103 #endif
104 };
105
106 atomic_t touch_dvdd_on = ATOMIC_INIT(1);
107 static int prev_dvdd_rail_state;
108
109 static struct list_head  dev_list;
110 static spinlock_t        dev_lock;
111
112 static irqreturn_t irq_handler(int irq, void *context);
113 static void service_irq(struct dev_data *dd);
114 static void service_irq_legacy_acceleration(struct dev_data *dd);
115
116 #define ERROR(a, b...) printk(KERN_ERR "%s driver(ERROR:%s:%d): " a "\n", \
117                               dd->nl_family.name, __func__, __LINE__, ##b)
118 #define INFO(a, b...) printk(KERN_INFO "%s driver: " a "\n", \
119                              dd->nl_family.name, ##b)
120
121 /****************************************************************************\
122 * Chip access methods                                                        *
123 \****************************************************************************/
124
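/*
 * Low-level full-duplex SPI read: the first transmitted word carries the
 * register address shifted left by one with the read bit set, an optional
 * second word carries the transfer length in words, and the received
 * payload is byte-swapped on little-endian hosts before being copied back
 * to the caller's buffer.
 */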
125 static inline int
126 spi_read_123(struct dev_data *dd, u16 address, u8 *buf, u16 len, bool add_len)
127 {
128         struct spi_message   message;
129         struct spi_transfer  transfer;
130         u16                  *tx_buf = (u16 *)dd->tx_buf;
131         u16                  *rx_buf = (u16 *)dd->rx_buf;
132         u16                  words = len / sizeof(u16), header_len = 1;
133         u16                  *ptr2 = rx_buf + 1;
134 #ifdef __LITTLE_ENDIAN
135         u16                  *ptr1 = (u16 *)buf, i;
136 #endif
137         int                  ret;
138
139         if (tx_buf == NULL || rx_buf == NULL)
140                 return -ENOMEM;
141
142         tx_buf[0] = (address << 1) | 0x0001;
143 #ifdef __LITTLE_ENDIAN
144         tx_buf[0] = (tx_buf[0] << 8) | (tx_buf[0] >> 8);
145 #endif
146
147         if (add_len) {
148                 tx_buf[1] = words;
149 #ifdef __LITTLE_ENDIAN
150                 tx_buf[1] = (tx_buf[1] << 8) | (tx_buf[1] >> 8);
151 #endif
152                 ptr2++;
153                 header_len++;
154         }
155
156         spi_message_init(&message);
157         memset(&transfer, 0, sizeof(transfer));
158
159         transfer.len = len + header_len * sizeof(u16);
160         transfer.tx_buf = tx_buf;
161         transfer.rx_buf = rx_buf;
162         spi_message_add_tail(&transfer, &message);
163
164         do {
165                 ret = spi_sync(dd->spi, &message);
166         } while (ret == -EAGAIN);
167
168 #ifdef __LITTLE_ENDIAN
169         for (i = 0; i < words; i++)
170                 ptr1[i] = (ptr2[i] << 8) | (ptr2[i] >> 8);
171 #else
172         memcpy(buf, ptr2, len);
173 #endif
174         return ret;
175 }
176
177 static inline int
178 spi_write_123(struct dev_data *dd, u16 address, u8 *buf, u16 len,
179               bool add_len)
180 {
181         struct maxim_sti_pdata  *pdata = dd->spi->dev.platform_data;
182         u16                     *tx_buf = (u16 *)dd->tx_buf;
183         u16                     words = len / sizeof(u16), header_len = 1;
184 #ifdef __LITTLE_ENDIAN
185         u16                     i;
186 #endif
187         int  ret;
188
189         if (tx_buf == NULL)
190                 return -ENOMEM;
191
192         tx_buf[0] = address << 1;
193         if (add_len) {
194                 tx_buf[1] = words;
195                 header_len++;
196         }
197         memcpy(tx_buf + header_len, buf, len);
198 #ifdef __LITTLE_ENDIAN
199         for (i = 0; i < (words + header_len); i++)
200                 tx_buf[i] = (tx_buf[i] << 8) | (tx_buf[i] >> 8);
201 #endif
202
203         do {
204                 ret = spi_write(dd->spi, tx_buf,
205                                 len + header_len * sizeof(u16));
206         } while (ret == -EAGAIN);
207
208         memset(dd->tx_buf, 0xFF, pdata->tx_buf_size);
209         return ret;
210 }
211
212 /* ======================================================================== */
213
214 static int
215 spi_read_1(struct dev_data *dd, u16 address, u8 *buf, u16 len)
216 {
217         return spi_read_123(dd, address, buf, len, true);
218 }
219
220 static int
221 spi_write_1(struct dev_data *dd, u16 address, u8 *buf, u16 len)
222 {
223         return spi_write_123(dd, address, buf, len, true);
224 }
225
226 /* ======================================================================== */
227
228 static inline int
229 stop_legacy_acceleration(struct dev_data *dd)
230 {
231         u16  value = 0xDEAD, status, i;
232         int  ret;
233
234         ret = spi_write_123(dd, 0x0003, (u8 *)&value,
235                                 sizeof(value), false);
236         if (ret < 0)
237                 return -1;
238         usleep_range(100, 120);
239
240         for (i = 0; i < 200; i++) {
241                 ret = spi_read_123(dd, 0x0003, (u8 *)&status, sizeof(status),
242                                    false);
243                 if (ret < 0)
244                         return -1;
245                 if (status == 0xABCD)
246                         return 0;
247         }
248
249         return -2;
250 }
251
252 static inline int
253 start_legacy_acceleration(struct dev_data *dd)
254 {
255         u16  value = 0xBEEF;
256         int  ret;
257
258         ret = spi_write_123(dd, 0x0003, (u8 *)&value, sizeof(value), false);
259         usleep_range(100, 120);
260
261         return ret;
262 }
263
264 static inline int
265 spi_rw_2_poll_status(struct dev_data *dd)
266 {
267         u16  status, i;
268         int  ret;
269
270         for (i = 0; i < 200; i++) {
271                 ret = spi_read_123(dd, 0x0000, (u8 *)&status, sizeof(status),
272                                    false);
273                 if (ret < 0)
274                         return -1;
275                 if (status == 0xABCD)
276                         return 0;
277         }
278
279         return -2;
280 }
281
282 static inline int
283 spi_read_2_page(struct dev_data *dd, u16 address, u8 *buf, u16 len)
284 {
285         u16  request[] = {0xFEDC, (address << 1) | 0x0001, len / sizeof(u16)};
286         int  ret;
287
288         /* write read request header */
289         ret = spi_write_123(dd, 0x0000, (u8 *)request, sizeof(request),
290                             false);
291         if (ret < 0)
292                 return -1;
293
294         /* poll status */
295         ret = spi_rw_2_poll_status(dd);
296         if (ret < 0)
297                 return ret;
298
299         /* read data */
300         ret = spi_read_123(dd, 0x0004, (u8 *)buf, len, false);
301         return ret;
302 }
303
304 static inline int
305 spi_write_2_page(struct dev_data *dd, u16 address, u8 *buf, u16 len)
306 {
307         u16  page[254];
308         int  ret;
309
310         page[0] = 0xFEDC;
311         page[1] = address << 1;
312         page[2] = len / sizeof(u16);
313         memcpy(page + 4, buf, len);
314
315         /* write data with write request header */
316         ret = spi_write_123(dd, 0x0000, (u8 *)page, len + 4 * sizeof(u16),
317                             false);
318         if (ret < 0)
319                 return -1;
320
321         /* poll status */
322         return spi_rw_2_poll_status(dd);
323 }
324
325 static inline int
326 spi_rw_2(struct dev_data *dd, u16 address, u8 *buf, u16 len,
327          int (*func)(struct dev_data *dd, u16 address, u8 *buf, u16 len))
328 {
329         u16  rx_len, rx_limit = 250 * sizeof(u16), offset = 0;
330         int  ret;
331
332         while (len > 0) {
333                 rx_len = (len > rx_limit) ? rx_limit : len;
334                 if (dd->legacy_acceleration)
335                         stop_legacy_acceleration(dd);
336                 ret = func(dd, address + (offset / sizeof(u16)), buf + offset,
337                            rx_len);
338                 if (dd->legacy_acceleration)
339                         start_legacy_acceleration(dd);
340                 if (ret < 0)
341                         return ret;
342                 offset += rx_len;
343                 len -= rx_len;
344         }
345
346         return 0;
347 }
348
349 static int
350 spi_read_2(struct dev_data *dd, u16 address, u8 *buf, u16 len)
351 {
352         return spi_rw_2(dd, address, buf, len, spi_read_2_page);
353 }
354
355 static int
356 spi_write_2(struct dev_data *dd, u16 address, u8 *buf, u16 len)
357 {
358         return spi_rw_2(dd, address, buf, len, spi_write_2_page);
359 }
360
361 /* ======================================================================== */
362
363 static int
364 spi_read_3(struct dev_data *dd, u16 address, u8 *buf, u16 len)
365 {
366         return spi_read_123(dd, address, buf, len, false);
367 }
368
369 static int
370 spi_write_3(struct dev_data *dd, u16 address, u8 *buf, u16 len)
371 {
372         return spi_write_123(dd, address, buf, len, false);
373 }
374
375 /* ======================================================================== */
376
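/*
 * Register access methods selectable at runtime via DR_CHIP_ACCESS_METHOD:
 * method 1 prefixes each transfer with a word count, method 2 tunnels
 * transfers through the paged request/status protocol at address 0x0000,
 * and method 3 issues plain address-based transfers.
 */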
377 static struct chip_access_method chip_access_methods[] = {
378         {
379                 .read = spi_read_1,
380                 .write = spi_write_1,
381         },
382         {
383                 .read = spi_read_2,
384                 .write = spi_write_2,
385         },
386         {
387                 .read = spi_read_3,
388                 .write = spi_write_3,
389         },
390 };
391
392 static int
393 set_chip_access_method(struct dev_data *dd, u8 method)
394 {
395         if (method == 0 || method > ARRAY_SIZE(chip_access_methods))
396                 return -1;
397
398         memcpy(&dd->chip, &chip_access_methods[method - 1], sizeof(dd->chip));
399         return 0;
400 }
401
402 /* ======================================================================== */
403
404 static inline int
405 stop_legacy_acceleration_canned(struct dev_data *dd)
406 {
407         u16  value = dd->irq_param[18];
408
409         return dd->chip.write(dd, dd->irq_param[16], (u8 *)&value,
410                               sizeof(value));
411 }
412
413 static inline int
414 start_legacy_acceleration_canned(struct dev_data *dd)
415 {
416         u16  value = dd->irq_param[17];
417
418         return dd->chip.write(dd, dd->irq_param[16], (u8 *)&value,
419                               sizeof(value));
420 }
421
422 /* ======================================================================== */
423
424 #define FLASH_BLOCK_SIZE  64      /* flash write buffer in words */
425 #define FIRMWARE_SIZE     0xC000  /* fixed 48 KiB image */
426
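/*
 * Bootloader protocol helpers: commands and data are exchanged one 16-bit
 * word at a time through registers 0x00FE/0x00FF, with 0xABCC used as the
 * ready/valid marker and 0x5432 as the completion acknowledge.
 */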
427 static int bootloader_wait_ready(struct dev_data *dd)
428 {
429         u16  status, i;
430
431         for (i = 0; i < 15; i++) {
432                 if (spi_read_3(dd, 0x00FF, (u8 *)&status,
433                                sizeof(status)) != 0)
434                         return -1;
435                 if (status == 0xABCC)
436                         return 0;
437                 if (i >= 3)
438                         usleep_range(500, 700);
439         }
440         ERROR("unexpected status %04X", status);
441         return -1;
442 }
443
444 static int bootloader_complete(struct dev_data *dd)
445 {
446         u16  value = 0x5432;
447
448         return spi_write_3(dd, 0x00FF, (u8 *)&value, sizeof(value));
449 }
450
451 static int bootloader_read_data(struct dev_data *dd, u16 *value)
452 {
453         u16  buffer[2];
454
455         if (spi_read_3(dd, 0x00FE, (u8 *)buffer, sizeof(buffer)) != 0)
456                 return -1;
457         if (buffer[1] != 0xABCC)
458                 return -1;
459
460         *value = buffer[0];
461         return bootloader_complete(dd);
462 }
463
464 static int bootloader_write_data(struct dev_data *dd, u16 value)
465 {
466         u16  buffer[2] = {value, 0x5432};
467
468         if (bootloader_wait_ready(dd) != 0)
469                 return -1;
470         return spi_write_3(dd, 0x00FE, (u8 *)buffer, sizeof(buffer));
471 }
472
473 static int bootloader_wait_command(struct dev_data *dd)
474 {
475         u16  value, i;
476
477         for (i = 0; i < 15; i++) {
478                 if (bootloader_read_data(dd, &value) == 0 && value == 0x003E)
479                         return 0;
480                 if (i >= 3)
481                         usleep_range(500, 700);
482         }
483         return -1;
484 }
485
486 static int bootloader_enter(struct dev_data *dd)
487 {
488         int i;
489         u16 enter[3] = {0x0047, 0x00C7, 0x0007};
490
491         for (i = 0; i < 3; i++) {
492                 if (spi_write_3(dd, 0x7F00, (u8 *)&enter[i],
493                                 sizeof(enter[i])) != 0)
494                         return -1;
495         }
496
497         if (bootloader_wait_command(dd) != 0)
498                 return -1;
499         return 0;
500 }
501
502 static int bootloader_exit(struct dev_data *dd)
503 {
504         u16  value = 0x0000;
505
506         if (bootloader_write_data(dd, 0x0001) != 0)
507                 return -1;
508         return spi_write_3(dd, 0x7F00, (u8 *)&value, sizeof(value));
509 }
510
511 static int bootloader_get_crc(struct dev_data *dd, u16 *crc16, u16 len)
512 {
513         u16 command[] = {0x0030, 0x0002, 0x0000, 0x0000, len & 0xFF,
514                         len >> 8}, value[2], i;
515
516         for (i = 0; i < ARRAY_SIZE(command); i++)
517                 if (bootloader_write_data(dd, command[i]) != 0)
518                         return -1;
519         msleep(200); /* allow 200 ms for the CRC calculation to complete */
520
521         for (i = 0; i < 2; i++)
522                 if (bootloader_read_data(dd, &value[i]) != 0)
523                         return -1;
524
525         if (bootloader_wait_command(dd) != 0)
526                 return -1;
527         *crc16 = (value[1] << 8) | value[0];
528         return 0;
529 }
530
531 static int bootloader_set_byte_mode(struct dev_data *dd)
532 {
533         u16  command[2] = {0x000A, 0x0000}, i;
534
535         for (i = 0; i < ARRAY_SIZE(command); i++)
536                 if (bootloader_write_data(dd, command[i]) != 0)
537                         return -1;
538         if (bootloader_wait_command(dd) != 0)
539                 return -1;
540         return 0;
541 }
542
543 static int bootloader_erase_flash(struct dev_data *dd)
544 {
545         if (bootloader_write_data(dd, 0x0002) != 0)
546                 return -1;
547         msleep(60); /* allow 60 ms for the flash erase command */
548         if (bootloader_wait_command(dd) != 0)
549                 return -1;
550         return 0;
551 }
552
553 static int bootloader_write_flash(struct dev_data *dd, u16 *image, u16 len)
554 {
555         u16  command[] = {0x00F0, 0x0000, len >> 8, 0x0000, 0x0000};
556         u16  i, buffer[FLASH_BLOCK_SIZE];
557
558         for (i = 0; i < ARRAY_SIZE(command); i++)
559                 if (bootloader_write_data(dd, command[i]) != 0)
560                         return -1;
561
562         for (i = 0; i < ((len / sizeof(u16)) / FLASH_BLOCK_SIZE); i++) {
563                 if (bootloader_wait_ready(dd) != 0)
564                         return -1;
565                 memcpy(buffer, (void *)(image + i * FLASH_BLOCK_SIZE),
566                         sizeof(buffer));
567                 if (spi_write_3(dd, ((i % 2) == 0) ? 0x0000 : 0x0040,
568                                 (u8 *)buffer, sizeof(buffer)) != 0)
569                         return -1;
570                 if (bootloader_complete(dd) != 0)
571                         return -1;
572         }
573
574         usleep_range(10000, 11000);
575         if (bootloader_wait_command(dd) != 0)
576                 return -1;
577         return 0;
578 }
579
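/*
 * Compare the CRC16 of the requested firmware image against the CRC16
 * reported by the chip, and reflash (erase, switch to byte mode, write,
 * verify) only on mismatch.
 */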
580 static int device_fw_load(struct dev_data *dd, const struct firmware *fw)
581 {
582         u16  fw_crc16, chip_crc16;
583
584         fw_crc16 = crc16(0, fw->data, fw->size);
585         INFO("firmware size (%d) CRC16(0x%04X)", fw->size, fw_crc16);
586         if (bootloader_enter(dd) != 0) {
587                 ERROR("failed to enter bootloader");
588                 return -1;
589         }
590         if (bootloader_get_crc(dd, &chip_crc16, fw->size) != 0) {
591                 ERROR("failed to get CRC16 from the chip");
592                 return -1;
593         }
594         INFO("chip CRC16(0x%04X)", chip_crc16);
595         if (fw_crc16 != chip_crc16) {
596                 INFO("will reprogram chip");
597                 if (bootloader_erase_flash(dd) != 0) {
598                         ERROR("failed to erase chip flash");
599                         return -1;
600                 }
601                 INFO("flash erase OK");
602                 if (bootloader_set_byte_mode(dd) != 0) {
603                         ERROR("failed to set byte mode");
604                         return -1;
605                 }
606                 INFO("byte mode OK");
607                 if (bootloader_write_flash(dd, (u16 *)fw->data,
608                                                         fw->size) != 0) {
609                         ERROR("failed to write flash");
610                         return -1;
611                 }
612                 INFO("flash write OK");
613                 if (bootloader_get_crc(dd, &chip_crc16, fw->size) != 0) {
614                         ERROR("failed to get CRC16 from the chip");
615                         return -1;
616                 }
617                 if (fw_crc16 != chip_crc16) {
618                         ERROR("failed to verify programming! (0x%04X)",
619                               chip_crc16);
620                         return -1;
621                 }
622                 INFO("chip programmed successfully, new chip CRC16(0x%04X)",
623                         chip_crc16);
624         }
625         if (bootloader_exit(dd) != 0) {
626                 ERROR("failed to exit bootloader");
627                 return -1;
628         }
629         return 0;
630 }
631
632 static int fw_request_load(struct dev_data *dd)
633 {
634         const struct firmware *fw;
635         struct maxim_sti_pdata *pdata = dd->spi->dev.platform_data;
636         char *fw_name = pdata->fw_name;
637         int  ret;
638
639         ret = request_firmware(&fw, fw_name, &dd->spi->dev);
640         if (ret || fw == NULL) {
641                 ERROR("firmware request failed (%d,%p)", ret, fw);
642                 return -1;
643         }
644         if (fw->size != FIRMWARE_SIZE) {
645                 release_firmware(fw);
646                 ERROR("incoming firmware is of wrong size (%04X)", fw->size);
647                 return -1;
648         }
649         ret = device_fw_load(dd, fw);
650         if (ret != 0 && bootloader_exit(dd) != 0)
651                 ERROR("failed to exit bootloader");
652         release_firmware(fw);
653         return ret;
654 }
655
656 /* ======================================================================== */
657
658 static void stop_scan_canned(struct dev_data *dd)
659 {
660         u16  value;
661
662         if (dd->legacy_acceleration)
663                 (void)stop_legacy_acceleration_canned(dd);
664         value = dd->irq_param[13];
665         (void)dd->chip.write(dd, dd->irq_param[12], (u8 *)&value,
666                              sizeof(value));
667         value = dd->irq_param[11];
668         (void)dd->chip.write(dd, dd->irq_param[0], (u8 *)&value,
669                              sizeof(value));
670         usleep_range(dd->irq_param[15], dd->irq_param[15] + 1000);
671         (void)dd->chip.write(dd, dd->irq_param[0], (u8 *)&value,
672                              sizeof(value));
673 }
674
675 #if !SUSPEND_POWER_OFF
676 static void start_scan_canned(struct dev_data *dd)
677 {
678         u16  value;
679
680         if (dd->legacy_acceleration) {
681                 (void)start_legacy_acceleration_canned(dd);
682         } else {
683                 value = dd->irq_param[14];
684                 (void)dd->chip.write(dd, dd->irq_param[12], (u8 *)&value,
685                                      sizeof(value));
686         }
687 }
688 #endif
689
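/*
 * Power rail sequencing: dvdd is enabled before avdd and the rails are
 * disabled in the reverse order; prev_dvdd_rail_state and the global
 * touch_dvdd_on flag track whether the dvdd rail was actually power-cycled.
 */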
690 static int regulator_control(struct dev_data *dd, bool on)
691 {
692         int ret = 0;
693
694         if (!dd->reg_avdd || !dd->reg_dvdd)
695                 return 0;
696
697         if (on) {
698                 ret = regulator_enable(dd->reg_dvdd);
699                 if (ret < 0) {
700                         ERROR("Failed to enable regulator dvdd: %d", ret);
701                         return ret;
702                 }
703                 usleep_range(1000, 1020);
704
705                 ret = regulator_enable(dd->reg_avdd);
706                 if (ret < 0) {
707                         ERROR("Failed to enable regulator avdd: %d", ret);
708                         regulator_disable(dd->reg_dvdd);
709                         return ret;
710                 }
711                 if (prev_dvdd_rail_state == 0)
712                         atomic_set(&touch_dvdd_on, 1);
713
714                 prev_dvdd_rail_state = 1;
715         } else {
716                 if (regulator_is_enabled(dd->reg_avdd))
717                         ret = regulator_disable(dd->reg_avdd);
718                 if (ret < 0) {
719                         ERROR("Failed to disable regulator avdd: %d", ret);
720                         return ret;
721                 }
722
723                 if (regulator_is_enabled(dd->reg_dvdd))
724                         ret = regulator_disable(dd->reg_dvdd);
725                 if (ret < 0) {
726                         ERROR("Failed to disable regulator dvdd: %d", ret);
727                         regulator_enable(dd->reg_avdd);
728                         return ret;
729                 }
730
731                 if (!regulator_is_enabled(dd->reg_dvdd)) {
732                         prev_dvdd_rail_state = 0;
733                         msleep(200);
734                 } else
735                         prev_dvdd_rail_state = 1;
736         }
737
738         return 0;
739 }
740
741 static void regulator_init(struct dev_data *dd)
742 {
743         dd->reg_avdd = devm_regulator_get(&dd->spi->dev, "avdd");
744         if (IS_ERR(dd->reg_avdd))
745                 goto err_null_regulator;
746
747         dd->reg_dvdd = devm_regulator_get(&dd->spi->dev, "dvdd");
748         if (IS_ERR(dd->reg_dvdd))
749                 goto err_null_regulator;
750
751         return;
752
753 err_null_regulator:
754         dd->reg_avdd = NULL;
755         dd->reg_dvdd = NULL;
756         dev_warn(&dd->spi->dev, "Failed to init regulators\n");
757 }
758
759 /****************************************************************************\
760 * Suspend/resume processing                                                  *
761 \****************************************************************************/
762
763 #ifdef CONFIG_PM_SLEEP
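/*
 * Suspend hands control to the driver thread (wake_up_process +
 * wait_for_completion) so the chip is quiesced before the reset line is
 * dropped and the rails are optionally powered off.
 */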
764 static int suspend(struct device *dev)
765 {
766         struct dev_data  *dd = spi_get_drvdata(to_spi_device(dev));
767         struct maxim_sti_pdata *pdata = dev->platform_data;
768         int ret;
769
770         INFO("suspending...");
771
772         if (dd->suspend_in_progress)
773                 return 0;
774
775         dd->suspend_in_progress = true;
776         wake_up_process(dd->thread);
777         wait_for_completion(&dd->suspend_resume);
778
779 #if SUSPEND_POWER_OFF
780         /* reset-low and power-down */
781         pdata->reset(pdata, 0);
782         usleep_range(100, 120);
783         ret = regulator_control(dd, false);
784         if (ret < 0)
785                 return ret;
786 #endif
787
788         INFO("suspend...done");
789
790         return 0;
791 }
792
793 static int resume(struct device *dev)
794 {
795         struct dev_data  *dd = spi_get_drvdata(to_spi_device(dev));
796         struct maxim_sti_pdata *pdata = dev->platform_data;
797         int ret;
798
799         INFO("resuming...");
800
801         if (!dd->suspend_in_progress)
802                 return 0;
803
804 #if SUSPEND_POWER_OFF
805         /* power-up and reset-high */
806         pdata->reset(pdata, 0);
807         ret = regulator_control(dd, true);
808         if (ret < 0)
809                 return ret;
810         usleep_range(300, 400);
811         pdata->reset(pdata, 1);
812 #endif
813
814         dd->resume_in_progress = true;
815         wake_up_process(dd->thread);
816         wait_for_completion(&dd->suspend_resume);
817
818         INFO("resume...done");
819
820         return 0;
821 }
822
823 static const struct dev_pm_ops pm_ops = {
824         .suspend = suspend,
825         .resume = resume,
826 };
827
828 #if INPUT_ENABLE_DISABLE
829 static int input_disable(struct input_dev *dev)
830 {
831         struct dev_data *dd = input_get_drvdata(dev);
832
833         return suspend(&dd->spi->dev);
834 }
835
836 static int input_enable(struct input_dev *dev)
837 {
838         struct dev_data *dd = input_get_drvdata(dev);
839
840         return resume(&dd->spi->dev);
841 }
842 #endif
843 #endif
844
845 /****************************************************************************\
846 * Netlink processing                                                         *
847 \****************************************************************************/
848
849 static inline int
850 nl_msg_new(struct dev_data *dd, u8 dst)
851 {
852         dd->outgoing_skb = alloc_skb(NL_BUF_SIZE, GFP_KERNEL);
853         if (dd->outgoing_skb == NULL)
854                 return -ENOMEM;
855         nl_msg_init(dd->outgoing_skb->data, dd->nl_family.id, dd->nl_seq++,
856                     dst);
857         if (dd->nl_seq == 0)
858                 dd->nl_seq++;
859         return 0;
860 }
861
862 static int
863 nl_callback_noop(struct sk_buff *skb, struct genl_info *info)
864 {
865         return 0;
866 }
867
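/*
 * Handle one netlink attribute from the fusion daemon.  Returns true when a
 * response attribute has been queued in dd->outgoing_skb (echo, chip read,
 * IRQ line status) and the caller should send the reply.
 */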
868 static inline bool
869 nl_process_driver_msg(struct dev_data *dd, u16 msg_id, void *msg)
870 {
871         struct maxim_sti_pdata        *pdata = dd->spi->dev.platform_data;
872         struct dr_add_mc_group        *add_mc_group_msg;
873         struct dr_echo_request        *echo_msg;
874         struct fu_echo_response       *echo_response;
875         struct dr_chip_read           *read_msg;
876         struct fu_chip_read_result    *read_result;
877         struct dr_chip_write          *write_msg;
878         struct dr_chip_access_method  *chip_access_method_msg;
879         struct dr_delay               *delay_msg;
880         struct fu_irqline_status      *irqline_status;
881         struct dr_config_irq          *config_irq_msg;
882         struct dr_config_input        *config_input_msg;
883         struct dr_config_watchdog     *config_watchdog_msg;
884         struct dr_input               *input_msg;
885         struct dr_legacy_acceleration *legacy_acceleration_msg;
886         u8                            i, inp;
887         int                           ret;
888
889         if (dd->expect_resume_ack && msg_id != DR_DECONFIG &&
890             msg_id != DR_RESUME_ACK && msg_id != DR_CONFIG_WATCHDOG &&
891                 msg_id != DR_ADD_MC_GROUP && msg_id != DR_ECHO_REQUEST)
892                 return false;
893
894         switch (msg_id) {
895         case DR_ADD_MC_GROUP:
896                 add_mc_group_msg = msg;
897                 if (add_mc_group_msg->number >= pdata->nl_mc_groups) {
898                         ERROR("invalid multicast group number %d (%d)",
899                               add_mc_group_msg->number, pdata->nl_mc_groups);
900                         return false;
901                 }
902                 if (dd->nl_mc_groups[add_mc_group_msg->number].id != 0)
903                         return false;
904                 dd->nl_ops[add_mc_group_msg->number].cmd =
905                                                 add_mc_group_msg->number;
906                 dd->nl_ops[add_mc_group_msg->number].doit = nl_callback_noop;
907                 ret = genl_register_ops(&dd->nl_family,
908                                 &dd->nl_ops[add_mc_group_msg->number]);
909                 if (ret < 0)
910                         ERROR("failed to add multicast group op (%d)", ret);
911                 GENL_COPY(dd->nl_mc_groups[add_mc_group_msg->number].name,
912                           add_mc_group_msg->name);
913                 ret = genl_register_mc_group(&dd->nl_family,
914                                 &dd->nl_mc_groups[add_mc_group_msg->number]);
915                 if (ret < 0)
916                         ERROR("failed to add multicast group (%d)", ret);
917                 return false;
918         case DR_ECHO_REQUEST:
919                 echo_msg = msg;
920                 echo_response = nl_alloc_attr(dd->outgoing_skb->data,
921                                               FU_ECHO_RESPONSE,
922                                               sizeof(*echo_response));
923                 if (echo_response == NULL)
924                         goto alloc_attr_failure;
925                 echo_response->cookie = echo_msg->cookie;
926                 return true;
927         case DR_CHIP_READ:
928                 read_msg = msg;
929                 read_result = nl_alloc_attr(dd->outgoing_skb->data,
930                                 FU_CHIP_READ_RESULT,
931                                 sizeof(*read_result) + read_msg->length);
932                 if (read_result == NULL)
933                         goto alloc_attr_failure;
934                 read_result->address = read_msg->address;
935                 read_result->length = read_msg->length;
936                 ret = dd->chip.read(dd, read_msg->address, read_result->data,
937                                     read_msg->length);
938                 if (ret < 0)
939                         ERROR("failed to read from chip (%d)", ret);
940                 return true;
941         case DR_CHIP_WRITE:
942                 write_msg = msg;
943                 ret = dd->chip.write(dd, write_msg->address, write_msg->data,
944                                      write_msg->length);
945                 if (ret < 0)
946                         ERROR("failed to write chip (%d)", ret);
947                 return false;
948         case DR_CHIP_RESET:
949                 pdata->reset(pdata, ((struct dr_chip_reset *)msg)->state);
950                 return false;
951         case DR_GET_IRQLINE:
952                 irqline_status = nl_alloc_attr(dd->outgoing_skb->data,
953                                                FU_IRQLINE_STATUS,
954                                                sizeof(*irqline_status));
955                 if (irqline_status == NULL)
956                         goto alloc_attr_failure;
957                 irqline_status->status = pdata->irq(pdata);
958                 return true;
959         case DR_DELAY:
960                 delay_msg = msg;
961                 if (delay_msg->period > 1000)
962                         msleep(delay_msg->period / 1000);
963                 usleep_range(delay_msg->period % 1000,
964                             (delay_msg->period % 1000) + 10);
965                 return false;
966         case DR_CHIP_ACCESS_METHOD:
967                 chip_access_method_msg = msg;
968                 ret = set_chip_access_method(dd,
969                                              chip_access_method_msg->method);
970                 if (ret < 0)
971                         ERROR("failed to set chip access method (%d) (%d)",
972                               ret, chip_access_method_msg->method);
973                 return false;
974         case DR_CONFIG_IRQ:
975                 config_irq_msg = msg;
976                 if (config_irq_msg->irq_params > MAX_IRQ_PARAMS) {
977                         ERROR("too many IRQ parameters");
978                         return false;
979                 }
980                 memcpy(dd->irq_param, config_irq_msg->irq_param,
981                        config_irq_msg->irq_params * sizeof(dd->irq_param[0]));
982                 if (dd->irq_registered)
983                         return false;
984                 dd->service_irq = service_irq;
985                 ret = request_irq(dd->spi->irq, irq_handler,
986                         (config_irq_msg->irq_edge == DR_IRQ_RISING_EDGE) ?
987                                 IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING,
988                                                 pdata->nl_family, dd);
989                 if (ret < 0) {
990                         ERROR("failed to request IRQ (%d)", ret);
991                 } else {
992                         dd->irq_registered = true;
993                         wake_up_process(dd->thread);
994                 }
995                 return false;
996         case DR_CONFIG_INPUT:
997                 config_input_msg = msg;
998                 for (i = 0; i < INPUT_DEVICES; i++)
999                         if (dd->input_dev[i] != NULL)
1000                                 return false;
1001                 for (i = 0; i < INPUT_DEVICES; i++) {
1002                         dd->input_dev[i] = input_allocate_device();
1003                         if (dd->input_dev[i] == NULL) {
1004                                 ERROR("failed to allocate input device");
1005                                 continue;
1006                         }
1007                         snprintf(dd->input_phys, sizeof(dd->input_phys),
1008                                  "%s/input%d", dev_name(&dd->spi->dev), i);
1009                         dd->input_dev[i]->name = pdata->nl_family;
1010                         dd->input_dev[i]->phys = dd->input_phys;
1011                         dd->input_dev[i]->id.bustype = BUS_SPI;
1012 #if defined(CONFIG_PM_SLEEP) && INPUT_ENABLE_DISABLE
1013                         if (i == 0) {
1014                                 dd->input_dev[i]->enable = input_enable;
1015                                 dd->input_dev[i]->disable = input_disable;
1016                                 dd->input_dev[i]->enabled = true;
1017                                 input_set_drvdata(dd->input_dev[i], dd);
1018                         }
1019 #endif
1020 #if NV_ENABLE_CPU_BOOST
1021                         if (i == 0)
1022                                 input_set_capability(dd->input_dev[i], EV_MSC,
1023                                                      MSC_ACTIVITY);
1024 #endif
1025                         __set_bit(EV_SYN, dd->input_dev[i]->evbit);
1026                         __set_bit(EV_ABS, dd->input_dev[i]->evbit);
1027                         if (i == ID_STYLUS) {
1028                                 __set_bit(EV_KEY, dd->input_dev[i]->evbit);
1029                                 __set_bit(BTN_TOOL_RUBBER,
1030                                           dd->input_dev[i]->keybit);
1031                         }
1032                         input_set_abs_params(dd->input_dev[i],
1033                                              ABS_MT_POSITION_X, 0,
1034                                              config_input_msg->x_range, 0, 0);
1035                         input_set_abs_params(dd->input_dev[i],
1036                                              ABS_MT_POSITION_Y, 0,
1037                                              config_input_msg->y_range, 0, 0);
1038                         input_set_abs_params(dd->input_dev[i],
1039                                              ABS_MT_PRESSURE, 0, 0xFF, 0, 0);
1040                         input_set_abs_params(dd->input_dev[i],
1041                                              ABS_MT_TRACKING_ID, 0,
1042                                              MAX_INPUT_EVENTS, 0, 0);
1043
1044                         if (i == ID_STYLUS) {
1045                                 input_set_abs_params(dd->input_dev[i],
1046                                         ABS_MT_TOOL_TYPE, 0, MT_TOOL_MAX,
1047                                         0, 0);
1048                         } else {
1049                                 input_set_abs_params(dd->input_dev[i],
1050                                         ABS_MT_TOOL_TYPE, 0, MT_TOOL_FINGER,
1051                                         0, 0);
1052                         }
1053
1054                         ret = input_register_device(dd->input_dev[i]);
1055                         if (ret < 0) {
1056                                 input_free_device(dd->input_dev[i]);
1057                                 dd->input_dev[i] = NULL;
1058                                 ERROR("failed to register input device");
1059                         }
1060                 }
1061                 return false;
1062         case DR_CONFIG_WATCHDOG:
1063                 config_watchdog_msg = msg;
1064                 dd->fusion_process = (pid_t)config_watchdog_msg->pid;
1065                 dd->expect_resume_ack = false;
1066                 return false;
1067         case DR_DECONFIG:
1068                 if (dd->irq_registered) {
1069                         free_irq(dd->spi->irq, dd);
1070                         dd->irq_registered = false;
1071                 }
1072                 stop_scan_canned(dd);
1073                 if (!dd->input_no_deconfig) {
1074                         for (i = 0; i < INPUT_DEVICES; i++) {
1075                                 if (dd->input_dev[i] == NULL)
1076                                         continue;
1077                                 input_unregister_device(dd->input_dev[i]);
1078                                 dd->input_dev[i] = NULL;
1079                         }
1080                 }
1081 #if (INPUT_DEVICES > 1)
1082                 dd->last_finger_active = false;
1083                 dd->last_stylus_active = false;
1084 #endif
1085                 dd->expect_resume_ack = false;
1086                 dd->eraser_active = false;
1087                 dd->legacy_acceleration = false;
1088                 dd->service_irq = service_irq;
1089                 dd->fusion_process = (pid_t)0;
1090                 return false;
1091         case DR_INPUT:
1092                 input_msg = msg;
1093                 if (input_msg->events == 0) {
1094                         if (dd->eraser_active) {
1095                                 input_report_key(
1096                                         dd->input_dev[ID_STYLUS],
1097                                         BTN_TOOL_RUBBER, 0);
1098                                 dd->eraser_active = false;
1099                         }
1100                         for (i = 0; i < INPUT_DEVICES; i++) {
1101                                 input_mt_sync(dd->input_dev[i]);
1102                                 input_sync(dd->input_dev[i]);
1103                         }
1104 #if (INPUT_DEVICES > 1)
1105                         dd->last_finger_active = false;
1106                         dd->last_stylus_active = false;
1107 #endif
1108                 } else {
1109 #if (INPUT_DEVICES > 1)
1110                         bool current_finger_active = false;
1111                         bool current_stylus_active = false;
1112                         for (i = 0; i < input_msg->events; i++) {
1113                                 if (!current_finger_active &&
1114                                         (input_msg->event[i].tool_type
1115                                         == DR_INPUT_FINGER)) {
1116                                         current_finger_active = true;
1117                                 }
1118                                 if (!current_stylus_active &&
1119                                         ((input_msg->event[i].tool_type
1120                                         == DR_INPUT_STYLUS) ||
1121                                         (input_msg->event[i].tool_type
1122                                         == DR_INPUT_ERASER))) {
1123                                         current_stylus_active = true;
1124                                 }
1125                         }
1126 #if NV_STYLUS_FINGER_EXCLUSION
1127                         if (dd->last_finger_active && !dd->last_stylus_active &&
1128                                 current_stylus_active) {
1129 #else
1130                         if (dd->last_finger_active && !current_finger_active) {
1131 #endif
1132                                 input_mt_sync(dd->input_dev[ID_FINGER]);
1133                                 input_sync(dd->input_dev[ID_FINGER]);
1134                         }
1135                         if (dd->last_stylus_active && !current_stylus_active) {
1136                                 if (dd->eraser_active) {
1137                                         input_report_key(
1138                                                 dd->input_dev[ID_STYLUS],
1139                                                 BTN_TOOL_RUBBER, 0);
1140                                         dd->eraser_active = false;
1141                                 }
1142                                 input_mt_sync(dd->input_dev[ID_STYLUS]);
1143                                 input_sync(dd->input_dev[ID_STYLUS]);
1144                         }
1145                         dd->last_finger_active = current_finger_active;
1146                         dd->last_stylus_active = current_stylus_active;
1147 #endif
1148                         for (i = 0; i < input_msg->events; i++) {
1149 #if (INPUT_DEVICES > 1) && NV_STYLUS_FINGER_EXCLUSION
1150                                 if ((input_msg->event[i].tool_type
1151                                         == DR_INPUT_FINGER) &&
1152                                         current_stylus_active) {
1153                                         continue;
1154                                 }
1155 #endif
1156                                 switch (input_msg->event[i].tool_type) {
1157                                 case DR_INPUT_FINGER:
1158                                         inp = ID_FINGER;
1159                                         input_report_abs(dd->input_dev[inp],
1160                                                          ABS_MT_TOOL_TYPE,
1161                                                          MT_TOOL_FINGER);
1162                                         break;
1163                                 case DR_INPUT_STYLUS:
1164                                         inp = ID_STYLUS;
1165                                         input_report_abs(dd->input_dev[inp],
1166                                                          ABS_MT_TOOL_TYPE,
1167                                                          MT_TOOL_PEN);
1168                                         break;
1169                                 case DR_INPUT_ERASER:
1170                                         inp = ID_STYLUS;
1171                                         input_report_key(dd->input_dev[inp],
1172                                                 BTN_TOOL_RUBBER, 1);
1173                                         dd->eraser_active = true;
1174                                         break;
1175                                 default:
1176                                         inp = ID_FINGER;
1177                                         ERROR("invalid input tool type (%d)",
1178                                               input_msg->event[i].tool_type);
1179                                         break;
1180                                 }
1181                                 input_report_abs(dd->input_dev[inp],
1182                                                  ABS_MT_TRACKING_ID,
1183                                                  input_msg->event[i].id);
1184                                 input_report_abs(dd->input_dev[inp],
1185                                                  ABS_MT_POSITION_X,
1186                                                  input_msg->event[i].x);
1187                                 input_report_abs(dd->input_dev[inp],
1188                                                  ABS_MT_POSITION_Y,
1189                                                  input_msg->event[i].y);
1190                                 input_report_abs(dd->input_dev[inp],
1191                                                  ABS_MT_PRESSURE,
1192                                                  input_msg->event[i].z);
1193                                 input_mt_sync(dd->input_dev[inp]);
1194                         }
1195                         for (i = 0; i < INPUT_DEVICES; i++)
1196                                 input_sync(dd->input_dev[i]);
1197                 }
1198                 return false;
1199         case DR_RESUME_ACK:
1200                 dd->expect_resume_ack = false;
1201                 if (dd->irq_registered)
1202                         enable_irq(dd->spi->irq);
1203                 return false;
1204         case DR_LEGACY_FWDL:
1205                 ret = fw_request_load(dd);
1206                 if (ret < 0)
1207                         ERROR("firmware download failed (%d)", ret);
1208                 else
1209                         INFO("firmware download OK");
1210                 return false;
1211         case DR_LEGACY_ACCELERATION:
1212                 legacy_acceleration_msg = msg;
1213                 if (legacy_acceleration_msg->enable) {
1214                         dd->service_irq = service_irq_legacy_acceleration;
1215                         start_legacy_acceleration(dd);
1216                         dd->legacy_acceleration = true;
1217                 } else {
1218                         stop_legacy_acceleration(dd);
1219                         dd->legacy_acceleration = false;
1220                         dd->service_irq = service_irq;
1221                 }
1222                 return false;
1223         default:
1224                 ERROR("unexpected message %d", msg_id);
1225                 return false;
1226         }
1227
1228 alloc_attr_failure:
1229         ERROR("failed to allocate response for msg_id %d", msg_id);
1230         return false;
1231 }
1232
1233 static int nl_process_msg(struct dev_data *dd, struct sk_buff *skb)
1234 {
1235         struct nlattr  *attr;
1236         bool           send_reply = false;
1237         int            ret = 0, ret2;
1238
1239         /* process incoming message */
1240         attr = NL_ATTR_FIRST(skb->data);
1241         for (; attr < NL_ATTR_LAST(skb->data); attr = NL_ATTR_NEXT(attr)) {
1242                 if (nl_process_driver_msg(dd, attr->nla_type,
1243                                           NL_ATTR_VAL(attr, void)))
1244                         send_reply = true;
1245         }
1246
1247         /* send back reply if requested */
1248         if (send_reply) {
1249                 (void)skb_put(dd->outgoing_skb,
1250                               NL_SIZE(dd->outgoing_skb->data));
1251                 if (NL_SEQ(skb->data) == 0)
1252                         ret = genlmsg_unicast(sock_net(skb->sk),
1253                                               dd->outgoing_skb,
1254                                               NETLINK_CB(skb).pid);
1255                 else
1256                         ret = genlmsg_multicast(dd->outgoing_skb, 0,
1257                                         dd->nl_mc_groups[MC_FUSION].id,
1258                                         GFP_KERNEL);
1259                 if (ret < 0)
1260                         ERROR("could not reply to fusion (%d)", ret);
1261
1262                 /* allocate new outgoing skb */
1263                 ret2 = nl_msg_new(dd, MC_FUSION);
1264                 if (ret2 < 0)
1265                         ERROR("could not allocate outgoing skb (%d)", ret2);
1266         }
1267
1268         /* free incoming message */
1269         kfree_skb(skb);
1270         return ret;
1271 }
1272
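/*
 * Netlink callbacks run in genetlink context: driver-bound messages are
 * cloned and queued for the worker thread, while fusion-bound messages are
 * rebroadcast to the MC_FUSION multicast group.
 */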
1273 static int
1274 nl_callback_driver(struct sk_buff *skb, struct genl_info *info)
1275 {
1276         struct dev_data  *dd;
1277         struct sk_buff   *skb2;
1278         unsigned long    flags;
1279
1280         /* locate device structure */
1281         spin_lock_irqsave(&dev_lock, flags);
1282         list_for_each_entry(dd, &dev_list, dev_list)
1283                 if (dd->nl_family.id == NL_TYPE(skb->data))
1284                         break;
1285         spin_unlock_irqrestore(&dev_lock, flags);
1286         if (&dd->dev_list == &dev_list)
1287                 return -ENODEV;
1288         if (!dd->nl_enabled)
1289                 return -EAGAIN;
1290
1291         /* queue incoming skb and wake up processing thread */
1292         skb2 = skb_clone(skb, GFP_ATOMIC);
1293         if (skb2 == NULL) {
1294                 ERROR("failed to clone incoming skb");
1295                 return -ENOMEM;
1296         } else {
1297                 skb_queue_tail(&dd->incoming_skb_queue, skb2);
1298                 wake_up_process(dd->thread);
1299                 return 0;
1300         }
1301 }
1302
1303 static int
1304 nl_callback_fusion(struct sk_buff *skb, struct genl_info *info)
1305 {
1306         struct dev_data  *dd;
1307         unsigned long    flags;
1308
1309         /* locate device structure */
1310         spin_lock_irqsave(&dev_lock, flags);
1311         list_for_each_entry(dd, &dev_list, dev_list)
1312                 if (dd->nl_family.id == NL_TYPE(skb->data))
1313                         break;
1314         spin_unlock_irqrestore(&dev_lock, flags);
1315         if (&dd->dev_list == &dev_list)
1316                 return -ENODEV;
1317         if (!dd->nl_enabled)
1318                 return -EAGAIN;
1319
1320         (void)genlmsg_multicast(skb_clone(skb, GFP_ATOMIC), 0,
1321                                 dd->nl_mc_groups[MC_FUSION].id, GFP_ATOMIC);
1322         return 0;
1323 }
1324
1325 /****************************************************************************\
1326 * Interrupt processing                                                       *
1327 \****************************************************************************/
1328
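/*
 * Hard IRQ handler: optionally reports an activity event used for CPU
 * boosting, then defers all chip access to the driver thread.
 */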
1329 static irqreturn_t irq_handler(int irq, void *context)
1330 {
1331         struct dev_data  *dd = context;
1332
1333         trace_touchscreen_maxim_irq("irq_handler");
1334
1335 #if NV_ENABLE_CPU_BOOST
1336         if (time_after(jiffies, dd->last_irq_jiffies + INPUT_IDLE_PERIOD))
1337                 input_event(dd->input_dev[0], EV_MSC, MSC_ACTIVITY, 1);
1338         dd->last_irq_jiffies = jiffies;
1339 #endif
1340
1341         wake_up_process(dd->thread);
1342         return IRQ_HANDLED;
1343 }
1344
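/*
 * Legacy acceleration service routine: streams the touch frame directly
 * over SPI in chunks of up to 250 words and multicasts it to the fusion
 * daemon as a single FU_ASYNC_DATA attribute.
 */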
1345 static void service_irq_legacy_acceleration(struct dev_data *dd)
1346 {
1347         struct fu_async_data  *async_data;
1348         u16                   len, rx_len = 0, offset = 0;
1349         u16                   buf[255], rx_limit = 250 * sizeof(u16);
1350         int                   ret = 0, counter = 0;
1351
1352         async_data = nl_alloc_attr(dd->outgoing_skb->data, FU_ASYNC_DATA,
1353                                    sizeof(*async_data) + dd->irq_param[4] +
1354                                    2 * sizeof(u16));
1355         if (async_data == NULL) {
1356                 ERROR("can't add data to async IRQ buffer");
1357                 return;
1358         }
1359         async_data->length = dd->irq_param[4] + 2 * sizeof(u16);
1360         len = async_data->length;
1361         async_data->address = 0;
1362
1363         while (len > 0) {
1364                 rx_len = (len > rx_limit) ? rx_limit : len;
1365                 ret = spi_read_123(dd, 0x0000, (u8 *)&buf,
1366                                         rx_len + 4 * sizeof(u16), false);
1367                 if (ret < 0)
1368                         break;
1369
1370                 if (buf[3] == 0xBABE) {
1371                         dd->legacy_acceleration = false;
1372                         dd->service_irq = service_irq;
1373                         nl_msg_init(dd->outgoing_skb->data, dd->nl_family.id,
1374                                     dd->nl_seq - 1, MC_FUSION);
1375                         return;
1376                 }
1377
1378                 if (rx_limit == rx_len)
1379                         usleep_range(200, 300);
1380
1381                 if (buf[0] == 0x6060) {
1382                         ERROR("data not ready");
1383                         start_legacy_acceleration_canned(dd);
1384                         ret = -EBUSY;
1385                         break;
1386                 } else if (buf[0] == 0x8070) {
1387                         if (buf[1] == dd->irq_param[1] ||
1388                                         buf[1] == dd->irq_param[2])
1389                                 async_data->address = buf[1];
1390
1391                         if (async_data->address +
1392                                         offset / sizeof(u16) != buf[1]) {
1393                                 ERROR("sequence number incorrect %04X", buf[1]);
1394                                 start_legacy_acceleration_canned(dd);
1395                                 ret = -EBUSY;
1396                                 break;
1397                         }
1398                 }
1399                 counter++;
1400                 memcpy(async_data->data + offset, buf + 4, rx_len);
1401                 offset += rx_len;
1402                 len -= rx_len;
1403         }
1404         async_data->status = *(buf + rx_len / sizeof(u16) + 2);
1405
1406         if (ret < 0) {
1407                 ERROR("can't read IRQ buffer (%d)", ret);
1408                 nl_msg_init(dd->outgoing_skb->data, dd->nl_family.id,
1409                             dd->nl_seq - 1, MC_FUSION);
1410         } else {
1411                 (void)skb_put(dd->outgoing_skb,
1412                               NL_SIZE(dd->outgoing_skb->data));
1413                 ret = genlmsg_multicast(dd->outgoing_skb, 0,
1414                                         dd->nl_mc_groups[MC_FUSION].id,
1415                                         GFP_KERNEL);
1416                 if (ret < 0) {
1417                         ERROR("can't send IRQ buffer (%d)", ret);
1418                         msleep(300);
1419                         if (++dd->send_fail_count >= 10 &&
1420                             dd->fusion_process != (pid_t)0) {
1421                                 (void)kill_pid(
1422                                         find_get_pid(dd->fusion_process),
1423                                         SIGKILL, 1);
1424                                 wake_up_process(dd->thread);
1425                         }
1426                 } else {
1427                         dd->send_fail_count = 0;
1428                 }
1429                 ret = nl_msg_new(dd, MC_FUSION);
1430                 if (ret < 0)
1431                         ERROR("could not allocate outgoing skb (%d)", ret);
1432         }
1433 }
1434
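/*
 * Normal IRQ servicing: read the IRQ status register (irq_param[0]), work
 * out which frame buffer(s) to fetch from the status bits, forward the
 * data to the touch_fusion daemon over netlink, then clear the handled
 * status bits.
 */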
1435 static void service_irq(struct dev_data *dd)
1436 {
1437         struct fu_async_data  *async_data;
1438         u16                   status, clear, test, address[2], xbuf;
1439         bool                  read_buf[2] = {true, false};
1440         int                   ret, ret2;
1441
1442         ret = dd->chip.read(dd, dd->irq_param[0], (u8 *)&status,
1443                             sizeof(status));
1444         if (ret < 0) {
1445                 ERROR("can't read IRQ status (%d)", ret);
1446                 return;
1447         }
1448
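        /*
         * Decode the status word: irq_param[10] means status-only (clear
         * everything), irq_param[9] additionally requests the buffer at
         * irq_param[3], and irq_param[6]/[7] select between the frame
         * buffers at irq_param[1]/[2], with irq_param[5] breaking the tie
         * when both are set.
         */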
1449         if (status & dd->irq_param[10]) {
1450                 read_buf[0] = false;
1451                 clear = 0xFFFF;
1452         } else if (status & dd->irq_param[9]) {
1453                 test = status & (dd->irq_param[6] | dd->irq_param[7]);
1454
1455                 if (test == (dd->irq_param[6] | dd->irq_param[7]))
1456                         xbuf = ((status & dd->irq_param[5]) != 0) ? 0 : 1;
1457                 else if (test == dd->irq_param[6])
1458                         xbuf = 0;
1459                 else if (test == dd->irq_param[7])
1460                         xbuf = 1;
1461                 else {
1462                         ERROR("unexpected IRQ handler case");
1463                         return;
1464                 }
1465                 read_buf[1] = true;
1466                 address[1] = xbuf ? dd->irq_param[2] : dd->irq_param[1];
1467
1468                 address[0] = dd->irq_param[3];
1469                 clear = dd->irq_param[6] | dd->irq_param[7] |
1470                         dd->irq_param[8] | dd->irq_param[9];
1471         } else {
1472                 test = status & (dd->irq_param[6] | dd->irq_param[7]);
1473
1474                 if (test == 0)
1475                         return;
1476                 else if (test == (dd->irq_param[6] | dd->irq_param[7]))
1477                         xbuf = ((status & dd->irq_param[5]) == 0) ? 0 : 1;
1478                 else if (test == dd->irq_param[6])
1479                         xbuf = 0;
1480                 else if (test == dd->irq_param[7])
1481                         xbuf = 1;
1482                 else {
1483                         ERROR("unexpected IRQ handler case");
1484                         return;
1485                 }
1486
1487                 address[0] = xbuf ? dd->irq_param[2] : dd->irq_param[1];
1488                 clear = xbuf ? dd->irq_param[7] : dd->irq_param[6];
1489                 clear |= dd->irq_param[8];
1490         }
1491
1492         async_data = nl_alloc_attr(dd->outgoing_skb->data, FU_ASYNC_DATA,
1493                                    sizeof(*async_data) + dd->irq_param[4]);
1494         if (async_data == NULL) {
1495                 ERROR("can't add data to async IRQ buffer 1");
1496                 return;
1497         }
1498
1499         async_data->status = status;
1500         if (read_buf[0]) {
1501                 async_data->address = address[0];
1502                 async_data->length = dd->irq_param[4];
1503                 ret = dd->chip.read(dd, address[0], async_data->data,
1504                                     dd->irq_param[4]);
1505         }
1506
1507         if (read_buf[1] && ret == 0) {
1508                 async_data = nl_alloc_attr(dd->outgoing_skb->data,
1509                                            FU_ASYNC_DATA,
1510                                            sizeof(*async_data) +
1511                                                 dd->irq_param[4]);
1512                 if (async_data == NULL) {
1513                         ERROR("can't add data to async IRQ buffer 2");
1514                         nl_msg_init(dd->outgoing_skb->data, dd->nl_family.id,
1515                                     dd->nl_seq - 1, MC_FUSION);
1516                         return;
1517                 }
1518                 async_data->address = address[1];
1519                 async_data->length = dd->irq_param[4];
1520                 async_data->status = status;
1521                 ret = dd->chip.read(dd, address[1], async_data->data,
1522                                     dd->irq_param[4]);
1523         }
1524
1525         ret2 = dd->chip.write(dd, dd->irq_param[0], (u8 *)&clear,
1526                              sizeof(clear));
1527         if (ret2 < 0)
1528                 ERROR("can't clear IRQ status (%d)", ret2);
1529
1530         if (ret < 0) {
1531                 ERROR("can't read IRQ buffer (%d)", ret);
1532                 nl_msg_init(dd->outgoing_skb->data, dd->nl_family.id,
1533                             dd->nl_seq - 1, MC_FUSION);
1534         } else {
1535                 (void)skb_put(dd->outgoing_skb,
1536                               NL_SIZE(dd->outgoing_skb->data));
1537                 ret = genlmsg_multicast(dd->outgoing_skb, 0,
1538                                         dd->nl_mc_groups[MC_FUSION].id,
1539                                         GFP_KERNEL);
1540                 if (ret < 0) {
1541                         ERROR("can't send IRQ buffer (%d)", ret);
1542                         msleep(300);
1543                         if (read_buf[0] == false ||
1544                             (++dd->send_fail_count >= 10 &&
1545                              dd->fusion_process != (pid_t)0)) {
1546                                 (void)kill_pid(
1547                                         find_get_pid(dd->fusion_process),
1548                                         SIGKILL, 1);
1549                                 wake_up_process(dd->thread);
1550                         }
1551                 } else {
1552                         dd->send_fail_count = 0;
1553                 }
1554                 ret = nl_msg_new(dd, MC_FUSION);
1555                 if (ret < 0)
1556                         ERROR("could not allocate outgoing skb (%d)", ret);
1557         }
1558 }
1559
1560 /****************************************************************************\
1561 * Processing thread                                                          *
1562 \****************************************************************************/
1563
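/*
 * Main worker thread.  In priority order: (re)start the touch_fusion
 * daemon, process queued netlink messages, perform the suspend/resume
 * handshake, and service chip interrupts while the IRQ line is asserted.
 */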
1564 static int processing_thread(void *arg)
1565 {
1566         struct dev_data         *dd = arg;
1567         struct maxim_sti_pdata  *pdata = dd->spi->dev.platform_data;
1568         struct sk_buff          *skb;
1569         char                    *argv[] = { pdata->touch_fusion, "daemon",
1570                                             pdata->nl_family,
1571                                             pdata->config_file, NULL };
1572         int                     ret, ret2;
1573         bool                    fusion_dead;
1574
1575         sched_setscheduler(current, SCHED_FIFO, &dd->thread_sched);
1576
1577         while (!kthread_should_stop()) {
1578                 set_current_state(TASK_INTERRUPTIBLE);
1579
1580                 /* ensure that we have an outgoing skb */
1581                 if (dd->outgoing_skb == NULL)
1582                         if (nl_msg_new(dd, MC_FUSION) < 0) {
1583                                 schedule();
1584                                 continue;
1585                         }
1586
1587                 /* priority 1: start up fusion process */
1588                 if (dd->fusion_process != (pid_t)0 && get_pid_task(
1589                                         find_get_pid(dd->fusion_process),
1590                                         PIDTYPE_PID) == NULL &&
1591                                         !dd->suspend_in_progress) {
1592                         stop_scan_canned(dd);
1593                         dd->start_fusion = true;
1594                         dd->fusion_process = (pid_t)0;
1595 #if INPUT_ENABLE_DISABLE
1596                         dd->input_no_deconfig = true;
1597 #endif
1598                 }
1599                 if (dd->start_fusion) {
1600                         do {
1601                                 ret = call_usermodehelper(argv[0], argv, NULL,
1602                                                           UMH_WAIT_EXEC);
1603                                 if (ret != 0)
1604                                         msleep(100);
1605                         } while (ret != 0 && !kthread_should_stop());
1606                         dd->start_fusion = false;
1607                 }
1608                 if (kthread_should_stop())
1609                         break;
1610
1611                 /* priority 2: process pending Netlink messages */
1612                 while ((skb = skb_dequeue(&dd->incoming_skb_queue)) != NULL) {
1613                         if (kthread_should_stop())
1614                                 break;
1615                         if (nl_process_msg(dd, skb) < 0)
1616                                 skb_queue_purge(&dd->incoming_skb_queue);
1617                 }
1618                 if (kthread_should_stop())
1619                         break;
1620
1621                 /* priority 3: suspend/resume */
1622                 if (dd->suspend_in_progress) {
1623                         if (dd->irq_registered)
1624                                 disable_irq(dd->spi->irq);
1625                         stop_scan_canned(dd);
1626                         complete(&dd->suspend_resume);
1627
1628                         INFO("%s: suspended.", __func__);
1629                         while (!dd->resume_in_progress) {
1630                                 /* state MUST be re-set before schedule() */
1631                                 set_current_state(TASK_INTERRUPTIBLE);
1632                                 schedule();
1633                         }
1634
1635                         INFO("%s: resuming.", __func__);
1636
1637 #if !SUSPEND_POWER_OFF
1638                         start_scan_canned(dd);
1639 #endif
1640                         dd->resume_in_progress = false;
1641                         dd->suspend_in_progress = false;
1642                         complete(&dd->suspend_resume);
1643
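                        /*
                         * Notify touch_fusion of the resume; retry until the
                         * multicast succeeds, or bail out if the daemon has
                         * died in the meantime.
                         */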
1644                         fusion_dead = false;
1645                         do {
1646                                 if (dd->fusion_process != (pid_t)0 &&
1647                                     get_pid_task(find_get_pid(
1648                                                         dd->fusion_process),
1649                                                  PIDTYPE_PID) == NULL) {
1650                                         fusion_dead = true;
1651                                         break;
1652                                 }
1653                                 ret = nl_add_attr(dd->outgoing_skb->data,
1654                                                   FU_RESUME, NULL, 0);
1655                                 if (ret < 0) {
1656                                         ERROR("can't add data to resume " \
1657                                               "buffer");
1658                                         nl_msg_init(dd->outgoing_skb->data,
1659                                                     dd->nl_family.id,
1660                                                     dd->nl_seq - 1, MC_FUSION);
1661                                         msleep(100);
1662                                         continue;
1663                                 }
1664                                 (void)skb_put(dd->outgoing_skb,
1665                                               NL_SIZE(dd->outgoing_skb->data));
1666                                 ret = genlmsg_multicast(dd->outgoing_skb, 0,
1667                                                 dd->nl_mc_groups[MC_FUSION].id,
1668                                                 GFP_KERNEL);
1669                                 if (ret < 0) {
1670                                         ERROR("can't send resume message %d",
1671                                               ret);
1672                                         msleep(100);
1673                                 }
1674                                 ret2 = nl_msg_new(dd, MC_FUSION);
1675                                 if (ret2 < 0)
1676                                         ERROR("could not allocate outgoing " \
1677                                               "skb (%d)", ret2);
1678                         } while (ret != 0);
1679                         if (fusion_dead)
1680                                 continue;
1681                         if (ret == 0)
1682                                 INFO("%s: resumed.", __func__);
1683                 }
1684
1685                 /* priority 4: service interrupt */
1686                 if (dd->irq_registered && !dd->expect_resume_ack &&
1687                     pdata->irq(pdata) == 0)
1688                         dd->service_irq(dd);
1689                 if (dd->irq_registered && !dd->expect_resume_ack &&
1690                     pdata->irq(pdata) == 0)
1691                         continue;
1692
1693                 /* nothing more to do; sleep */
1694                 schedule();
1695         }
1696
1697         return 0;
1698 }
1699
1700 /****************************************************************************\
1701 * Driver initialization                                                      *
1702 \****************************************************************************/
1703
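/*
 * Probe: validate platform data, allocate the device context and SPI
 * transfer buffers, power up and reset the chip, start the worker thread,
 * register the netlink family/ops/multicast groups, and schedule the
 * touch_fusion daemon start-up.
 */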
1704 static int probe(struct spi_device *spi)
1705 {
1706         struct maxim_sti_pdata  *pdata = spi->dev.platform_data;
1707         struct dev_data         *dd;
1708         unsigned long           flags;
1709         int                     ret, i;
1710         void                    *ptr;
1711
1712         /* validate platform data */
1713         if (pdata == NULL || pdata->init == NULL || pdata->reset == NULL ||
1714                 pdata->irq == NULL || pdata->touch_fusion == NULL ||
1715                 pdata->config_file == NULL || pdata->nl_family == NULL ||
1716                 GENL_CHK(pdata->nl_family) ||
1717                 pdata->nl_mc_groups < MC_REQUIRED_GROUPS ||
1718                 pdata->chip_access_method == 0 ||
1719                 pdata->chip_access_method > ARRAY_SIZE(chip_access_methods) ||
1720                 pdata->default_reset_state > 1)
1721                         return -EINVAL;
1722
1723         /* device context: allocate structure */
1724         dd = kzalloc(sizeof(*dd) + pdata->tx_buf_size + pdata->rx_buf_size +
1725                      sizeof(*dd->nl_ops) * pdata->nl_mc_groups +
1726                      sizeof(*dd->nl_mc_groups) * pdata->nl_mc_groups,
1727                      GFP_KERNEL);
1728         if (dd == NULL)
1729                 return -ENOMEM;
1730
1731         /* device context: set up dynamic allocation pointers */
1732         ptr = (void *)dd + sizeof(*dd);
1733         if (pdata->tx_buf_size > 0) {
1734                 dd->tx_buf = ptr;
1735                 ptr += pdata->tx_buf_size;
1736         }
1737         if (pdata->rx_buf_size > 0) {
1738                 dd->rx_buf = ptr;
1739                 ptr += pdata->rx_buf_size;
1740         }
1741         dd->nl_ops = ptr;
1742         ptr += sizeof(*dd->nl_ops) * pdata->nl_mc_groups;
1743         dd->nl_mc_groups = ptr;
1744
1745         /* device context: initialize structure members */
1746         spi_set_drvdata(spi, dd);
1747         dd->spi = spi;
1748         dd->nl_seq = 1;
1749         init_completion(&dd->suspend_resume);
1750         memset(dd->tx_buf, 0xFF, pdata->tx_buf_size);
1751         (void)set_chip_access_method(dd, pdata->chip_access_method);
1752
1753         /* initialize regulators */
1754         regulator_init(dd);
1755
1756         /* initialize platform */
1757         ret = pdata->init(pdata, true);
1758         if (ret < 0)
1759                 goto platform_failure;
1760
1761         /* power-up and reset-high */
1762         ret = regulator_control(dd, true);
1763         if (ret < 0)
1764                 goto platform_failure;
1765         usleep_range(300, 400);
1766         pdata->reset(pdata, 1);
1767
1768         /* Netlink: initialize incoming skb queue */
1769         skb_queue_head_init(&dd->incoming_skb_queue);
1770
1771         /* start processing thread */
1772         dd->thread_sched.sched_priority = MAX_USER_RT_PRIO / 2;
1773         dd->thread = kthread_run(processing_thread, dd, pdata->nl_family);
1774         if (IS_ERR(dd->thread)) {
1775                 ret = PTR_ERR(dd->thread);
1776                 goto platform_failure;
1777         }
1778
1779         /* Netlink: register GENL family */
1780         dd->nl_family.id      = GENL_ID_GENERATE;
1781         dd->nl_family.version = NL_FAMILY_VERSION;
1782         GENL_COPY(dd->nl_family.name, pdata->nl_family);
1783         ret = genl_register_family(&dd->nl_family);
1784         if (ret < 0)
1785                 goto nl_family_failure;
1786
1787         /* Netlink: register family ops */
1788         for (i = 0; i < MC_REQUIRED_GROUPS; i++) {
1789                 dd->nl_ops[i].cmd = i;
1790                 dd->nl_ops[i].doit = nl_callback_noop;
1791         }
1792         dd->nl_ops[MC_DRIVER].doit = nl_callback_driver;
1793         dd->nl_ops[MC_FUSION].doit = nl_callback_fusion;
1794         for (i = 0; i < MC_REQUIRED_GROUPS; i++) {
1795                 ret = genl_register_ops(&dd->nl_family, &dd->nl_ops[i]);
1796                 if (ret < 0)
1797                         goto nl_failure;
1798         }
1799
1800         /* Netlink: register family multicast groups */
1801         GENL_COPY(dd->nl_mc_groups[MC_DRIVER].name, MC_DRIVER_NAME);
1802         GENL_COPY(dd->nl_mc_groups[MC_FUSION].name, MC_FUSION_NAME);
1803         for (i = 0; i < MC_REQUIRED_GROUPS; i++) {
1804                 ret = genl_register_mc_group(&dd->nl_family,
1805                                              &dd->nl_mc_groups[i]);
1806                 if (ret < 0)
1807                         goto nl_failure;
1808         }
1809         dd->nl_mc_group_count = MC_REQUIRED_GROUPS;
1810
1811         /* Netlink: pre-allocate outgoing skb */
1812         ret = nl_msg_new(dd, MC_FUSION);
1813         if (ret < 0)
1814                 goto nl_failure;
1815
1816         /* Netlink: ready to start processing incoming messages */
1817         dd->nl_enabled = true;
1818
1819         /* add us to the devices list */
1820         spin_lock_irqsave(&dev_lock, flags);
1821         list_add_tail(&dd->dev_list, &dev_list);
1822         spin_unlock_irqrestore(&dev_lock, flags);
1823
1824 #if NV_ENABLE_CPU_BOOST
1825         dd->last_irq_jiffies = jiffies;
1826 #endif
1827
1828         /* start up Touch Fusion */
1829         dd->start_fusion = true;
1830         wake_up_process(dd->thread);
1831         INFO("driver loaded; version %s; release date %s", DRIVER_VERSION,
1832              DRIVER_RELEASE);
1833
1834         return 0;
1835
1836 nl_failure:
1837         genl_unregister_family(&dd->nl_family);
1838 nl_family_failure:
1839         (void)kthread_stop(dd->thread);
1840 platform_failure:
1841         pdata->init(pdata, false);
1842         kfree(dd);
1843         return ret;
1844 }
1845
1846 static int remove(struct spi_device *spi)
1847 {
1848         struct maxim_sti_pdata  *pdata = spi->dev.platform_data;
1849         struct dev_data         *dd = spi_get_drvdata(spi);
1850         unsigned long           flags;
1851         u8                      i;
1852
1853         INFO("removing...");
1854
1855         if (dd->fusion_process != (pid_t)0)
1856                 (void)kill_pid(find_get_pid(dd->fusion_process), SIGKILL, 1);
1857
1858         /* BEWARE: tear-down sequence below is carefully staged:            */
1859         /* 1) first the feeder of Netlink messages to the processing thread */
1860         /*    is turned off                                                 */
1861         /* 2) then the thread itself is shut down                           */
1862         /* 3) then Netlink family is torn down since no one would be using  */
1863         /*    it at this point                                              */
1864         /* 4) above step (3) ensures that all Netlink senders are           */
1865         /*    definitely gone and it is safe to free up outgoing skb buffer */
1866         /*    and incoming skb queue                                        */
1867         dd->nl_enabled = false;
1868         (void)kthread_stop(dd->thread);
1869         genl_unregister_family(&dd->nl_family);
1870         kfree_skb(dd->outgoing_skb);
1871         skb_queue_purge(&dd->incoming_skb_queue);
1872
1873         for (i = 0; i < INPUT_DEVICES; i++)
1874                 if (dd->input_dev[i])
1875                         input_unregister_device(dd->input_dev[i]);
1876
1877         if (dd->irq_registered)
1878                 free_irq(dd->spi->irq, dd);
1879
1880         stop_scan_canned(dd);
1881
1882         spin_lock_irqsave(&dev_lock, flags);
1883         list_del(&dd->dev_list);
1884         spin_unlock_irqrestore(&dev_lock, flags);
1885
1886         pdata->reset(pdata, 0);
1887         usleep_range(100, 120);
1888         regulator_control(dd, false);
1889         pdata->init(pdata, false);
1890
1891         kfree(dd);
1892
1893         INFO("driver unloaded");
1894         return 0;
1895 }
1896
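/* Shutdown: assert chip reset, then cut the supply regulators. */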
1897 static void shutdown(struct spi_device *spi)
1898 {
1899         struct maxim_sti_pdata  *pdata = spi->dev.platform_data;
1900         struct dev_data         *dd = spi_get_drvdata(spi);
1901
1902         INFO("shutting down...");
1903
1904         pdata->reset(pdata, 0);
1905         usleep_range(100, 120);
1906         regulator_control(dd, false);
1907 }
1908
1909 /****************************************************************************\
1910 * Module initialization                                                      *
1911 \****************************************************************************/
1912
1913 static const struct spi_device_id id[] = {
1914         { MAXIM_STI_NAME, 0 },
1915         { }
1916 };
1917
1918 MODULE_DEVICE_TABLE(spi, id);
1919
1920 static struct spi_driver driver = {
1921         .probe          = probe,
1922         .remove         = remove,
1923         .shutdown       = shutdown,
1924         .id_table       = id,
1925         .driver = {
1926                 .name   = MAXIM_STI_NAME,
1927                 .owner  = THIS_MODULE,
1928 #if defined(CONFIG_PM_SLEEP) && !INPUT_ENABLE_DISABLE
1929                 .pm     = &pm_ops,
1930 #endif
1931         },
1932 };
1933
1934 static int __init maxim_sti_init(void)
1935 {
1936         INIT_LIST_HEAD(&dev_list);
1937         prev_dvdd_rail_state = 0;
1938         spin_lock_init(&dev_lock);
1939         return spi_register_driver(&driver);
1940 }
1941
1942 static void __exit maxim_sti_exit(void)
1943 {
1944         spi_unregister_driver(&driver);
1945 }
1946
1947 module_init(maxim_sti_init);
1948 module_exit(maxim_sti_exit);
1949
1950 MODULE_AUTHOR("Maxim Integrated Products, Inc.");
1951 MODULE_DESCRIPTION("Maxim SmartTouch Imager Touchscreen Driver");
1952 MODULE_LICENSE("GPL v2");
1953 MODULE_VERSION(DRIVER_VERSION);
1954