input: touch: maxim_sti: Check thread stop condition
1 /* drivers/input/touchscreen/maxim_sti.c
2  *
3  * Maxim SmartTouch Imager Touchscreen Driver
4  *
5  * Copyright (c)2013 Maxim Integrated Products, Inc.
6  * Copyright (C) 2013, NVIDIA Corporation.  All Rights Reserved.
7  *
8  * This software is licensed under the terms of the GNU General Public
9  * License version 2, as published by the Free Software Foundation, and
10  * may be copied, distributed, and modified under those terms.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  */
18
19 #include <linux/module.h>
20 #include <linux/kmod.h>
21 #include <linux/kthread.h>
22 #include <linux/spi/spi.h>
23 #include <linux/firmware.h>
24 #include <linux/crc16.h>
25 #include <linux/interrupt.h>
26 #include <linux/input.h>
27 #include <linux/regulator/consumer.h>
28 #include <linux/maxim_sti.h>
29 #include <asm/byteorder.h>  /* for the __LITTLE_ENDIAN byte-order handling below */
30
31 #ifdef CONFIG_PM_WAKELOCKS
32 #include <linux/pm_wakeup.h>
33 #endif
34
35 #define CREATE_TRACE_POINTS
36 #include <trace/events/touchscreen_maxim.h>
37
38 /****************************************************************************\
39 * Custom features                                                            *
40 \****************************************************************************/
41
42 #define INPUT_DEVICES               2
43 #define INPUT_ENABLE_DISABLE        1
44 #define SUSPEND_POWER_OFF           1
45 #define NV_ENABLE_CPU_BOOST         1
46 #define NV_STYLUS_FINGER_EXCLUSION  1
47
48 #define ID_FINGER   0
49 #define ID_STYLUS   (INPUT_DEVICES - 1)
50
51 #if NV_ENABLE_CPU_BOOST
52 #define INPUT_IDLE_PERIOD     (msecs_to_jiffies(50))
53 #endif
54
55 /****************************************************************************\
56 * Device context structure, globals, and macros                              *
57 \****************************************************************************/
58
59 struct dev_data;
60
61 struct chip_access_method {
62         int (*read)(struct dev_data *dd, u16 address, u8 *buf, u16 len);
63         int (*write)(struct dev_data *dd, u16 address, u8 *buf, u16 len);
64 };
65
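/*
 * Per-device context.  Broadly: the SPI transfer buffers, the generic
 * netlink state (family, ops, multicast groups) used to talk to the
 * user-space "fusion" process, the input devices exposed to user space,
 * the IRQ parameters pushed down from user space via DR_CONFIG_IRQ, and
 * the worker thread that services the chip.
 */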
66 struct dev_data {
67         u8                           *tx_buf;
68         u8                           *rx_buf;
69         u8                           send_fail_count;
70         u32                          nl_seq;
71         u8                           nl_mc_group_count;
72         bool                         nl_enabled;
73         bool                         start_fusion;
74         bool                         suspend_in_progress;
75         bool                         resume_in_progress;
76         bool                         input_ignore;
77         bool                         eraser_active;
78 #if (INPUT_DEVICES > 1)
79         bool                         last_finger_active;
80         bool                         last_stylus_active;
81 #endif
82         bool                         legacy_acceleration;
83 #if INPUT_ENABLE_DISABLE
84         bool                         input_no_deconfig;
85 #endif
86         bool                         irq_registered;
87         u16                          irq_param[MAX_IRQ_PARAMS];
88         pid_t                        fusion_process;
89         char                         input_phys[128];
90         struct input_dev             *input_dev[INPUT_DEVICES];
91         struct completion            suspend_resume;
92         struct chip_access_method    chip;
93         struct spi_device            *spi;
94         struct genl_family           nl_family;
95         struct genl_ops              *nl_ops;
96         struct genl_multicast_group  *nl_mc_groups;
97         struct sk_buff               *outgoing_skb;
98         struct sk_buff_head          incoming_skb_queue;
99         struct task_struct           *thread;
100         struct sched_param           thread_sched;
101         struct list_head             dev_list;
102         struct regulator             *reg_avdd;
103         struct regulator             *reg_dvdd;
104         void                         (*service_irq)(struct dev_data *dd);
105 #if NV_ENABLE_CPU_BOOST
106         unsigned long                last_irq_jiffies;
107 #endif
108 #ifdef CONFIG_PM_WAKELOCKS
109         struct wakeup_source         ws;
110 #endif
111 };
112
113 static struct list_head  dev_list;
114 static spinlock_t        dev_lock;
115
116 static irqreturn_t irq_handler(int irq, void *context);
117 static void service_irq(struct dev_data *dd);
118 static void service_irq_legacy_acceleration(struct dev_data *dd);
119
120 #define ERROR(a, b...) printk(KERN_ERR "%s driver(ERROR:%s:%d): " a "\n", \
121                               dd->nl_family.name, __func__, __LINE__, ##b)
122 #define INFO(a, b...) printk(KERN_INFO "%s driver: " a "\n", \
123                              dd->nl_family.name, ##b)
124
125 /****************************************************************************\
126 * Chip access methods                                                        *
127 \****************************************************************************/
128
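/*
 * Three access methods are built on the same raw SPI helpers: method 1
 * (spi_read_1/spi_write_1) prepends both an address word and a length
 * word, method 2 (spi_read_2/spi_write_2) goes through a paged request
 * protocol (a 0xFEDC header written to address 0x0000, followed by status
 * polling), and method 3 (spi_read_3/spi_write_3) sends only the address
 * word.  User space selects one of them with DR_CHIP_ACCESS_METHOD.
 */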
129 static inline int
130 spi_read_123(struct dev_data *dd, u16 address, u8 *buf, u16 len, bool add_len)
131 {
132         struct spi_message   message;
133         struct spi_transfer  transfer;
134         u16                  *tx_buf = (u16 *)dd->tx_buf;
135         u16                  *rx_buf = (u16 *)dd->rx_buf;
136         u16                  words = len / sizeof(u16), header_len = 1;
137         u16                  *ptr2 = rx_buf + 1;
138 #ifdef __LITTLE_ENDIAN
139         u16                  *ptr1 = (u16 *)buf, i;
140 #endif
141         int                  ret;
142
143         if (tx_buf == NULL || rx_buf == NULL)
144                 return -ENOMEM;
145
146         tx_buf[0] = (address << 1) | 0x0001;
147 #ifdef __LITTLE_ENDIAN
148         tx_buf[0] = (tx_buf[0] << 8) | (tx_buf[0] >> 8);
149 #endif
150
151         if (add_len) {
152                 tx_buf[1] = words;
153 #ifdef __LITTLE_ENDIAN
154                 tx_buf[1] = (tx_buf[1] << 8) | (tx_buf[1] >> 8);
155 #endif
156                 ptr2++;
157                 header_len++;
158         }
159
160         spi_message_init(&message);
161         memset(&transfer, 0, sizeof(transfer));
162
163         transfer.len = len + header_len * sizeof(u16);
164         transfer.tx_buf = tx_buf;
165         transfer.rx_buf = rx_buf;
166         spi_message_add_tail(&transfer, &message);
167
168         do {
169                 ret = spi_sync(dd->spi, &message);
170         } while (ret == -EAGAIN);
171
172 #ifdef __LITTLE_ENDIAN
173         for (i = 0; i < words; i++)
174                 ptr1[i] = (ptr2[i] << 8) | (ptr2[i] >> 8);
175 #else
176         memcpy(buf, ptr2, len);
177 #endif
178         return ret;
179 }
180
181 static inline int
182 spi_write_123(struct dev_data *dd, u16 address, u8 *buf, u16 len,
183               bool add_len)
184 {
185         u16  *tx_buf = (u16 *)dd->tx_buf;
186         u16  words = len / sizeof(u16), header_len = 1;
187 #ifdef __LITTLE_ENDIAN
188         u16  i;
189 #endif
190         int  ret;
191
192         if (tx_buf == NULL)
193                 return -ENOMEM;
194
195         tx_buf[0] = address << 1;
196         if (add_len) {
197                 tx_buf[1] = words;
198                 header_len++;
199         }
200         memcpy(tx_buf + header_len, buf, len);
201 #ifdef __LITTLE_ENDIAN
202         for (i = 0; i < (words + header_len); i++)
203                 tx_buf[i] = (tx_buf[i] << 8) | (tx_buf[i] >> 8);
204 #endif
205
206         do {
207                 ret = spi_write(dd->spi, tx_buf,
208                                 len + header_len * sizeof(u16));
209         } while (ret == -EAGAIN);
210
211         memset(dd->tx_buf, 0xFF, len + header_len * sizeof(u16));
212         return ret;
213 }
214
215 /* ======================================================================== */
216
217 static int
218 spi_read_1(struct dev_data *dd, u16 address, u8 *buf, u16 len)
219 {
220         return spi_read_123(dd, address, buf, len, true);
221 }
222
223 static int
224 spi_write_1(struct dev_data *dd, u16 address, u8 *buf, u16 len)
225 {
226         return spi_write_123(dd, address, buf, len, true);
227 }
228
229 /* ======================================================================== */
230
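/*
 * Legacy acceleration handshake as implemented below: writing 0xDEAD to
 * word address 0x0003 asks the firmware to stop streaming, and reading
 * 0xABCD back from the same address appears to acknowledge the stop;
 * writing 0xBEEF restarts it.  The firmware semantics are not documented
 * here, so this is inferred from the polling loops.
 */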
231 static inline int
232 stop_legacy_acceleration(struct dev_data *dd)
233 {
234         u16  value = 0xDEAD, status, i;
235         int  ret;
236
237         ret = spi_write_123(dd, 0x0003, (u8 *)&value,
238                                 sizeof(value), false);
239         if (ret < 0)
240                 return -1;
241         usleep_range(100, 120);
242
243         for (i = 0; i < 200; i++) {
244                 ret = spi_read_123(dd, 0x0003, (u8 *)&status, sizeof(status),
245                                    false);
246                 if (ret < 0)
247                         return -1;
248                 if (status == 0xABCD)
249                         return 0;
250         }
251
252         return -2;
253 }
254
255 static inline int
256 start_legacy_acceleration(struct dev_data *dd)
257 {
258         u16  value = 0xBEEF;
259         int  ret;
260
261         ret = spi_write_123(dd, 0x0003, (u8 *)&value, sizeof(value), false);
262         usleep_range(100, 120);
263
264         return ret;
265 }
266
267 static inline int
268 spi_rw_2_poll_status(struct dev_data *dd)
269 {
270         u16  status, i;
271         int  ret;
272
273         for (i = 0; i < 200; i++) {
274                 ret = spi_read_123(dd, 0x0000, (u8 *)&status, sizeof(status),
275                                    false);
276                 if (ret < 0)
277                         return -1;
278                 if (status == 0xABCD)
279                         return 0;
280         }
281
282         return -2;
283 }
284
285 static inline int
286 spi_read_2_page(struct dev_data *dd, u16 address, u8 *buf, u16 len)
287 {
288         u16  request[] = {0xFEDC, (address << 1) | 0x0001, len / sizeof(u16)};
289         int  ret;
290
291         /* write read request header */
292         ret = spi_write_123(dd, 0x0000, (u8 *)request, sizeof(request),
293                             false);
294         if (ret < 0)
295                 return -1;
296
297         /* poll status */
298         ret = spi_rw_2_poll_status(dd);
299         if (ret < 0)
300                 return ret;
301
302         /* read data */
303         ret = spi_read_123(dd, 0x0004, (u8 *)buf, len, false);
304         return ret;
305 }
306
307 static inline int
308 spi_write_2_page(struct dev_data *dd, u16 address, u8 *buf, u16 len)
309 {
310         u16  page[254];
311         int  ret;
312
313         page[0] = 0xFEDC;
314         page[1] = address << 1;
315         page[2] = len / sizeof(u16);
316         memcpy(page + 4, buf, len);
317
318         /* write data with write request header */
319         ret = spi_write_123(dd, 0x0000, (u8 *)page, len + 4 * sizeof(u16),
320                             false);
321         if (ret < 0)
322                 return -1;
323
324         /* poll status */
325         return spi_rw_2_poll_status(dd);
326 }
327
328 static inline int
329 spi_rw_2(struct dev_data *dd, u16 address, u8 *buf, u16 len,
330          int (*func)(struct dev_data *dd, u16 address, u8 *buf, u16 len))
331 {
332         u16  rx_len, rx_limit = 250 * sizeof(u16), offset = 0;
333         int  ret;
334
335         while (len > 0) {
336                 rx_len = (len > rx_limit) ? rx_limit : len;
337                 if (dd->legacy_acceleration)
338                         stop_legacy_acceleration(dd);
339                 ret = func(dd, address + (offset / sizeof(u16)), buf + offset,
340                            rx_len);
341                 if (dd->legacy_acceleration)
342                         start_legacy_acceleration(dd);
343                 if (ret < 0)
344                         return ret;
345                 offset += rx_len;
346                 len -= rx_len;
347         }
348
349         return 0;
350 }
351
352 static int
353 spi_read_2(struct dev_data *dd, u16 address, u8 *buf, u16 len)
354 {
355         return spi_rw_2(dd, address, buf, len, spi_read_2_page);
356 }
357
358 static int
359 spi_write_2(struct dev_data *dd, u16 address, u8 *buf, u16 len)
360 {
361         return spi_rw_2(dd, address, buf, len, spi_write_2_page);
362 }
363
364 /* ======================================================================== */
365
366 static int
367 spi_read_3(struct dev_data *dd, u16 address, u8 *buf, u16 len)
368 {
369         return spi_read_123(dd, address, buf, len, false);
370 }
371
372 static int
373 spi_write_3(struct dev_data *dd, u16 address, u8 *buf, u16 len)
374 {
375         return spi_write_123(dd, address, buf, len, false);
376 }
377
378 /* ======================================================================== */
379
380 static struct chip_access_method chip_access_methods[] = {
381         {
382                 .read = spi_read_1,
383                 .write = spi_write_1,
384         },
385         {
386                 .read = spi_read_2,
387                 .write = spi_write_2,
388         },
389         {
390                 .read = spi_read_3,
391                 .write = spi_write_3,
392         },
393 };
394
395 static int
396 set_chip_access_method(struct dev_data *dd, u8 method)
397 {
398         if (method == 0 || method > ARRAY_SIZE(chip_access_methods))
399                 return -1;
400
401         memcpy(&dd->chip, &chip_access_methods[method - 1], sizeof(dd->chip));
402         return 0;
403 }
404
405 /* ======================================================================== */
406
407 static inline int
408 stop_legacy_acceleration_canned(struct dev_data *dd)
409 {
410         u16  value = dd->irq_param[18];
411
412         return dd->chip.write(dd, dd->irq_param[16], (u8 *)&value,
413                               sizeof(value));
414 }
415
416 static inline int
417 start_legacy_acceleration_canned(struct dev_data *dd)
418 {
419         u16  value = dd->irq_param[17];
420
421         return dd->chip.write(dd, dd->irq_param[16], (u8 *)&value,
422                               sizeof(value));
423 }
424
425 /* ======================================================================== */
426
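/*
 * Firmware update path.  The sequence implemented below enters the
 * bootloader through writes to 0x7F00, exchanges command/data words via
 * the 0x00FE data and 0x00FF status registers, and issues what look like
 * bootloader commands for CRC computation (0x0030), flash erase (0x0002),
 * byte mode (0x000A) and flash programming (0x00F0).  device_fw_load()
 * compares the CRC16 of the requested firmware image against the CRC
 * reported by the chip and reprograms the flash only on mismatch.
 */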
427 #define FLASH_BLOCK_SIZE  64      /* flash write buffer in words */
428 #define FIRMWARE_SIZE     0xC000  /* fixed 48Kbytes */
429
430 static int bootloader_wait_ready(struct dev_data *dd)
431 {
432         u16  status, i;
433
434         for (i = 0; i < 15; i++) {
435                 if (spi_read_3(dd, 0x00FF, (u8 *)&status,
436                                sizeof(status)) != 0)
437                         return -1;
438                 if (status == 0xABCC)
439                         return 0;
440                 if (i >= 3)
441                         usleep_range(500, 700);
442         }
443         ERROR("unexpected status %04X", status);
444         return -1;
445 }
446
447 static int bootloader_complete(struct dev_data *dd)
448 {
449         u16  value = 0x5432;
450
451         return spi_write_3(dd, 0x00FF, (u8 *)&value, sizeof(value));
452 }
453
454 static int bootloader_read_data(struct dev_data *dd, u16 *value)
455 {
456         u16  buffer[2];
457
458         if (spi_read_3(dd, 0x00FE, (u8 *)buffer, sizeof(buffer)) != 0)
459                 return -1;
460         if (buffer[1] != 0xABCC)
461                 return -1;
462
463         *value = buffer[0];
464         return bootloader_complete(dd);
465 }
466
467 static int bootloader_write_data(struct dev_data *dd, u16 value)
468 {
469         u16  buffer[2] = {value, 0x5432};
470
471         if (bootloader_wait_ready(dd) != 0)
472                 return -1;
473         return spi_write_3(dd, 0x00FE, (u8 *)buffer, sizeof(buffer));
474 }
475
476 static int bootloader_wait_command(struct dev_data *dd)
477 {
478         u16  value, i;
479
480         for (i = 0; i < 15; i++) {
481                 if (bootloader_read_data(dd, &value) == 0 && value == 0x003E)
482                         return 0;
483                 if (i >= 3)
484                         usleep_range(500, 700);
485         }
486         return -1;
487 }
488
489 static int bootloader_enter(struct dev_data *dd)
490 {
491         int i;
492         u16 enter[3] = {0x0047, 0x00C7, 0x0007};
493
494         for (i = 0; i < 3; i++) {
495                 if (spi_write_3(dd, 0x7F00, (u8 *)&enter[i],
496                                 sizeof(enter[i])) != 0)
497                         return -1;
498         }
499
500         if (bootloader_wait_command(dd) != 0)
501                 return -1;
502         return 0;
503 }
504
505 static int bootloader_exit(struct dev_data *dd)
506 {
507         u16  value = 0x0000;
508
509         if (bootloader_write_data(dd, 0x0001) != 0)
510                 return -1;
511         return spi_write_3(dd, 0x7F00, (u8 *)&value, sizeof(value));
512 }
513
514 static int bootloader_get_crc(struct dev_data *dd, u16 *crc16, u16 len)
515 {
516         u16 command[] = {0x0030, 0x0002, 0x0000, 0x0000, len & 0xFF,
517                         len >> 8}, value[2], i;
518
519         for (i = 0; i < ARRAY_SIZE(command); i++)
520                 if (bootloader_write_data(dd, command[i]) != 0)
521                         return -1;
522         msleep(200); /* wait 200ms for it to get done */
523
524         for (i = 0; i < 2; i++)
525                 if (bootloader_read_data(dd, &value[i]) != 0)
526                         return -1;
527
528         if (bootloader_wait_command(dd) != 0)
529                 return -1;
530         *crc16 = (value[1] << 8) | value[0];
531         return 0;
532 }
533
534 static int bootloader_set_byte_mode(struct dev_data *dd)
535 {
536         u16  command[2] = {0x000A, 0x0000}, i;
537
538         for (i = 0; i < ARRAY_SIZE(command); i++)
539                 if (bootloader_write_data(dd, command[i]) != 0)
540                         return -1;
541         if (bootloader_wait_command(dd) != 0)
542                 return -1;
543         return 0;
544 }
545
546 static int bootloader_erase_flash(struct dev_data *dd)
547 {
548         if (bootloader_write_data(dd, 0x0002) != 0)
549                 return -1;
550         msleep(60); /* wait 60ms */
551         if (bootloader_wait_command(dd) != 0)
552                 return -1;
553         return 0;
554 }
555
556 static int bootloader_write_flash(struct dev_data *dd, u16 *image, u16 len)
557 {
558         u16  command[] = {0x00F0, 0x0000, len >> 8, 0x0000, 0x0000};
559         u16  i, buffer[FLASH_BLOCK_SIZE];
560
561         for (i = 0; i < ARRAY_SIZE(command); i++)
562                 if (bootloader_write_data(dd, command[i]) != 0)
563                         return -1;
564
565         for (i = 0; i < ((len / sizeof(u16)) / FLASH_BLOCK_SIZE); i++) {
566                 if (bootloader_wait_ready(dd) != 0)
567                         return -1;
568                 memcpy(buffer, (void *)(image + i * FLASH_BLOCK_SIZE),
569                         sizeof(buffer));
570                 if (spi_write_3(dd, ((i % 2) == 0) ? 0x0000 : 0x0040,
571                                 (u8 *)buffer, sizeof(buffer)) != 0)
572                         return -1;
573                 if (bootloader_complete(dd) != 0)
574                         return -1;
575         }
576
577         usleep_range(10000, 11000);
578         if (bootloader_wait_command(dd) != 0)
579                 return -1;
580         return 0;
581 }
582
583 static int device_fw_load(struct dev_data *dd, const struct firmware *fw)
584 {
585         u16  fw_crc16, chip_crc16;
586
587         fw_crc16 = crc16(0, fw->data, fw->size);
588         INFO("firmware size (%zu) CRC16(0x%04X)", fw->size, fw_crc16);
589         if (bootloader_enter(dd) != 0) {
590                 ERROR("failed to enter bootloader");
591                 return -1;
592         }
593         if (bootloader_get_crc(dd, &chip_crc16, fw->size) != 0) {
594                 ERROR("failed to get CRC16 from the chip");
595                 return -1;
596         }
597         INFO("chip CRC16(0x%04X)", chip_crc16);
598         if (fw_crc16 != chip_crc16) {
599                 INFO("will reprogram chip");
600                 if (bootloader_erase_flash(dd) != 0) {
601                         ERROR("failed to erase chip flash");
602                         return -1;
603                 }
604                 INFO("flash erase OK");
605                 if (bootloader_set_byte_mode(dd) != 0) {
606                         ERROR("failed to set byte mode");
607                         return -1;
608                 }
609                 INFO("byte mode OK");
610                 if (bootloader_write_flash(dd, (u16 *)fw->data,
611                                                         fw->size) != 0) {
612                         ERROR("failed to write flash");
613                         return -1;
614                 }
615                 INFO("flash write OK");
616                 if (bootloader_get_crc(dd, &chip_crc16, fw->size) != 0) {
617                         ERROR("failed to get CRC16 from the chip");
618                         return -1;
619                 }
620                 if (fw_crc16 != chip_crc16) {
621                         ERROR("failed to verify programming! (0x%04X)",
622                               chip_crc16);
623                         return -1;
624                 }
625                 INFO("chip programmed successfully, new chip CRC16(0x%04X)",
626                         chip_crc16);
627         }
628         if (bootloader_exit(dd) != 0) {
629                 ERROR("failed to exit bootloader");
630                 return -1;
631         }
632         return 0;
633 }
634
635 static int fw_request_load(struct dev_data *dd)
636 {
637         const struct firmware *fw;
638         struct maxim_sti_pdata *pdata = dd->spi->dev.platform_data;
639         char *fw_name = pdata->fw_name;
640         int  ret;
641
642         ret = request_firmware(&fw, fw_name, &dd->spi->dev);
643         if (ret || fw == NULL) {
644                 ERROR("firmware request failed (%d,%p)", ret, fw);
645                 return -1;
646         }
647         if (fw->size != FIRMWARE_SIZE) {
648                 release_firmware(fw);
649                 ERROR("incoming firmware is of wrong size (%04zX)", fw->size);
650                 return -1;
651         }
652         ret = device_fw_load(dd, fw);
653         if (ret != 0 && bootloader_exit(dd) != 0)
654                 ERROR("failed to exit bootloader");
655         release_firmware(fw);
656         return ret;
657 }
658
659 /* ======================================================================== */
660
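/*
 * "Canned" scan start/stop.  The register addresses and values used here
 * come entirely from dd->irq_param[], which user space supplies through
 * DR_CONFIG_IRQ, so the driver itself is agnostic about what is actually
 * written; presumably these sequences halt and restart touch scanning
 * around suspend/resume and deconfiguration.
 */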
661 static void stop_scan_canned(struct dev_data *dd)
662 {
663         u16  value;
664
665         if (dd->legacy_acceleration)
666                 (void)stop_legacy_acceleration_canned(dd);
667         value = dd->irq_param[13];
668         (void)dd->chip.write(dd, dd->irq_param[12], (u8 *)&value,
669                              sizeof(value));
670         value = dd->irq_param[11];
671         (void)dd->chip.write(dd, dd->irq_param[0], (u8 *)&value,
672                              sizeof(value));
673         usleep_range(dd->irq_param[15], dd->irq_param[15] + 1000);
674         (void)dd->chip.write(dd, dd->irq_param[0], (u8 *)&value,
675                              sizeof(value));
676 }
677
678 #if !SUSPEND_POWER_OFF
679 static void start_scan_canned(struct dev_data *dd)
680 {
681         u16  value;
682
683         if (dd->legacy_acceleration) {
684                 (void)start_legacy_acceleration_canned(dd);
685         } else {
686                 value = dd->irq_param[14];
687                 (void)dd->chip.write(dd, dd->irq_param[12], (u8 *)&value,
688                                      sizeof(value));
689         }
690 }
691 #endif
692
693 static int regulator_control(struct dev_data *dd, bool on)
694 {
695         int ret = 0;
696
697         if (!dd->reg_avdd || !dd->reg_dvdd)
698                 return 0;
699
700         if (on) {
701                 ret = regulator_enable(dd->reg_dvdd);
702                 if (ret < 0) {
703                         ERROR("Failed to enable regulator dvdd: %d", ret);
704                         return ret;
705                 }
706                 usleep_range(1000, 1020);
707
708                 ret = regulator_enable(dd->reg_avdd);
709                 if (ret < 0) {
710                         ERROR("Failed to enable regulator avdd: %d", ret);
711                         regulator_disable(dd->reg_dvdd);
712                         return ret;
713                 }
714         } else {
715                 if (regulator_is_enabled(dd->reg_avdd))
716                         ret = regulator_disable(dd->reg_avdd);
717                 if (ret < 0) {
718                         ERROR("Failed to disable regulator avdd: %d", ret);
719                         return ret;
720                 }
721
722                 if (regulator_is_enabled(dd->reg_dvdd))
723                         ret = regulator_disable(dd->reg_dvdd);
724                 if (ret < 0) {
725                         ERROR("Failed to disable regulator dvdd: %d", ret);
726                         regulator_enable(dd->reg_avdd);
727                         return ret;
728                 }
729         }
730
731         return 0;
732 }
733
734 static void regulator_init(struct dev_data *dd)
735 {
736         dd->reg_avdd = devm_regulator_get(&dd->spi->dev, "avdd");
737         if (IS_ERR(dd->reg_avdd))
738                 goto err_null_regulator;
739
740         dd->reg_dvdd = devm_regulator_get(&dd->spi->dev, "dvdd");
741         if (IS_ERR(dd->reg_dvdd))
742                 goto err_null_regulator;
743
744         return;
745
746 err_null_regulator:
747         dd->reg_avdd = NULL;
748         dd->reg_dvdd = NULL;
749         dev_warn(&dd->spi->dev, "Failed to init regulators\n");
750 }
751
752 /****************************************************************************\
753 * Suspend/resume processing                                                  *
754 \****************************************************************************/
755
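/*
 * Suspend hands off to the worker thread: suspend() sets
 * suspend_in_progress, wakes the thread, then blocks on the suspend_resume
 * completion, which the thread is expected to complete once it has
 * quiesced the chip (the thread side is outside this listing).  resume()
 * mirrors this with resume_in_progress, and user space finishes the
 * sequence with DR_RESUME_ACK, which re-enables the interrupt.  With
 * SUSPEND_POWER_OFF set, the supplies are switched off while suspended.
 */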
756 #ifdef CONFIG_PM_SLEEP
757 static int suspend(struct device *dev)
758 {
759         struct dev_data  *dd = spi_get_drvdata(to_spi_device(dev));
760         struct maxim_sti_pdata *pdata = dev->platform_data;
761         int ret;
762
763         INFO("suspending...");
764
765         if (dd->suspend_in_progress)
766                 return 0;
767
768         dd->suspend_in_progress = true;
769         wake_up_process(dd->thread);
770         wait_for_completion(&dd->suspend_resume);
771
772 #if SUSPEND_POWER_OFF
773         /* reset-low and power-down */
774         pdata->reset(pdata, 0);
775         usleep_range(100, 120);
776         ret = regulator_control(dd, false);
777         if (ret < 0)
778                 return ret;
779 #endif
780
781 #ifdef CONFIG_PM_WAKELOCKS
782         __pm_relax(&dd->ws);
783 #endif
784         INFO("suspend...done");
785
786         return 0;
787 }
788
789 static int resume(struct device *dev)
790 {
791         struct dev_data  *dd = spi_get_drvdata(to_spi_device(dev));
792         struct maxim_sti_pdata *pdata = dev->platform_data;
793         int ret;
794
795         INFO("resuming...");
796
797         if (!dd->suspend_in_progress)
798                 return 0;
799
800 #ifdef CONFIG_PM_WAKELOCKS
801         __pm_stay_awake(&dd->ws);
802 #endif
803
804 #if SUSPEND_POWER_OFF
805         /* power-up and reset-high */
806         pdata->reset(pdata, 0);
807         ret = regulator_control(dd, true);
808         if (ret < 0)
809                 return ret;
810         usleep_range(300, 400);
811         pdata->reset(pdata, 1);
812 #endif
813
814         dd->resume_in_progress = true;
815         wake_up_process(dd->thread);
816         wait_for_completion(&dd->suspend_resume);
817
818         INFO("resume...done");
819
820         return 0;
821 }
822
823 static const struct dev_pm_ops pm_ops = {
824         .suspend = suspend,
825         .resume = resume,
826 };
827
828 #if INPUT_ENABLE_DISABLE
829 static int input_disable(struct input_dev *dev)
830 {
831         struct dev_data *dd = input_get_drvdata(dev);
832
833         return suspend(&dd->spi->dev);
834 }
835
836 static int input_enable(struct input_dev *dev)
837 {
838         struct dev_data *dd = input_get_drvdata(dev);
839
840         return resume(&dd->spi->dev);
841 }
842 #endif
843 #endif
844
845 /****************************************************************************\
846 * Netlink processing                                                         *
847 \****************************************************************************/
848
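/*
 * Netlink processing.  The driver and the fusion process exchange generic
 * netlink attributes: nl_process_driver_msg() handles the DR_* requests
 * and, for those that produce data (echo, chip read, IRQ line status),
 * fills FU_* attributes into a pre-allocated outgoing skb, which
 * nl_process_msg() then unicasts back to the sender or multicasts to the
 * MC_FUSION group.
 */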
849 static inline int
850 nl_msg_new(struct dev_data *dd, u8 dst)
851 {
852         dd->outgoing_skb = alloc_skb(NL_BUF_SIZE, GFP_KERNEL);
853         if (dd->outgoing_skb == NULL)
854                 return -ENOMEM;
855         nl_msg_init(dd->outgoing_skb->data, dd->nl_family.id, dd->nl_seq++,
856                     dst);
857         if (dd->nl_seq == 0)
858                 dd->nl_seq++;
859         return 0;
860 }
861
862 static int
863 nl_callback_noop(struct sk_buff *skb, struct genl_info *info)
864 {
865         return 0;
866 }
867
868 static inline bool
869 nl_process_driver_msg(struct dev_data *dd, u16 msg_id, void *msg)
870 {
871         struct maxim_sti_pdata        *pdata = dd->spi->dev.platform_data;
872         struct dr_add_mc_group        *add_mc_group_msg;
873         struct dr_echo_request        *echo_msg;
874         struct fu_echo_response       *echo_response;
875         struct dr_chip_read           *read_msg;
876         struct fu_chip_read_result    *read_result;
877         struct dr_chip_write          *write_msg;
878         struct dr_chip_access_method  *chip_access_method_msg;
879         struct dr_delay               *delay_msg;
880         struct fu_irqline_status      *irqline_status;
881         struct dr_config_irq          *config_irq_msg;
882         struct dr_config_input        *config_input_msg;
883         struct dr_config_watchdog     *config_watchdog_msg;
884         struct dr_input               *input_msg;
885         struct dr_legacy_acceleration *legacy_acceleration_msg;
886         u8                            i, inp;
887         int                           ret;
888
889         switch (msg_id) {
890         case DR_ADD_MC_GROUP:
891                 add_mc_group_msg = msg;
892                 if (add_mc_group_msg->number >= pdata->nl_mc_groups) {
893                         ERROR("invalid multicast group number %d (%d)",
894                               add_mc_group_msg->number, pdata->nl_mc_groups);
895                         return false;
896                 }
897                 if (dd->nl_mc_groups[add_mc_group_msg->number].id != 0)
898                         return false;
899                 dd->nl_ops[add_mc_group_msg->number].cmd =
900                                                 add_mc_group_msg->number;
901                 dd->nl_ops[add_mc_group_msg->number].doit = nl_callback_noop;
902                 ret = genl_register_ops(&dd->nl_family,
903                                 &dd->nl_ops[add_mc_group_msg->number]);
904                 if (ret < 0)
905                         ERROR("failed to add multicast group op (%d)", ret);
906                 GENL_COPY(dd->nl_mc_groups[add_mc_group_msg->number].name,
907                           add_mc_group_msg->name);
908                 ret = genl_register_mc_group(&dd->nl_family,
909                                 &dd->nl_mc_groups[add_mc_group_msg->number]);
910                 if (ret < 0)
911                         ERROR("failed to add multicast group (%d)", ret);
912                 return false;
913         case DR_ECHO_REQUEST:
914                 echo_msg = msg;
915                 echo_response = nl_alloc_attr(dd->outgoing_skb->data,
916                                               FU_ECHO_RESPONSE,
917                                               sizeof(*echo_response));
918                 if (echo_response == NULL)
919                         goto alloc_attr_failure;
920                 echo_response->cookie = echo_msg->cookie;
921                 return true;
922         case DR_CHIP_READ:
923                 read_msg = msg;
924                 read_result = nl_alloc_attr(dd->outgoing_skb->data,
925                                 FU_CHIP_READ_RESULT,
926                                 sizeof(*read_result) + read_msg->length);
927                 if (read_result == NULL)
928                         goto alloc_attr_failure;
929                 read_result->address = read_msg->address;
930                 read_result->length = read_msg->length;
931                 ret = dd->chip.read(dd, read_msg->address, read_result->data,
932                                     read_msg->length);
933                 if (ret < 0)
934                         ERROR("failed to read from chip (%d)", ret);
935                 return true;
936         case DR_CHIP_WRITE:
937                 write_msg = msg;
938                 ret = dd->chip.write(dd, write_msg->address, write_msg->data,
939                                      write_msg->length);
940                 if (ret < 0)
941                         ERROR("failed to write chip (%d)", ret);
942                 return false;
943         case DR_CHIP_RESET:
944                 pdata->reset(pdata, ((struct dr_chip_reset *)msg)->state);
945                 return false;
946         case DR_GET_IRQLINE:
947                 irqline_status = nl_alloc_attr(dd->outgoing_skb->data,
948                                                FU_IRQLINE_STATUS,
949                                                sizeof(*irqline_status));
950                 if (irqline_status == NULL)
951                         goto alloc_attr_failure;
952                 irqline_status->status = pdata->irq(pdata);
953                 return true;
954         case DR_DELAY:
955                 delay_msg = msg;
956                 if (delay_msg->period > 1000)
957                         msleep(delay_msg->period / 1000);
958                 usleep_range(delay_msg->period % 1000,
959                             (delay_msg->period % 1000) + 10);
960                 return false;
961         case DR_CHIP_ACCESS_METHOD:
962                 chip_access_method_msg = msg;
963                 ret = set_chip_access_method(dd,
964                                              chip_access_method_msg->method);
965                 if (ret < 0)
966                         ERROR("failed to set chip access method (%d) (%d)",
967                               ret, chip_access_method_msg->method);
968                 return false;
969         case DR_CONFIG_IRQ:
970                 config_irq_msg = msg;
971                 if (config_irq_msg->irq_params > MAX_IRQ_PARAMS) {
972                         ERROR("too many IRQ parameters");
973                         return false;
974                 }
975                 memcpy(dd->irq_param, config_irq_msg->irq_param,
976                        config_irq_msg->irq_params * sizeof(dd->irq_param[0]));
977                 if (dd->irq_registered)
978                         return false;
979                 dd->service_irq = service_irq;
980                 ret = request_irq(dd->spi->irq, irq_handler,
981                         (config_irq_msg->irq_edge == DR_IRQ_RISING_EDGE) ?
982                                 IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING,
983                                                 pdata->nl_family, dd);
984                 if (ret < 0) {
985                         ERROR("failed to request IRQ (%d)", ret);
986                 } else {
987                         dd->irq_registered = true;
988                         wake_up_process(dd->thread);
989                 }
990                 return false;
991         case DR_CONFIG_INPUT:
992                 config_input_msg = msg;
993                 for (i = 0; i < INPUT_DEVICES; i++)
994                         if (dd->input_dev[i] != NULL)
995                                 return false;
996                 for (i = 0; i < INPUT_DEVICES; i++) {
997                         dd->input_dev[i] = input_allocate_device();
998                         if (dd->input_dev[i] == NULL) {
999                                 ERROR("failed to allocate input device");
1000                                 continue;
1001                         }
1002                         snprintf(dd->input_phys, sizeof(dd->input_phys),
1003                                  "%s/input%d", dev_name(&dd->spi->dev), i);
1004                         dd->input_dev[i]->name = pdata->nl_family;
1005                         dd->input_dev[i]->phys = dd->input_phys;
1006                         dd->input_dev[i]->id.bustype = BUS_SPI;
1007 #if defined(CONFIG_PM_SLEEP) && INPUT_ENABLE_DISABLE
1008                         if (i == 0) {
1009                                 dd->input_dev[i]->enable = input_enable;
1010                                 dd->input_dev[i]->disable = input_disable;
1011                                 dd->input_dev[i]->enabled = true;
1012                                 input_set_drvdata(dd->input_dev[i], dd);
1013                         }
1014 #endif
1015 #if NV_ENABLE_CPU_BOOST
1016                         if (i == 0)
1017                                 input_set_capability(dd->input_dev[i], EV_MSC,
1018                                                      MSC_ACTIVITY);
1019 #endif
1020                         __set_bit(EV_SYN, dd->input_dev[i]->evbit);
1021                         __set_bit(EV_ABS, dd->input_dev[i]->evbit);
1022                         if (i == ID_STYLUS) {
1023                                 __set_bit(EV_KEY, dd->input_dev[i]->evbit);
1024                                 __set_bit(BTN_TOOL_RUBBER,
1025                                           dd->input_dev[i]->keybit);
1026                         }
1027                         input_set_abs_params(dd->input_dev[i],
1028                                              ABS_MT_POSITION_X, 0,
1029                                              config_input_msg->x_range, 0, 0);
1030                         input_set_abs_params(dd->input_dev[i],
1031                                              ABS_MT_POSITION_Y, 0,
1032                                              config_input_msg->y_range, 0, 0);
1033                         input_set_abs_params(dd->input_dev[i],
1034                                              ABS_MT_PRESSURE, 0, 0xFF, 0, 0);
1035                         input_set_abs_params(dd->input_dev[i],
1036                                              ABS_MT_TRACKING_ID, 0,
1037                                              MAX_INPUT_EVENTS, 0, 0);
1038
1039                         if (i == ID_STYLUS) {
1040                                 input_set_abs_params(dd->input_dev[i],
1041                                         ABS_MT_TOOL_TYPE, 0, MT_TOOL_MAX,
1042                                         0, 0);
1043                         } else {
1044                                 input_set_abs_params(dd->input_dev[i],
1045                                         ABS_MT_TOOL_TYPE, 0, MT_TOOL_FINGER,
1046                                         0, 0);
1047                         }
1048
1049                         ret = input_register_device(dd->input_dev[i]);
1050                         if (ret < 0) {
1051                                 input_free_device(dd->input_dev[i]);
1052                                 dd->input_dev[i] = NULL;
1053                                 ERROR("failed to register input device");
1054                         }
1055                 }
1056                 return false;
1057         case DR_CONFIG_WATCHDOG:
1058                 config_watchdog_msg = msg;
1059                 dd->fusion_process = (pid_t)config_watchdog_msg->pid;
1060                 return false;
1061         case DR_DECONFIG:
1062                 if (dd->irq_registered) {
1063                         free_irq(dd->spi->irq, dd);
1064                         dd->irq_registered = false;
1065                 }
1066                 stop_scan_canned(dd);
1067                 if (!dd->input_no_deconfig) {
1068                         for (i = 0; i < INPUT_DEVICES; i++) {
1069                                 if (dd->input_dev[i] == NULL)
1070                                         continue;
1071                                 input_unregister_device(dd->input_dev[i]);
1072                                 dd->input_dev[i] = NULL;
1073                         }
1074                 }
1075 #if (INPUT_DEVICES > 1)
1076                 dd->last_finger_active = false;
1077                 dd->last_stylus_active = false;
1078 #endif
1079                 dd->input_ignore = false;
1080                 dd->eraser_active = false;
1081                 dd->legacy_acceleration = false;
1082                 dd->service_irq = service_irq;
1083                 dd->fusion_process = (pid_t)0;
1084                 return false;
1085         case DR_INPUT:
1086                 if (dd->input_ignore)
1087                         return false;
1088                 input_msg = msg;
1089                 if (input_msg->events == 0) {
1090                         if (dd->eraser_active) {
1091                                 input_report_key(
1092                                         dd->input_dev[ID_STYLUS],
1093                                         BTN_TOOL_RUBBER, 0);
1094                                 dd->eraser_active = false;
1095                         }
1096                         for (i = 0; i < INPUT_DEVICES; i++) {
1097                                 input_mt_sync(dd->input_dev[i]);
1098                                 input_sync(dd->input_dev[i]);
1099                         }
1100 #if (INPUT_DEVICES > 1)
1101                         dd->last_finger_active = false;
1102                         dd->last_stylus_active = false;
1103 #endif
1104                 } else {
1105 #if (INPUT_DEVICES > 1)
1106                         bool current_finger_active = false;
1107                         bool current_stylus_active = false;
1108                         for (i = 0; i < input_msg->events; i++) {
1109                                 if (!current_finger_active &&
1110                                         (input_msg->event[i].tool_type
1111                                         == DR_INPUT_FINGER)) {
1112                                         current_finger_active = true;
1113                                 }
1114                                 if (!current_stylus_active &&
1115                                         ((input_msg->event[i].tool_type
1116                                         == DR_INPUT_STYLUS) ||
1117                                         (input_msg->event[i].tool_type
1118                                         == DR_INPUT_ERASER))) {
1119                                         current_stylus_active = true;
1120                                 }
1121                         }
1122 #if NV_STYLUS_FINGER_EXCLUSION
1123                         if (dd->last_finger_active && !dd->last_stylus_active &&
1124                                 current_stylus_active) {
1125 #else
1126                         if (dd->last_finger_active && !current_finger_active) {
1127 #endif
1128                                 input_mt_sync(dd->input_dev[ID_FINGER]);
1129                                 input_sync(dd->input_dev[ID_FINGER]);
1130                         }
1131                         if (dd->last_stylus_active && !current_stylus_active) {
1132                                 if (dd->eraser_active) {
1133                                         input_report_key(
1134                                                 dd->input_dev[ID_STYLUS],
1135                                                 BTN_TOOL_RUBBER, 0);
1136                                         dd->eraser_active = false;
1137                                 }
1138                                 input_mt_sync(dd->input_dev[ID_STYLUS]);
1139                                 input_sync(dd->input_dev[ID_STYLUS]);
1140                         }
1141                         dd->last_finger_active = current_finger_active;
1142                         dd->last_stylus_active = current_stylus_active;
1143 #endif
1144                         for (i = 0; i < input_msg->events; i++) {
1145 #if (INPUT_DEVICES > 1) && NV_STYLUS_FINGER_EXCLUSION
1146                                 if ((input_msg->event[i].tool_type
1147                                         == DR_INPUT_FINGER) &&
1148                                         current_stylus_active) {
1149                                         continue;
1150                                 }
1151 #endif
1152                                 switch (input_msg->event[i].tool_type) {
1153                                 case DR_INPUT_FINGER:
1154                                         inp = ID_FINGER;
1155                                         input_report_abs(dd->input_dev[inp],
1156                                                          ABS_MT_TOOL_TYPE,
1157                                                          MT_TOOL_FINGER);
1158                                         break;
1159                                 case DR_INPUT_STYLUS:
1160                                         inp = ID_STYLUS;
1161                                         input_report_abs(dd->input_dev[inp],
1162                                                          ABS_MT_TOOL_TYPE,
1163                                                          MT_TOOL_PEN);
1164                                         break;
1165                                 case DR_INPUT_ERASER:
1166                                         inp = ID_STYLUS;
1167                                         input_report_key(dd->input_dev[inp],
1168                                                 BTN_TOOL_RUBBER, 1);
1169                                         dd->eraser_active = true;
1170                                         break;
1171                                 default:
1172                                         inp = ID_FINGER;
1173                                         ERROR("invalid input tool type (%d)",
1174                                               input_msg->event[i].tool_type);
1175                                         break;
1176                                 }
1177                                 input_report_abs(dd->input_dev[inp],
1178                                                  ABS_MT_TRACKING_ID,
1179                                                  input_msg->event[i].id);
1180                                 input_report_abs(dd->input_dev[inp],
1181                                                  ABS_MT_POSITION_X,
1182                                                  input_msg->event[i].x);
1183                                 input_report_abs(dd->input_dev[inp],
1184                                                  ABS_MT_POSITION_Y,
1185                                                  input_msg->event[i].y);
1186                                 input_report_abs(dd->input_dev[inp],
1187                                                  ABS_MT_PRESSURE,
1188                                                  input_msg->event[i].z);
1189                                 input_mt_sync(dd->input_dev[inp]);
1190                         }
1191                         for (i = 0; i < INPUT_DEVICES; i++)
1192                                 input_sync(dd->input_dev[i]);
1193                 }
1194                 return false;
1195         case DR_RESUME_ACK:
1196                 dd->input_ignore = false;
1197                 if (dd->irq_registered)
1198                         enable_irq(dd->spi->irq);
1199                 return false;
1200         case DR_LEGACY_FWDL:
1201                 ret = fw_request_load(dd);
1202                 if (ret < 0)
1203                         ERROR("firmware download failed (%d)", ret);
1204                 else
1205                         INFO("firmware download OK");
1206                 return false;
1207         case DR_LEGACY_ACCELERATION:
1208                 legacy_acceleration_msg = msg;
1209                 if (legacy_acceleration_msg->enable) {
1210                         dd->service_irq = service_irq_legacy_acceleration;
1211                         start_legacy_acceleration(dd);
1212                         dd->legacy_acceleration = true;
1213                 } else {
1214                         stop_legacy_acceleration(dd);
1215                         dd->legacy_acceleration = false;
1216                         dd->service_irq = service_irq;
1217                 }
1218                 return false;
1219         default:
1220                 ERROR("unexpected message %d", msg_id);
1221                 return false;
1222         }
1223
1224 alloc_attr_failure:
1225         ERROR("failed to allocate response for msg_id %d", msg_id);
1226         return false;
1227 }
1228
1229 static int nl_process_msg(struct dev_data *dd, struct sk_buff *skb)
1230 {
1231         struct nlattr  *attr;
1232         bool           send_reply = false;
1233         int            ret = 0, ret2;
1234
1235         /* process incoming message */
1236         attr = NL_ATTR_FIRST(skb->data);
1237         for (; attr < NL_ATTR_LAST(skb->data); attr = NL_ATTR_NEXT(attr)) {
1238                 if (nl_process_driver_msg(dd, attr->nla_type,
1239                                           NL_ATTR_VAL(attr, void)))
1240                         send_reply = true;
1241         }
1242
1243         /* send back reply if requested */
1244         if (send_reply) {
1245                 (void)skb_put(dd->outgoing_skb,
1246                               NL_SIZE(dd->outgoing_skb->data));
1247                 if (NL_SEQ(skb->data) == 0)
1248                         ret = genlmsg_unicast(sock_net(skb->sk),
1249                                               dd->outgoing_skb,
1250                                               NETLINK_CB(skb).pid);
1251                 else
1252                         ret = genlmsg_multicast(dd->outgoing_skb, 0,
1253                                         dd->nl_mc_groups[MC_FUSION].id,
1254                                         GFP_KERNEL);
1255                 if (ret < 0)
1256                         ERROR("could not reply to fusion (%d)", ret);
1257
1258                 /* allocate new outgoing skb */
1259                 ret2 = nl_msg_new(dd, MC_FUSION);
1260                 if (ret2 < 0)
1261                         ERROR("could not allocate outgoing skb (%d)", ret2);
1262         }
1263
1264         /* free incoming message */
1265         kfree_skb(skb);
1266         return ret;
1267 }
1268
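/*
 * Netlink callbacks.  Messages addressed to the driver are cloned and
 * queued for the worker thread rather than being processed in the
 * callback; messages on the fusion channel are simply re-multicast to the
 * MC_FUSION group, apparently so that other listeners see them.
 */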
1269 static int
1270 nl_callback_driver(struct sk_buff *skb, struct genl_info *info)
1271 {
1272         struct dev_data  *dd;
1273         struct sk_buff   *skb2;
1274         unsigned long    flags;
1275
1276         /* locate device structure */
1277         spin_lock_irqsave(&dev_lock, flags);
1278         list_for_each_entry(dd, &dev_list, dev_list)
1279                 if (dd->nl_family.id == NL_TYPE(skb->data))
1280                         break;
1281         spin_unlock_irqrestore(&dev_lock, flags);
1282         if (&dd->dev_list == &dev_list)
1283                 return -ENODEV;
1284         if (!dd->nl_enabled)
1285                 return -EAGAIN;
1286
1287         /* queue incoming skb and wake up processing thread */
1288         skb2 = skb_clone(skb, GFP_ATOMIC);
1289         if (skb2 == NULL) {
1290                 ERROR("failed to clone incoming skb");
1291                 return -ENOMEM;
1292         } else {
1293                 skb_queue_tail(&dd->incoming_skb_queue, skb2);
1294                 wake_up_process(dd->thread);
1295                 return 0;
1296         }
1297 }
1298
1299 static int
1300 nl_callback_fusion(struct sk_buff *skb, struct genl_info *info)
1301 {
1302         struct dev_data  *dd;
1303         unsigned long    flags;
1304
1305         /* locate device structure */
1306         spin_lock_irqsave(&dev_lock, flags);
1307         list_for_each_entry(dd, &dev_list, dev_list)
1308                 if (dd->nl_family.id == NL_TYPE(skb->data))
1309                         break;
1310         spin_unlock_irqrestore(&dev_lock, flags);
1311         if (&dd->dev_list == &dev_list)
1312                 return -ENODEV;
1313         if (!dd->nl_enabled)
1314                 return -EAGAIN;
1315
1316         (void)genlmsg_multicast(skb_clone(skb, GFP_ATOMIC), 0,
1317                                 dd->nl_mc_groups[MC_FUSION].id, GFP_ATOMIC);
1318         return 0;
1319 }
1320
1321 /****************************************************************************\
1322 * Interrupt processing                                                       *
1323 \****************************************************************************/
1324
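/*
 * The hard IRQ handler does no chip I/O: it optionally reports an activity
 * event for the CPU-boost hook and wakes the worker thread, which is
 * expected to run dd->service_irq() (service_irq() or its
 * legacy-acceleration variant) in process context.
 */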
1325 static irqreturn_t irq_handler(int irq, void *context)
1326 {
1327         struct dev_data  *dd = context;
1328
1329         trace_touchscreen_maxim_irq("irq_handler");
1330
1331 #if NV_ENABLE_CPU_BOOST
1332         if (time_after(jiffies, dd->last_irq_jiffies + INPUT_IDLE_PERIOD))
1333                 input_event(dd->input_dev[0], EV_MSC, MSC_ACTIVITY, 1);
1334         dd->last_irq_jiffies = jiffies;
1335 #endif
1336
1337         wake_up_process(dd->thread);
1338         return IRQ_HANDLED;
1339 }
1340
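/*
 * Legacy-acceleration service path: the touch frame is read directly over
 * SPI in rx_limit-sized chunks and forwarded to the fusion process as a
 * single FU_ASYNC_DATA attribute.  The magic words checked below (0xBABE,
 * 0x6060, 0x8070) appear to be firmware markers for "acceleration
 * disabled", "data not ready" and a valid frame header, respectively;
 * this is inferred from how each case is handled.
 */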
1341 static void service_irq_legacy_acceleration(struct dev_data *dd)
1342 {
1343         struct fu_async_data  *async_data;
1344         u16                   len, rx_len = 0, offset = 0;
1345         u16                   buf[255], rx_limit = 250 * sizeof(u16);
1346         int                   ret = 0, counter = 0;
1347
1348         async_data = nl_alloc_attr(dd->outgoing_skb->data, FU_ASYNC_DATA,
1349                                    sizeof(*async_data) + dd->irq_param[4] +
1350                                    2 * sizeof(u16));
1351         if (async_data == NULL) {
1352                 ERROR("can't add data to async IRQ buffer");
1353                 return;
1354         }
1355         async_data->length = dd->irq_param[4] + 2 * sizeof(u16);
1356         len = async_data->length;
1357         async_data->address = 0;
1358
1359         while (len > 0) {
1360                 rx_len = (len > rx_limit) ? rx_limit : len;
1361                 ret = spi_read_123(dd, 0x0000, (u8 *)&buf,
1362                                         rx_len + 4 * sizeof(u16), false);
1363                 if (ret < 0)
1364                         break;
1365
1366                 if (buf[3] == 0xBABE) {
1367                         dd->legacy_acceleration = false;
1368                         dd->service_irq = service_irq;
1369                         nl_msg_init(dd->outgoing_skb->data, dd->nl_family.id,
1370                                     dd->nl_seq - 1, MC_FUSION);
1371                         return;
1372                 }
1373
1374                 if (rx_limit == rx_len)
1375                         usleep_range(200, 300);
1376
1377                 if (buf[0] == 0x6060) {
1378                         ERROR("data not ready");
1379                         start_legacy_acceleration_canned(dd);
1380                         ret = -EBUSY;
1381                         break;
1382                 } else if (buf[0] == 0x8070) {
1383                         if (buf[1] == dd->irq_param[1] ||
1384                                         buf[1] == dd->irq_param[2])
1385                                 async_data->address = buf[1];
1386
1387                         if (async_data->address +
1388                                         offset / sizeof(u16) != buf[1]) {
1389                                 ERROR("sequence number incorrect %04X", buf[1]);
1390                                 start_legacy_acceleration_canned(dd);
1391                                 ret = -EBUSY;
1392                                 break;
1393                         }
1394                 }
1395                 counter++;
1396                 memcpy(async_data->data + offset, buf + 4, rx_len);
1397                 offset += rx_len;
1398                 len -= rx_len;
1399         }
1400         async_data->status = *(buf + rx_len / sizeof(u16) + 2);
1401
1402         if (ret < 0) {
1403                 ERROR("can't read IRQ buffer (%d)", ret);
1404                 nl_msg_init(dd->outgoing_skb->data, dd->nl_family.id,
1405                             dd->nl_seq - 1, MC_FUSION);
1406         } else {
1407                 (void)skb_put(dd->outgoing_skb,
1408                               NL_SIZE(dd->outgoing_skb->data));
1409                 ret = genlmsg_multicast(dd->outgoing_skb, 0,
1410                                         dd->nl_mc_groups[MC_FUSION].id,
1411                                         GFP_KERNEL);
1412                 if (ret < 0) {
1413                         ERROR("can't send IRQ buffer %d", ret);
1414                         msleep(300);
1415                         if (++dd->send_fail_count >= 10 &&
1416                             dd->fusion_process != (pid_t)0) {
1417                                 (void)kill_pid(
1418                                         find_get_pid(dd->fusion_process),
1419                                         SIGKILL, 1);
1420                                 wake_up_process(dd->thread);
1421                         }
1422                 } else {
1423                         dd->send_fail_count = 0;
1424                 }
1425                 ret = nl_msg_new(dd, MC_FUSION);
1426                 if (ret < 0)
1427                         ERROR("could not allocate outgoing skb (%d)", ret);
1428         }
1429 }
1430
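/*
 * Normal IRQ service.  As used below, irq_param[] appears to hold:
 * [0] status/clear register address, [1]/[2] the two alternate report
 * buffer addresses, [3] an extra buffer address used in the irq_param[9]
 * mode, [4] the report length, [5] the buffer-select bit, [6]/[7] the
 * per-buffer ready bits, [8] an additional bit included in every clear,
 * and [9]/[10] special-mode bits.  The selected buffer(s) are read into
 * FU_ASYNC_DATA attributes, the handled bits are written back to clear
 * them, and the message is multicast to the fusion daemon.
 */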
1431 static void service_irq(struct dev_data *dd)
1432 {
1433         struct fu_async_data  *async_data;
1434         u16                   status, clear, test, address[2], xbuf;
1435         bool                  read_buf[2] = {true, false};
1436         int                   ret, ret2;
1437
1438         ret = dd->chip.read(dd, dd->irq_param[0], (u8 *)&status,
1439                             sizeof(status));
1440         if (ret < 0) {
1441                 ERROR("can't read IRQ status (%d)", ret);
1442                 return;
1443         }
1444
1445         if (status & dd->irq_param[10]) {
1446                 read_buf[0] = false;
1447                 clear = 0xFFFF;
1448         } else if (status & dd->irq_param[9]) {
1449                 test = status & (dd->irq_param[6] | dd->irq_param[7]);
1450
1451                 if (test == (dd->irq_param[6] | dd->irq_param[7]))
1452                         xbuf = ((status & dd->irq_param[5]) != 0) ? 0 : 1;
1453                 else if (test == dd->irq_param[6])
1454                         xbuf = 0;
1455                 else if (test == dd->irq_param[7])
1456                         xbuf = 1;
1457                 else {
1458                         ERROR("unexpected IRQ handler case");
1459                         return;
1460                 }
1461                 read_buf[1] = true;
1462                 address[1] = xbuf ? dd->irq_param[2] : dd->irq_param[1];
1463
1464                 address[0] = dd->irq_param[3];
1465                 clear = dd->irq_param[6] | dd->irq_param[7] |
1466                         dd->irq_param[8] | dd->irq_param[9];
1467         } else {
1468                 test = status & (dd->irq_param[6] | dd->irq_param[7]);
1469
1470                 if (test == 0)
1471                         return;
1472                 else if (test == (dd->irq_param[6] | dd->irq_param[7]))
1473                         xbuf = ((status & dd->irq_param[5]) == 0) ? 0 : 1;
1474                 else if (test == dd->irq_param[6])
1475                         xbuf = 0;
1476                 else if (test == dd->irq_param[7])
1477                         xbuf = 1;
1478                 else {
1479                         ERROR("unexpected IRQ handler case");
1480                         return;
1481                 }
1482
1483                 address[0] = xbuf ? dd->irq_param[2] : dd->irq_param[1];
1484                 clear = xbuf ? dd->irq_param[7] : dd->irq_param[6];
1485                 clear |= dd->irq_param[8];
1486         }
1487
1488         async_data = nl_alloc_attr(dd->outgoing_skb->data, FU_ASYNC_DATA,
1489                                    sizeof(*async_data) + dd->irq_param[4]);
1490         if (async_data == NULL) {
1491                 ERROR("can't add data to async IRQ buffer 1");
1492                 return;
1493         }
1494
1495         async_data->status = status;
1496         if (read_buf[0]) {
1497                 async_data->address = address[0];
1498                 async_data->length = dd->irq_param[4];
1499                 ret = dd->chip.read(dd, address[0], async_data->data,
1500                                     dd->irq_param[4]);
1501         }
1502
1503         if (read_buf[1] && ret == 0) {
1504                 async_data = nl_alloc_attr(dd->outgoing_skb->data,
1505                                            FU_ASYNC_DATA,
1506                                            sizeof(*async_data) +
1507                                                 dd->irq_param[4]);
1508                 if (async_data == NULL) {
1509                         ERROR("can't add data to async IRQ buffer 2");
1510                         nl_msg_init(dd->outgoing_skb->data, dd->nl_family.id,
1511                                     dd->nl_seq - 1, MC_FUSION);
1512                         return;
1513                 }
1514                 async_data->address = address[1];
1515                 async_data->length = dd->irq_param[4];
1516                 async_data->status = status;
1517                 ret = dd->chip.read(dd, address[1], async_data->data,
1518                                     dd->irq_param[4]);
1519         }
1520
1521         ret2 = dd->chip.write(dd, dd->irq_param[0], (u8 *)&clear,
1522                              sizeof(clear));
1523         if (ret2 < 0)
1524                 ERROR("can't clear IRQ status (%d)", ret2);
1525
1526         if (ret < 0) {
1527                 ERROR("can't read IRQ buffer (%d)", ret);
1528                 nl_msg_init(dd->outgoing_skb->data, dd->nl_family.id,
1529                             dd->nl_seq - 1, MC_FUSION);
1530         } else {
1531                 (void)skb_put(dd->outgoing_skb,
1532                               NL_SIZE(dd->outgoing_skb->data));
1533                 ret = genlmsg_multicast(dd->outgoing_skb, 0,
1534                                         dd->nl_mc_groups[MC_FUSION].id,
1535                                         GFP_KERNEL);
1536                 if (ret < 0) {
1537                         ERROR("can't send IRQ buffer %d", ret);
1538                         msleep(300);
1539                         if (read_buf[0] == false ||
1540                             (++dd->send_fail_count >= 10 &&
1541                              dd->fusion_process != (pid_t)0)) {
1542                                 (void)kill_pid(
1543                                         find_get_pid(dd->fusion_process),
1544                                         SIGKILL, 1);
1545                                 wake_up_process(dd->thread);
1546                         }
1547                 } else {
1548                         dd->send_fail_count = 0;
1549                 }
1550                 ret = nl_msg_new(dd, MC_FUSION);
1551                 if (ret < 0)
1552                         ERROR("could not allocate outgoing skb (%d)", ret);
1553         }
1554 }
1555
1556 /****************************************************************************\
1557 * Processing thread                                                          *
1558 \****************************************************************************/
1559
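/*
 * All deferred work runs in this single kthread: (re)starting the
 * touch_fusion daemon, processing queued Netlink messages, the
 * suspend/resume handshake and servicing chip interrupts.  The thread
 * sleeps in TASK_INTERRUPTIBLE between passes and re-checks
 * kthread_should_stop() after every step that may block, so remove()
 * and shutdown() can stop it promptly.
 */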
1560 static int processing_thread(void *arg)
1561 {
1562         struct dev_data         *dd = arg;
1563         struct maxim_sti_pdata  *pdata = dd->spi->dev.platform_data;
1564         struct sk_buff          *skb;
1565         char                    *argv[] = { pdata->touch_fusion, "daemon",
1566                                             pdata->nl_family,
1567                                             pdata->config_file, NULL };
1568         int                     ret, ret2;
1569
1570         sched_setscheduler(current, SCHED_FIFO, &dd->thread_sched);
1571
1572         while (!kthread_should_stop()) {
1573                 set_current_state(TASK_INTERRUPTIBLE);
1574
1575                 /* ensure that we have outgoing skb */
1576                 if (dd->outgoing_skb == NULL)
1577                         if (nl_msg_new(dd, MC_FUSION) < 0) {
1578                                 schedule();
1579                                 continue;
1580                         }
1581
1582                 /* priority 1: start up fusion process */
1583                 if (dd->fusion_process != (pid_t)0 && get_pid_task(
1584                                         find_get_pid(dd->fusion_process),
1585                                         PIDTYPE_PID) == NULL) {
1586                         stop_scan_canned(dd);
1587                         dd->start_fusion = true;
1588                         dd->fusion_process = (pid_t)0;
1589 #if INPUT_ENABLE_DISABLE
1590                         dd->input_no_deconfig = true;
1591 #endif
1592                 }
1593                 if (dd->start_fusion) {
1594                         do {
1595                                 ret = call_usermodehelper(argv[0], argv, NULL,
1596                                                           UMH_WAIT_EXEC);
1597                                 if (ret != 0)
1598                                         msleep(100);
1599                         } while (ret != 0 && !kthread_should_stop());
1600                         dd->start_fusion = false;
1601                 }
1602                 if (kthread_should_stop())
1603                         break;
1604
1605                 /* priority 2: process pending Netlink messages */
1606                 while ((skb = skb_dequeue(&dd->incoming_skb_queue)) != NULL) {
1607                         if (kthread_should_stop())
1608                                 break;
1609                         if (nl_process_msg(dd, skb) < 0)
1610                                 skb_queue_purge(&dd->incoming_skb_queue);
1611                 }
1612                 if (kthread_should_stop())
1613                         break;
1614
1615                 /* priority 3: suspend/resume */
1616                 if (dd->suspend_in_progress) {
1617                         if (dd->irq_registered)
1618                                 disable_irq(dd->spi->irq);
1619                         stop_scan_canned(dd);
1620                         complete(&dd->suspend_resume);
1621
1622                         INFO("%s: suspended.", __func__);
1623
1624                         dd->input_ignore = true;
1625                         while (!dd->resume_in_progress &&
1626                                         !kthread_should_stop()) {
1627                                 /* set state before schedule() to avoid a lost wake-up */
1628                                 set_current_state(TASK_INTERRUPTIBLE);
1629                                 schedule();
1630                         }
1631                         if (kthread_should_stop())
1632                                 break;
1633
1634                         INFO("%s: resuming.", __func__);
1635
1636 #if !SUSPEND_POWER_OFF
1637                         start_scan_canned(dd);
1638 #endif
1639                         dd->resume_in_progress = false;
1640                         dd->suspend_in_progress = false;
1641                         complete(&dd->suspend_resume);
1642
1643                         do {
1644                                 ret = nl_add_attr(dd->outgoing_skb->data,
1645                                                   FU_RESUME, NULL, 0);
1646                                 if (ret < 0) {
1647                                         ERROR("can't add data to resume " \
1648                                               "buffer");
1649                                         nl_msg_init(dd->outgoing_skb->data,
1650                                                     dd->nl_family.id,
1651                                                     dd->nl_seq - 1, MC_FUSION);
1652                                         msleep(100);
1653                                         continue;
1654                                 }
1655                                 (void)skb_put(dd->outgoing_skb,
1656                                               NL_SIZE(dd->outgoing_skb->data));
1657                                 ret = genlmsg_multicast(dd->outgoing_skb, 0,
1658                                                 dd->nl_mc_groups[MC_FUSION].id,
1659                                                 GFP_KERNEL);
1660                                 if (ret < 0) {
1661                                         ERROR("can't send resume message %d",
1662                                               ret);
1663                                         msleep(100);
1664                                 }
1665                                 ret2 = nl_msg_new(dd, MC_FUSION);
1666                                 if (ret2 < 0)
1667                                         ERROR("could not allocate outgoing " \
1668                                               "skb (%d)", ret2);
1669                         } while (ret != 0 && !kthread_should_stop());
1670                         if (kthread_should_stop())
1671                                 break;
1672                         if (ret == 0)
1673                                 INFO("%s: resumed.", __func__);
1674                 }
1675
1676                 /* priority 4: service interrupt */
1677                 if (dd->irq_registered && pdata->irq(pdata) == 0)
1678                         dd->service_irq(dd);
1679                 if (dd->irq_registered && pdata->irq(pdata) == 0)
1680                         continue;
1681
1682                 /* nothing more to do; sleep */
1683                 schedule();
1684         }
1685
1686         return 0;
1687 }
1688
1689 /****************************************************************************\
1690 * Driver initialization                                                      *
1691 \****************************************************************************/
1692
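/*
 * probe() requires fully populated platform data (validated below).  An
 * illustrative sketch of a board definition is shown here; the field names
 * follow their use in this file, the exact types live in
 * <linux/maxim_sti.h>, and all values and the board_maxim_*() callbacks
 * are hypothetical:
 *
 *	static struct maxim_sti_pdata example_pdata = {
 *		.touch_fusion        = "/vendor/bin/touch_fusion",
 *		.config_file         = "/vendor/firmware/touch_fusion.cfg",
 *		.nl_family           = "touch_fusion",
 *		.nl_mc_groups        = MC_REQUIRED_GROUPS,
 *		.chip_access_method  = 1,
 *		.default_reset_state = 0,
 *		.tx_buf_size         = 4096,
 *		.rx_buf_size         = 4096,
 *		.init                = board_maxim_init,
 *		.reset               = board_maxim_reset,
 *		.irq                 = board_maxim_irq,
 *	};
 */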
1693 static int probe(struct spi_device *spi)
1694 {
1695         struct maxim_sti_pdata  *pdata = spi->dev.platform_data;
1696         struct dev_data         *dd;
1697         unsigned long           flags;
1698         int                     ret, i;
1699         void                    *ptr;
1700
1701         /* validate platform data */
1702         if (pdata == NULL || pdata->init == NULL || pdata->reset == NULL ||
1703                 pdata->irq == NULL || pdata->touch_fusion == NULL ||
1704                 pdata->config_file == NULL || pdata->nl_family == NULL ||
1705                 GENL_CHK(pdata->nl_family) ||
1706                 pdata->nl_mc_groups < MC_REQUIRED_GROUPS ||
1707                 pdata->chip_access_method == 0 ||
1708                 pdata->chip_access_method > ARRAY_SIZE(chip_access_methods) ||
1709                 pdata->default_reset_state > 1)
1710                         return -EINVAL;
1711
1712         /* device context: allocate structure */
1713         dd = kzalloc(sizeof(*dd) + pdata->tx_buf_size + pdata->rx_buf_size +
1714                      sizeof(*dd->nl_ops) * pdata->nl_mc_groups +
1715                      sizeof(*dd->nl_mc_groups) * pdata->nl_mc_groups,
1716                      GFP_KERNEL);
1717         if (dd == NULL)
1718                 return -ENOMEM;
1719
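        /*
         * The single allocation above is carved up below as:
         * [struct dev_data][tx_buf][rx_buf][nl_ops array][nl_mc_groups array]
         */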
1720         /* device context: set up dynamic allocation pointers */
1721         ptr = (void *)dd + sizeof(*dd);
1722         if (pdata->tx_buf_size > 0) {
1723                 dd->tx_buf = ptr;
1724                 ptr += pdata->tx_buf_size;
1725         }
1726         if (pdata->rx_buf_size > 0) {
1727                 dd->rx_buf = ptr;
1728                 ptr += pdata->rx_buf_size;
1729         }
1730         dd->nl_ops = ptr;
1731         ptr += sizeof(*dd->nl_ops) * pdata->nl_mc_groups;
1732         dd->nl_mc_groups = ptr;
1733
1734         /* device context: initialize structure members */
1735         spi_set_drvdata(spi, dd);
1736         dd->spi = spi;
1737         dd->nl_seq = 1;
1738         init_completion(&dd->suspend_resume);
1739         memset(dd->tx_buf, 0xFF, pdata->tx_buf_size);
1740         (void)set_chip_access_method(dd, pdata->chip_access_method);
1741
1742         /* initialize regulators */
1743         regulator_init(dd);
1744
1745         /* initialize platform */
1746         ret = pdata->init(pdata, true);
1747         if (ret < 0)
1748                 goto platform_failure;
1749
1750         /* power-up and reset-high */
1751         ret = regulator_control(dd, true);
1752         if (ret < 0)
1753                 goto platform_failure;
1754         usleep_range(300, 400);
1755         pdata->reset(pdata, 1);
1756
1757         /* start processing thread */
1758         dd->thread_sched.sched_priority = MAX_USER_RT_PRIO / 2;
1759         dd->thread = kthread_run(processing_thread, dd, pdata->nl_family);
1760         if (IS_ERR(dd->thread)) {
1761                 ret = PTR_ERR(dd->thread);
1762                 goto platform_failure;
1763         }
1764
1765         /* Netlink: register GENL family */
1766         dd->nl_family.id      = GENL_ID_GENERATE;
1767         dd->nl_family.version = NL_FAMILY_VERSION;
1768         GENL_COPY(dd->nl_family.name, pdata->nl_family);
1769         ret = genl_register_family(&dd->nl_family);
1770         if (ret < 0)
1771                 goto nl_family_failure;
1772
1773         /* Netlink: register family ops */
1774         for (i = 0; i < MC_REQUIRED_GROUPS; i++) {
1775                 dd->nl_ops[i].cmd = i;
1776                 dd->nl_ops[i].doit = nl_callback_noop;
1777         }
1778         dd->nl_ops[MC_DRIVER].doit = nl_callback_driver;
1779         dd->nl_ops[MC_FUSION].doit = nl_callback_fusion;
1780         for (i = 0; i < MC_REQUIRED_GROUPS; i++) {
1781                 ret = genl_register_ops(&dd->nl_family, &dd->nl_ops[i]);
1782                 if (ret < 0)
1783                         goto nl_failure;
1784         }
1785
1786         /* Netlink: register family multicast groups */
1787         GENL_COPY(dd->nl_mc_groups[MC_DRIVER].name, MC_DRIVER_NAME);
1788         GENL_COPY(dd->nl_mc_groups[MC_FUSION].name, MC_FUSION_NAME);
1789         for (i = 0; i < MC_REQUIRED_GROUPS; i++) {
1790                 ret = genl_register_mc_group(&dd->nl_family,
1791                                              &dd->nl_mc_groups[i]);
1792                 if (ret < 0)
1793                         goto nl_failure;
1794         }
1795         dd->nl_mc_group_count = MC_REQUIRED_GROUPS;
1796
1797         /* Netlink: pre-allocate outgoing skb */
1798         ret = nl_msg_new(dd, MC_FUSION);
1799         if (ret < 0)
1800                 goto nl_failure;
1801
1802         /* Netlink: initialize incoming skb queue */
1803         skb_queue_head_init(&dd->incoming_skb_queue);
1804
1805         /* Netlink: ready to start processing incoming messages */
1806         dd->nl_enabled = true;
1807
1808         /* add us to the devices list */
1809         spin_lock_irqsave(&dev_lock, flags);
1810         list_add_tail(&dd->dev_list, &dev_list);
1811         spin_unlock_irqrestore(&dev_lock, flags);
1812
1813 #if NV_ENABLE_CPU_BOOST
1814         dd->last_irq_jiffies = jiffies;
1815 #endif
1816
1817 #ifdef CONFIG_PM_WAKELOCKS
1818         wakeup_source_init(&dd->ws, "touch_fusion");
1819         __pm_stay_awake(&dd->ws);
1820 #endif
1821         /* start up Touch Fusion */
1822         dd->start_fusion = true;
1823         wake_up_process(dd->thread);
1824         INFO("driver loaded; version %s; release date %s", DRIVER_VERSION,
1825              DRIVER_RELEASE);
1826
1827         return 0;
1828
1829 nl_failure:
1830         genl_unregister_family(&dd->nl_family);
1831 nl_family_failure:
1832         (void)kthread_stop(dd->thread);
1833 platform_failure:
1834         pdata->init(pdata, false);
1835         kfree(dd);
1836         return ret;
1837 }
1838
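/*
 * Driver removal: quiesce the interrupt, stop the processing thread and the
 * touch_fusion daemon, then tear down Netlink, the input devices and power
 * in the staged order documented below.
 */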
1839 static int remove(struct spi_device *spi)
1840 {
1841         struct maxim_sti_pdata  *pdata = spi->dev.platform_data;
1842         struct dev_data         *dd = spi_get_drvdata(spi);
1843         unsigned long           flags;
1844         u8                      i;
1845
1846         INFO("removing...\n");
1847
1848         if (dd->irq_registered)
1849                 disable_irq(dd->spi->irq);
1850
1851         dd->nl_enabled = false;
1852         (void)kthread_stop(dd->thread);
1853
1854         if (dd->fusion_process != (pid_t)0)
1855                 (void)kill_pid(find_get_pid(dd->fusion_process), SIGKILL, 1);
1856
1857         /* BEWARE: tear-down sequence below is carefully staged:            */
1858         /* 1) first the feeder of Netlink messages to the processing thread */
1859         /*    is turned off                                                 */
1860         /* 2) then the thread itself is shut down                           */
1861         /* 3) then Netlink family is torn down since no one would be using  */
1862         /*    it at this point                                              */
1863         /* 4) above step (3) ensures that all Netlink senders are           */
1864         /*    definitely gone and it is safe to free up outgoing skb buffer */
1865         /*    and incoming skb queue                                        */
1866         genl_unregister_family(&dd->nl_family);
1867         kfree_skb(dd->outgoing_skb);
1868         skb_queue_purge(&dd->incoming_skb_queue);
1869
1870         for (i = 0; i < INPUT_DEVICES; i++)
1871                 if (dd->input_dev[i])
1872                         input_unregister_device(dd->input_dev[i]);
1873
1874         if (dd->irq_registered)
1875                 free_irq(dd->spi->irq, dd);
1876
1877         stop_scan_canned(dd);
1878
1879         spin_lock_irqsave(&dev_lock, flags);
1880         list_del(&dd->dev_list);
1881         spin_unlock_irqrestore(&dev_lock, flags);
1882
1883         pdata->reset(pdata, 0);
1884         usleep_range(100, 120);
1885         regulator_control(dd, false);
1886         pdata->init(pdata, false);
1887
1888         kfree(dd);
1889
1890         INFO("driver unloaded");
1891         return 0;
1892 }
1893
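/*
 * System shutdown: unlike remove(), the Netlink family and input devices
 * stay registered; the chip is simply quiesced, held in reset and powered
 * off.
 */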
1894 static void shutdown(struct spi_device *spi)
1895 {
1896         struct maxim_sti_pdata  *pdata = spi->dev.platform_data;
1897         struct dev_data         *dd = spi_get_drvdata(spi);
1898
1899         INFO("doing shutdown...\n");
1900
1901         if (dd->irq_registered)
1902                 disable_irq(dd->spi->irq);
1903
1904         dd->nl_enabled = false;
1905         (void)kthread_stop(dd->thread);
1906
1907         if (dd->fusion_process != (pid_t)0)
1908                 (void)kill_pid(find_get_pid(dd->fusion_process), SIGKILL, 1);
1909
1910         stop_scan_canned(dd);
1911
1912         pdata->reset(pdata, 0);
1913         usleep_range(100, 120);
1914         regulator_control(dd, false);
1915 }
1916
1917 /****************************************************************************\
1918 * Module initialization                                                      *
1919 \****************************************************************************/
1920
1921 static const struct spi_device_id id[] = {
1922         { MAXIM_STI_NAME, 0 },
1923         { }
1924 };
1925
1926 MODULE_DEVICE_TABLE(spi, id);
1927
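/*
 * Note: pm_ops is only wired up when CONFIG_PM_SLEEP is enabled and the
 * driver is built without INPUT_ENABLE_DISABLE; otherwise suspend/resume
 * is presumably driven through the input enable/disable path instead.
 */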
1928 static struct spi_driver driver = {
1929         .probe          = probe,
1930         .remove         = remove,
1931         .shutdown       = shutdown,
1932         .id_table       = id,
1933         .driver = {
1934                 .name   = MAXIM_STI_NAME,
1935                 .owner  = THIS_MODULE,
1936 #if defined(CONFIG_PM_SLEEP) && !INPUT_ENABLE_DISABLE
1937                 .pm     = &pm_ops,
1938 #endif
1939         },
1940 };
1941
1942 static int __init maxim_sti_init(void)
1943 {
1944         INIT_LIST_HEAD(&dev_list);
1945         spin_lock_init(&dev_lock);
1946         return spi_register_driver(&driver);
1947 }
1948
1949 static void __exit maxim_sti_exit(void)
1950 {
1951         spi_unregister_driver(&driver);
1952 }
1953
1954 module_init(maxim_sti_init);
1955 module_exit(maxim_sti_exit);
1956
1957 MODULE_AUTHOR("Maxim Integrated Products, Inc.");
1958 MODULE_DESCRIPTION("Maxim SmartTouch Imager Touchscreen Driver");
1959 MODULE_LICENSE("GPL v2");
1960 MODULE_VERSION(DRIVER_VERSION);
1961