/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 *
 * 10/10/2000   Nicolas Pitre <nico@fluxnic.net>
 *      - completely revamped method functions so they are aware and
 *        independent of the flash geometry (buswidth, interleave, etc.)
 *      - scalability vs code size is completely set at compile-time
 *        (see include/linux/mtd/cfi.h for selection)
 *      - optimized write buffer method
 * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *      - reworked lock/unlock/erase support for var size flash
 * 21/03/2007   Rodolfo Giometti <giometti@linux.it>
 *      - auto unlock sectors on resume for auto locking flash on power up
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/bitmap.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

/* debugging, turns off buffer write mode if set to 1 */
#define FORCE_WORD_WRITE 0

/* Intel chips */
#define I82802AB        0x00ad
#define I82802AC        0x00ac
#define PF38F4476       0x881c
/* STMicroelectronics chips */
#define M50LPW080       0x002F
#define M50FLW080A      0x0080
#define M50FLW080B      0x0081
/* Atmel chips */
#define AT49BV640D      0x02de
#define AT49BV640DT     0x02db

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
                     size_t *retlen, void **virt, resource_size_t *phys);
static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);

static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
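/*
 * fwh_lock.h supplies the fixup_use_fwh_lock() helper referenced by
 * jedec_fixup_table below; it overrides the lock/unlock methods for
 * the firmware-hub style parts (i82802AB/AC, M50LPW/M50FLW) listed
 * there.
 */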
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_intelext_destroy,
        .name           = "cfi_cmdset_0001",
        .module         = THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
        printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
        printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
        printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
        printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
        printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
        printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
        printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
        printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
        printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
        printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
        printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
        printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
        for (i=11; i<32; i++) {
                if (extp->FeatureSupport & (1<<i))
                        printk("     - Unknown Bit %X:      supported\n", i);
        }

        printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
        printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
        for (i=1; i<8; i++) {
                if (extp->SuspendCmdSupport & (1<<i))
                        printk("     - Unknown Bit %X:               supported\n", i);
        }

        printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
        printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
        printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
        for (i=2; i<3; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }
        printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
        printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
        for (i=6; i<16; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }

        printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
               extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
        if (extp->VppOptimal)
                printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
                       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

/* Atmel chips don't use the same PRI format as Intel chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
        struct cfi_pri_atmel atmel_pri;
        uint32_t features = 0;

        /* Reverse byteswapping */
        extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
        extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
        extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);

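        /*
         * Save the Atmel-format PRI, then wipe everything past the
         * 5-byte PRI header ("P", "R", "I", major, minor), which stays
         * valid; the Intel-format feature bits are rebuilt below.
         */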
        memcpy(&atmel_pri, extp, sizeof(atmel_pri));
        memset((char *)extp + 5, 0, sizeof(*extp) - 5);

        printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);

        if (atmel_pri.Features & 0x01) /* chip erase supported */
                features |= (1<<0);
        if (atmel_pri.Features & 0x02) /* erase suspend supported */
                features |= (1<<1);
        if (atmel_pri.Features & 0x04) /* program suspend supported */
                features |= (1<<2);
        if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
                features |= (1<<9);
        if (atmel_pri.Features & 0x20) /* page mode read supported */
                features |= (1<<7);
        if (atmel_pri.Features & 0x40) /* queued erase supported */
                features |= (1<<4);
        if (atmel_pri.Features & 0x80) /* Protection bits supported */
                features |= (1<<6);

        extp->FeatureSupport = features;

        /* burst write mode not supported */
        cfi->cfiq->BufWriteTimeoutTyp = 0;
        cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_at49bv640dx_lock(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        cfip->FeatureSupport |= (1 << 5);
        mtd->flags |= MTD_POWERUP_LOCK;
}

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel StrataFlash chips prior to FPO revision C have bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
                            "erase on write disabled.\n");
        extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip && (cfip->FeatureSupport&4)) {
                cfip->FeatureSupport &= ~4;
                printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
        }
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
        cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /* Note this is done after the region info is endian swapped */
        cfi->cfiq->EraseRegionInfo[1] =
                (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        if (!mtd->point && map_is_linear(map)) {
                mtd->point   = cfi_intelext_point;
                mtd->unpoint = cfi_intelext_unpoint;
        }
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                printk(KERN_INFO "Using buffer write method\n");
                mtd->write = cfi_intelext_write_buffers;
                mtd->writev = cfi_intelext_writev;
        }
}

/*
 * Some chips power-up with all sectors locked by default.
 */
static void fixup_unlock_powerup_lock(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip->FeatureSupport&32) {
                printk(KERN_INFO "Using auto-unlock on power-up/resume\n");
                mtd->flags |= MTD_POWERUP_LOCK;
        }
}

static struct cfi_fixup cfi_fixup_table[] = {
        { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
        { CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock, NULL },
        { CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock, NULL },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
        { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock, NULL, },
        { 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
        { CFI_MFR_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
        { CFI_MFR_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
        { CFI_MFR_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
        { CFI_MFR_ST,    M50FLW080A, fixup_use_fwh_lock, NULL, },
        { CFI_MFR_ST,    M50FLW080B, fixup_use_fwh_lock, NULL, },
        { 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor ids and the JEDEC vendor IDs appear
         * to be common.  It is likely that the device ids are
         * common as well.  This table picks up all the cases
         * where we know that to be true.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
        { 0, 0, NULL, NULL }
};

static void cfi_fixup_major_minor(struct cfi_private *cfi,
                                  struct cfi_pri_intelext *extp)
{
        if (cfi->mfr == CFI_MFR_INTEL &&
            cfi->id == PF38F4476 && extp->MinorVersion == '3')
                extp->MinorVersion = '1';
}

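/*
 * The extended query table is variable length: a fixed header is
 * followed by version-dependent protection, burst read and partition
 * records.  We read sizeof(*extp) bytes first and walk those records;
 * whenever the walk runs past the end of the buffer we jump to
 * need_more, free the buffer and re-read it at the larger size,
 * giving up beyond an arbitrary 4096-byte cap.
 */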
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp;
        unsigned int extra_size = 0;
        unsigned int extp_size = sizeof(*extp);

 again:
        extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
        if (!extp)
                return NULL;

        cfi_fixup_major_minor(cfi, extp);

        if (extp->MajorVersion != '1' ||
            (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
                printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
                       "version %c.%c.\n",  extp->MajorVersion,
                       extp->MinorVersion);
                kfree(extp);
                return NULL;
        }

        /* Do some byteswapping if necessary */
        extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
        extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
        extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

        if (extp->MinorVersion >= '0') {
                extra_size = 0;

                /* Protection Register info */
                extra_size += (extp->NumProtectionFields - 1) *
                              sizeof(struct cfi_intelext_otpinfo);
        }

        if (extp->MinorVersion >= '1') {
                /* Burst Read info */
                extra_size += 2;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                extra_size += extp->extra[extra_size - 1];
        }

        if (extp->MinorVersion >= '3') {
                int nb_parts, i;

                /* Number of hardware-partitions */
                extra_size += 1;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                nb_parts = extp->extra[extra_size - 1];

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        extra_size += 2;

                for (i = 0; i < nb_parts; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
                        extra_size += sizeof(*rinfo);
                        if (extp_size < sizeof(*extp) + extra_size)
                                goto need_more;
                        rinfo->NumIdentPartitions = le16_to_cpu(rinfo->NumIdentPartitions);
                        extra_size += (rinfo->NumBlockTypes - 1)
                                      * sizeof(struct cfi_intelext_blockinfo);
                }

                if (extp->MinorVersion >= '4')
                        extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

                if (extp_size < sizeof(*extp) + extra_size) {
                        need_more:
                        extp_size = sizeof(*extp) + extra_size;
                        kfree(extp);
                        if (extp_size > 4096) {
                                printk(KERN_ERR
                                        "%s: cfi_pri_intelext is too fat\n",
                                        __func__);
                                return NULL;
                        }
                        goto again;
                }
        }

        return extp;
}

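/*
 * Entry point for this command set.  A map driver normally reaches it
 * via do_map_probe("cfi_probe", map): the generic probe code reads the
 * CFI query table and dispatches on the primary (or alternate) vendor
 * command set ID, 0x0001 here.
 */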
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd) {
                printk(KERN_ERR "Failed to allocate memory for MTD device\n");
                return NULL;
        }
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->erase   = cfi_intelext_erase_varsize;
        mtd->read    = cfi_intelext_read;
        mtd->write   = cfi_intelext_write_words;
        mtd->sync    = cfi_intelext_sync;
        mtd->lock    = cfi_intelext_lock;
        mtd->unlock  = cfi_intelext_unlock;
        mtd->suspend = cfi_intelext_suspend;
        mtd->resume  = cfi_intelext_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;
        mtd->writesize = 1;

        mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                /*
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure. So we read the feature
                 * table from it.
                 */
                __u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
                struct cfi_pri_intelext *extp;

                extp = read_pri_intelext(map, adr);
                if (!extp) {
                        kfree(mtd);
                        return NULL;
                }

                /* Install our own private info structure */
                cfi->cmdset_priv = extp;

                cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif

                if (extp->SuspendCmdSupport & 1) {
                        printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
                }
        }
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

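        /*
         * CFI encodes timings as powers of two: the typical word write
         * time is 2^WordWriteTimeoutTyp us and the worst case is the
         * typical value shifted left by the corresponding ...Max field.
         * Block erase times are encoded the same way but in ms, hence
         * the 1000<< scaling below.  The fallbacks are generous guesses
         * for chips that leave these fields empty.
         */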
        for (i = 0; i < cfi->numchips; i++) {
                if (cfi->cfiq->WordWriteTimeoutTyp)
                        cfi->chips[i].word_write_time =
                                1<<cfi->cfiq->WordWriteTimeoutTyp;
                else
                        cfi->chips[i].word_write_time = 50000;

                if (cfi->cfiq->BufWriteTimeoutTyp)
                        cfi->chips[i].buffer_write_time =
                                1<<cfi->cfiq->BufWriteTimeoutTyp;
                /* No default; if it isn't specified, we won't use it */

                if (cfi->cfiq->BlockEraseTimeoutTyp)
                        cfi->chips[i].erase_time =
                                1000<<cfi->cfiq->BlockEraseTimeoutTyp;
                else
                        cfi->chips[i].erase_time = 2000000;

                if (cfi->cfiq->WordWriteTimeoutTyp &&
                    cfi->cfiq->WordWriteTimeoutMax)
                        cfi->chips[i].word_write_time_max =
                                1<<(cfi->cfiq->WordWriteTimeoutTyp +
                                    cfi->cfiq->WordWriteTimeoutMax);
                else
                        cfi->chips[i].word_write_time_max = 50000 * 8;

                if (cfi->cfiq->BufWriteTimeoutTyp &&
                    cfi->cfiq->BufWriteTimeoutMax)
                        cfi->chips[i].buffer_write_time_max =
                                1<<(cfi->cfiq->BufWriteTimeoutTyp +
                                    cfi->cfiq->BufWriteTimeoutMax);

                if (cfi->cfiq->BlockEraseTimeoutTyp &&
                    cfi->cfiq->BlockEraseTimeoutMax)
                        cfi->chips[i].erase_time_max =
                                1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
                                       cfi->cfiq->BlockEraseTimeoutMax);
                else
                        cfi->chips[i].erase_time_max = 2000000 * 8;

                cfi->chips[i].ref_point_counter = 0;
                init_waitqueue_head(&(cfi->chips[i].wq));
        }

        map->fldrv = &cfi_intelext_chipdrv;

        return cfi_intelext_setup(mtd);
}
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long offset = 0;
        int i, j;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

        //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
                        * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions) {
                printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
                goto setup_err;
        }

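        /*
         * Each 32-bit EraseRegionInfo entry packs the region geometry:
         * bits 31:16 hold the block size in units of 256 bytes and
         * bits 15:0 hold the number of blocks minus one, so
         * (info >> 8) & ~0xff recovers the block size in bytes.
         */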
        for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j=0; j<cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
                }
                offset += (ersize * ernum);
        }

        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        for (i=0; i<mtd->numeraseregions; i++) {
                printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
                       i, (unsigned long long)mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }

#ifdef CONFIG_MTD_OTP
        mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
        mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
        mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
        mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
        mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
        mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

        /* This function has the potential to distort the reality
           a bit and therefore should be called last. */
        if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
                goto setup_err;

        __module_get(THIS_MODULE);
        register_reboot_notifier(&mtd->reboot_notifier);
        return mtd;

 setup_err:
        kfree(mtd->eraseregions);
        kfree(mtd);
        kfree(cfi->cmdset_priv);
        return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
                                        struct cfi_private **pcfi)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = *pcfi;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /*
         * Probing of multi-partition flash chips.
         *
         * To support multiple partitions when available, we simply arrange
         * for each of them to have its own flchip structure even if they
         * are on the same physical chip.  This means completely recreating
         * a new cfi_private structure right here which is a blatant code
         * layering violation, but this is still the least intrusive
         * arrangement at this point. This can be rearranged in the future
         * if someone feels motivated enough.  --nico
         */
        if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
            && extp->FeatureSupport & (1 << 9)) {
                struct cfi_private *newcfi;
                struct flchip *chip;
                struct flchip_shared *shared;
                int offs, numregions, numparts, partshift, numvirtchips, i, j;

                /* Protection Register info */
                offs = (extp->NumProtectionFields - 1) *
                       sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                offs += extp->extra[offs+1]+2;

                /* Number of partition regions */
                numregions = extp->extra[offs];
                offs += 1;

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        offs += 2;

                /* Number of hardware partitions */
                numparts = 0;
                for (i = 0; i < numregions; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
                        numparts += rinfo->NumIdentPartitions;
                        offs += sizeof(*rinfo)
                                + (rinfo->NumBlockTypes - 1) *
                                  sizeof(struct cfi_intelext_blockinfo);
                }

                if (!numparts)
                        numparts = 1;

                /* Programming Region info */
                if (extp->MinorVersion >= '4') {
                        struct cfi_intelext_programming_regioninfo *prinfo;
                        prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
                        mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
                        mtd->flags &= ~MTD_BIT_WRITEABLE;
                        printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
                               map->name, mtd->writesize,
                               cfi->interleave * prinfo->ControlValid,
                               cfi->interleave * prinfo->ControlInvalid);
                }

                /*
                 * All functions below currently rely on all chips having
                 * the same geometry so we'll just assume that all hardware
                 * partitions are of the same size too.
                 */
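                /*
                 * numparts is assumed to be a power of two here, so
                 * __ffs(numparts) is its log2 and partshift splits each
                 * chip's address space into equal-sized partitions.
                 */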
                partshift = cfi->chipshift - __ffs(numparts);

                if ((1 << partshift) < mtd->erasesize) {
                        printk(KERN_ERR
                                "%s: bad number of hw partitions (%d)\n",
                                __func__, numparts);
                        return -EINVAL;
                }

                numvirtchips = cfi->numchips * numparts;
                newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
                if (!newcfi)
                        return -ENOMEM;
                shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
                if (!shared) {
                        kfree(newcfi);
                        return -ENOMEM;
                }
                memcpy(newcfi, cfi, sizeof(struct cfi_private));
                newcfi->numchips = numvirtchips;
                newcfi->chipshift = partshift;

                chip = &newcfi->chips[0];
                for (i = 0; i < cfi->numchips; i++) {
                        shared[i].writing = shared[i].erasing = NULL;
                        spin_lock_init(&shared[i].lock);
                        for (j = 0; j < numparts; j++) {
                                *chip = cfi->chips[i];
                                chip->start += j << partshift;
                                chip->priv = &shared[i];
                                /* those should be reset too since
                                   they create memory references. */
                                init_waitqueue_head(&chip->wq);
                                spin_lock_init(&chip->_spinlock);
                                chip->mutex = &chip->_spinlock;
                                chip++;
                        }
                }

                printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
                                  "--> %d partitions of %d KiB\n",
                                  map->name, cfi->numchips, cfi->interleave,
                                  newcfi->numchips, 1<<(newcfi->chipshift-10));

                map->fldrv_priv = newcfi;
                *pcfi = newcfi;
                kfree(cfi);
        }

        return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */
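/*
 * Intel command-set opcodes used throughout this file:
 *   0x70 Read Status Register, 0xff Read Array, 0xb0 Program/Erase
 *   Suspend, 0xd0 Resume (also Confirm).  In the status register,
 *   bit 7 (the CMD(0x80) masks below) is the "write state machine
 *   ready" flag we poll on.
 */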
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        unsigned long timeo = jiffies + HZ;

        /* Prevent setting state FL_SYNCING for chip in suspended state. */
        if (mode == FL_SYNCING && chip->oldstate != FL_READY)
                goto sleep;

        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        /* At this point we're fine with write operations
                           in other partitions as they don't conflict. */
                        if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
                                break;

                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Someone else might have been playing with it. */
                        return -EAGAIN;
                }
                /* Fall through */
        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip ||
                    !(cfip->FeatureSupport & 2) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
                        goto sleep;


                /* Erase suspend */
                map_write(map, CMD(0xB0), adr);

                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh. Resume and pretend we weren't here.  */
                                map_write(map, CMD(0xd0), adr);
                                /* Make sure we're in 'read status' mode if it had finished */
                                map_write(map, CMD(0x70), adr);
                                chip->state = FL_ERASING;
                                chip->oldstate = FL_READY;
                                printk(KERN_ERR "%s: Chip not ready after erase "
                                       "suspended: status = 0x%lx\n", map->name, status.x[0]);
                                return -EIO;
                        }

                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_STATUS;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_SHUTDOWN:
                /* The machine is rebooting now, so no one can get the chip anymore */
                return -EIO;
        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;
                /* Fall through */
        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                spin_unlock(chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                spin_lock(chip->mutex);
                return -EAGAIN;
        }
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        int ret;
        DECLARE_WAITQUEUE(wait, current);

 retry:
        if (chip->priv &&
            (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
            || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
                /*
                 * OK. We have possibility for contention on the write/erase
                 * operations which are global to the real chip and not per
                 * partition.  So let's fight it over in the partition which
                 * currently has authority on the operation.
                 *
                 * The rules are as follows:
                 *
                 * - any write operation must own shared->writing.
                 *
                 * - any erase operation must own _both_ shared->writing and
                 *   shared->erasing.
                 *
                 * - contention arbitration is handled in the owner's context.
                 *
                 * The 'shared' struct can be read and/or written only when
                 * its lock is taken.
                 */
                struct flchip_shared *shared = chip->priv;
                struct flchip *contender;
                spin_lock(&shared->lock);
                contender = shared->writing;
                if (contender && contender != chip) {
                        /*
                         * The engine to perform desired operation on this
                         * partition is already in use by someone else.
                         * Let's fight over it in the context of the chip
                         * currently using it.  If it is possible to suspend,
                         * that other partition will do just that, otherwise
                         * it'll happily send us to sleep.  In any case, when
                         * get_chip returns success we're clear to go ahead.
                         */
                        ret = spin_trylock(contender->mutex);
                        spin_unlock(&shared->lock);
                        if (!ret)
                                goto retry;
                        spin_unlock(chip->mutex);
                        ret = chip_ready(map, contender, contender->start, mode);
                        spin_lock(chip->mutex);

                        if (ret == -EAGAIN) {
                                spin_unlock(contender->mutex);
                                goto retry;
                        }
                        if (ret) {
                                spin_unlock(contender->mutex);
                                return ret;
                        }
                        spin_lock(&shared->lock);

                        /* We should not own chip if it is already
                         * in FL_SYNCING state. Put contender and retry. */
                        if (chip->state == FL_SYNCING) {
                                put_chip(map, contender, contender->start);
                                spin_unlock(contender->mutex);
                                goto retry;
                        }
                        spin_unlock(contender->mutex);
                }

                /* Check if we already have suspended erase
                 * on this chip. Sleep. */
                if (mode == FL_ERASING && shared->erasing
                    && shared->erasing->oldstate == FL_ERASING) {
                        spin_unlock(&shared->lock);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        spin_unlock(chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        spin_lock(chip->mutex);
                        goto retry;
                }

                /* We now own it */
                shared->writing = chip;
                if (mode == FL_ERASING)
                        shared->erasing = chip;
                spin_unlock(&shared->lock);
        }
        ret = chip_ready(map, chip, adr, mode);
        if (ret == -EAGAIN)
                goto retry;

        return ret;
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->priv) {
                struct flchip_shared *shared = chip->priv;
                spin_lock(&shared->lock);
                if (shared->writing == chip && chip->oldstate == FL_READY) {
                        /* We own the ability to write, but we're done */
                        shared->writing = shared->erasing;
                        if (shared->writing && shared->writing != chip) {
                                /* give back ownership to who we loaned it from */
                                struct flchip *loaner = shared->writing;
                                spin_lock(loaner->mutex);
                                spin_unlock(&shared->lock);
                                spin_unlock(chip->mutex);
                                put_chip(map, loaner, loaner->start);
                                spin_lock(chip->mutex);
                                spin_unlock(loaner->mutex);
                                wake_up(&chip->wq);
                                return;
                        }
                        shared->erasing = NULL;
                        shared->writing = NULL;
                } else if (shared->erasing == chip && shared->writing != chip) {
                        /*
                         * We own the ability to erase without the ability
                         * to write, which means the erase was suspended
                         * and some other partition is currently writing.
                         * Don't let the switch below mess things up since
                         * we don't have ownership to resume anything.
                         */
                        spin_unlock(&shared->lock);
                        wake_up(&chip->wq);
                        return;
                }
                spin_unlock(&shared->lock);
        }

        switch (chip->oldstate) {
        case FL_ERASING:
                chip->state = chip->oldstate;
                /* What if one interleaved chip has finished and the
                   other hasn't? The old code would leave the finished
                   one in READY mode. That's bad, and caused -EROFS
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
                map_write(map, CMD(0xd0), adr);
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
        case FL_JEDEC_QUERY:
                /* We should really make set_vpp() count, rather than doing this */
                DISABLE_VPP(map);
                break;
        default:
                printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
        }
        wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Also, configuring MTD
 * CFI support for a single buswidth and a single interleave is recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        xip_iprefetch();
        local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function polls both for expiry of the given
 * timeout and for pending (but still masked) hardware interrupts.  Whenever
 * there is an interrupt pending, the flash erase or write operation is
 * suspended, array mode restored and interrupts unmasked.  Task scheduling
 * might also happen at that point.  The CPU eventually returns from the
 * interrupt or the call to schedule() and the suspended flash operation is
 * resumed for the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */
1077
1078 static int __xipram xip_wait_for_operation(
1079                 struct map_info *map, struct flchip *chip,
1080                 unsigned long adr, unsigned int chip_op_time_max)
1081 {
1082         struct cfi_private *cfi = map->fldrv_priv;
1083         struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
1084         map_word status, OK = CMD(0x80);
1085         unsigned long usec, suspended, start, done;
1086         flstate_t oldstate, newstate;
1087
1088         start = xip_currtime();
1089         usec = chip_op_time_max;
1090         if (usec == 0)
1091                 usec = 500000;
1092         done = 0;
1093
1094         do {
1095                 cpu_relax();
1096                 if (xip_irqpending() && cfip &&
1097                     ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
1098                      (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
1099                     (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
1100                         /*
1101                          * Let's suspend the erase or write operation when
1102                          * supported.  Note that we currently don't try to
1103                          * suspend interleaved chips if there is already
1104                          * another operation suspended (imagine what happens
1105                          * when one chip was already done with the current
1106                          * operation while another chip suspended it, then
1107                          * we resume the whole thing at once).  Yes, it
1108                          * can happen!
1109                          */
1110                         usec -= done;
1111                         map_write(map, CMD(0xb0), adr);
1112                         map_write(map, CMD(0x70), adr);
1113                         suspended = xip_currtime();
1114                         do {
1115                                 if (xip_elapsed_since(suspended) > 100000) {
1116                                         /*
1117                                          * The chip doesn't want to suspend
1118                                          * after waiting for 100 msecs.
1119                                          * This is a critical error but there
1120                                          * is not much we can do here.
1121                                          */
1122                                         return -EIO;
1123                                 }
1124                                 status = map_read(map, adr);
1125                         } while (!map_word_andequal(map, status, OK, OK));
1126
1127                         /* Suspend succeeded */
1128                         oldstate = chip->state;
1129                         if (oldstate == FL_ERASING) {
1130                                 if (!map_word_bitsset(map, status, CMD(0x40)))
1131                                         break;
1132                                 newstate = FL_XIP_WHILE_ERASING;
1133                                 chip->erase_suspended = 1;
1134                         } else {
1135                                 if (!map_word_bitsset(map, status, CMD(0x04)))
1136                                         break;
1137                                 newstate = FL_XIP_WHILE_WRITING;
1138                                 chip->write_suspended = 1;
1139                         }
1140                         chip->state = newstate;
1141                         map_write(map, CMD(0xff), adr);
1142                         (void) map_read(map, adr);
1143                         xip_iprefetch();
1144                         local_irq_enable();
1145                         spin_unlock(chip->mutex);
1146                         xip_iprefetch();
1147                         cond_resched();
1148
1149                         /*
1150                          * We're back.  However someone else might have
1151                          * decided to go write to the chip if we are in
1152                          * a suspended erase state.  If so let's wait
1153                          * until it's done.
1154                          */
1155                         spin_lock(chip->mutex);
1156                         while (chip->state != newstate) {
1157                                 DECLARE_WAITQUEUE(wait, current);
1158                                 set_current_state(TASK_UNINTERRUPTIBLE);
1159                                 add_wait_queue(&chip->wq, &wait);
1160                                 spin_unlock(chip->mutex);
1161                                 schedule();
1162                                 remove_wait_queue(&chip->wq, &wait);
1163                                 spin_lock(chip->mutex);
1164                         }
1165                         /* Disallow XIP again */
1166                         local_irq_disable();
1167
1168                         /* Resume the write or erase operation */
1169                         map_write(map, CMD(0xd0), adr);
1170                         map_write(map, CMD(0x70), adr);
1171                         chip->state = oldstate;
1172                         start = xip_currtime();
1173                 } else if (usec >= 1000000/HZ) {
1174                         /*
1175                          * Try to save on CPU power when waiting delay
1176                          * is at least a system timer tick period.
1177                          * No need to be extremely accurate here.
1178                          */
1179                         xip_cpu_idle();
1180                 }
1181                 status = map_read(map, adr);
1182                 done = xip_elapsed_since(start);
1183         } while (!map_word_andequal(map, status, OK, OK)
1184                  && done < usec);
1185
1186         return (done >= usec) ? -ETIME : 0;
1187 }
1188
1189 /*
1190  * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
1191  * the flash is actively programming or erasing since we have to poll for
1192  * the operation to complete anyway.  We can't do that in a generic way with
1193  * a XIP setup so do it before the actual flash operation in this case
1194  * and stub it out from INVAL_CACHE_AND_WAIT.
1195  */
1196 #define XIP_INVAL_CACHED_RANGE(map, from, size)  \
1197         INVALIDATE_CACHED_RANGE(map, from, size)
1198
1199 #define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
1200         xip_wait_for_operation(map, chip, cmd_adr, usec_max)
1201
1202 #else
1203
1204 #define xip_disable(map, chip, adr)
1205 #define xip_enable(map, chip, adr)
1206 #define XIP_INVAL_CACHED_RANGE(x...)
1207 #define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
1208
1209 static int inval_cache_and_wait_for_operation(
1210                 struct map_info *map, struct flchip *chip,
1211                 unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
1212                 unsigned int chip_op_time, unsigned int chip_op_time_max)
1213 {
1214         struct cfi_private *cfi = map->fldrv_priv;
1215         map_word status, status_OK = CMD(0x80);
1216         int chip_state = chip->state;
1217         unsigned int timeo, sleep_time, reset_timeo;
1218
1219         spin_unlock(chip->mutex);
1220         if (inval_len)
1221                 INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
1222         spin_lock(chip->mutex);
1223
1224         timeo = chip_op_time_max;
1225         if (!timeo)
1226                 timeo = 500000;
1227         reset_timeo = timeo;
1228         sleep_time = chip_op_time / 2;
1229
1230         for (;;) {
1231                 status = map_read(map, cmd_adr);
1232                 if (map_word_andequal(map, status, status_OK, status_OK))
1233                         break;
1234
1235                 if (!timeo) {
1236                         map_write(map, CMD(0x70), cmd_adr);
1237                         chip->state = FL_STATUS;
1238                         return -ETIME;
1239                 }
1240
		/* OK, still waiting. Drop the lock, wait a while and retry. */
1242                 spin_unlock(chip->mutex);
1243                 if (sleep_time >= 1000000/HZ) {
1244                         /*
1245                          * Half of the normal delay still remaining
1246                          * can be performed with a sleeping delay instead
1247                          * of busy waiting.
1248                          */
1249                         msleep(sleep_time/1000);
1250                         timeo -= sleep_time;
1251                         sleep_time = 1000000/HZ;
1252                 } else {
1253                         udelay(1);
1254                         cond_resched();
1255                         timeo--;
1256                 }
1257                 spin_lock(chip->mutex);
1258
1259                 while (chip->state != chip_state) {
1260                         /* Someone's suspended the operation: sleep */
1261                         DECLARE_WAITQUEUE(wait, current);
1262                         set_current_state(TASK_UNINTERRUPTIBLE);
1263                         add_wait_queue(&chip->wq, &wait);
1264                         spin_unlock(chip->mutex);
1265                         schedule();
1266                         remove_wait_queue(&chip->wq, &wait);
1267                         spin_lock(chip->mutex);
1268                 }
		if (chip->erase_suspended && chip_state == FL_ERASING) {
			/* Erase suspend occurred while we slept: reset timeout */
			timeo = reset_timeo;
			chip->erase_suspended = 0;
		}
		if (chip->write_suspended && chip_state == FL_WRITING) {
			/* Write suspend occurred while we slept: reset timeout */
			timeo = reset_timeo;
			chip->write_suspended = 0;
		}
1279         }
1280
1281         /* Done and happy. */
1282         chip->state = FL_STATUS;
1283         return 0;
1284 }
1285
1286 #endif
1287
#define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
	INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max)
1290
1291
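/*
 * Switch one chip into FL_POINT state so that callers may access the
 * flash contents directly through the memory map.  The chip is put
 * back into read array mode (0xff) if necessary, and a reference
 * counter keeps track of nested point requests.
 */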
1292 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
1293 {
1294         unsigned long cmd_addr;
1295         struct cfi_private *cfi = map->fldrv_priv;
1296         int ret = 0;
1297
1298         adr += chip->start;
1299
1300         /* Ensure cmd read/writes are aligned. */
1301         cmd_addr = adr & ~(map_bankwidth(map)-1);
1302
1303         spin_lock(chip->mutex);
1304
1305         ret = get_chip(map, chip, cmd_addr, FL_POINT);
1306
1307         if (!ret) {
1308                 if (chip->state != FL_POINT && chip->state != FL_READY)
1309                         map_write(map, CMD(0xff), cmd_addr);
1310
1311                 chip->state = FL_POINT;
1312                 chip->ref_point_counter++;
1313         }
1314         spin_unlock(chip->mutex);
1315
1316         return ret;
1317 }
1318
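/*
 * MTD point() method: hand out a direct pointer (and optionally the
 * physical address) into the mapped flash.  The request may span
 * several chips as long as they are virtually contiguous; it is
 * truncated at the first gap, with *retlen reporting how much was
 * actually made available.
 */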
1319 static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
1320                 size_t *retlen, void **virt, resource_size_t *phys)
1321 {
1322         struct map_info *map = mtd->priv;
1323         struct cfi_private *cfi = map->fldrv_priv;
1324         unsigned long ofs, last_end = 0;
1325         int chipnum;
1326         int ret = 0;
1327
1328         if (!map->virt || (from + len > mtd->size))
1329                 return -EINVAL;
1330
1331         /* Now lock the chip(s) to POINT state */
1332
1333         /* ofs: offset within the first chip that the first read should start */
1334         chipnum = (from >> cfi->chipshift);
1335         ofs = from - (chipnum << cfi->chipshift);
1336
1337         *virt = map->virt + cfi->chips[chipnum].start + ofs;
1338         *retlen = 0;
1339         if (phys)
1340                 *phys = map->phys + cfi->chips[chipnum].start + ofs;
1341
1342         while (len) {
1343                 unsigned long thislen;
1344
1345                 if (chipnum >= cfi->numchips)
1346                         break;
1347
1348                 /* We cannot point across chips that are virtually disjoint */
1349                 if (!last_end)
1350                         last_end = cfi->chips[chipnum].start;
1351                 else if (cfi->chips[chipnum].start != last_end)
1352                         break;
1353
		if ((len + ofs - 1) >> cfi->chipshift)
1355                         thislen = (1<<cfi->chipshift) - ofs;
1356                 else
1357                         thislen = len;
1358
1359                 ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
1360                 if (ret)
1361                         break;
1362
1363                 *retlen += thislen;
1364                 len -= thislen;
1365
1366                 ofs = 0;
1367                 last_end += 1 << cfi->chipshift;
1368                 chipnum++;
1369         }
1370         return 0;
1371 }
1372
1373 static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1374 {
1375         struct map_info *map = mtd->priv;
1376         struct cfi_private *cfi = map->fldrv_priv;
1377         unsigned long ofs;
1378         int chipnum;
1379
	/* Now unlock the chip(s) from POINT state */

	/* ofs: offset within the first chip at which the unpoint should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);
1385
1386         while (len) {
1387                 unsigned long thislen;
1388                 struct flchip *chip;
1389
		if (chipnum >= cfi->numchips)
			break;
		chip = &cfi->chips[chipnum];
1393
		if ((len + ofs - 1) >> cfi->chipshift)
1395                         thislen = (1<<cfi->chipshift) - ofs;
1396                 else
1397                         thislen = len;
1398
1399                 spin_lock(chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if (chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else
			printk(KERN_ERR "%s: Warning: unpoint called on non-pointed region\n", map->name); /* Should this give an error? */
1406
1407                 put_chip(map, chip, chip->start);
1408                 spin_unlock(chip->mutex);
1409
1410                 len -= thislen;
1411                 ofs = 0;
1412                 chipnum++;
1413         }
1414 }
1415
1416 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1417 {
1418         unsigned long cmd_addr;
1419         struct cfi_private *cfi = map->fldrv_priv;
1420         int ret;
1421
1422         adr += chip->start;
1423
1424         /* Ensure cmd read/writes are aligned. */
1425         cmd_addr = adr & ~(map_bankwidth(map)-1);
1426
1427         spin_lock(chip->mutex);
1428         ret = get_chip(map, chip, cmd_addr, FL_READY);
1429         if (ret) {
1430                 spin_unlock(chip->mutex);
1431                 return ret;
1432         }
1433
1434         if (chip->state != FL_POINT && chip->state != FL_READY) {
1435                 map_write(map, CMD(0xff), cmd_addr);
1436
1437                 chip->state = FL_READY;
1438         }
1439
1440         map_copy_from(map, buf, adr, len);
1441
1442         put_chip(map, chip, cmd_addr);
1443
1444         spin_unlock(chip->mutex);
1445         return 0;
1446 }
1447
1448 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1449 {
1450         struct map_info *map = mtd->priv;
1451         struct cfi_private *cfi = map->fldrv_priv;
1452         unsigned long ofs;
1453         int chipnum;
1454         int ret = 0;
1455
1456         /* ofs: offset within the first chip that the first read should start */
1457         chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);
1459
1460         *retlen = 0;
1461
1462         while (len) {
1463                 unsigned long thislen;
1464
1465                 if (chipnum >= cfi->numchips)
1466                         break;
1467
		if ((len + ofs - 1) >> cfi->chipshift)
1469                         thislen = (1<<cfi->chipshift) - ofs;
1470                 else
1471                         thislen = len;
1472
1473                 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1474                 if (ret)
1475                         break;
1476
1477                 *retlen += thislen;
1478                 len -= thislen;
1479                 buf += thislen;
1480
1481                 ofs = 0;
1482                 chipnum++;
1483         }
1484         return ret;
1485 }
1486
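/*
 * Program a single bus-width word.  The program command is 0x40
 * (0x41 on command-set 0x0200 parts) for a regular word write and
 * 0xc0 for a protection-register (OTP) write; completion is detected
 * through the status register and the SR error bits are decoded into
 * errno values.
 */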
1487 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1488                                      unsigned long adr, map_word datum, int mode)
1489 {
1490         struct cfi_private *cfi = map->fldrv_priv;
1491         map_word status, write_cmd;
1492         int ret=0;
1493
1494         adr += chip->start;
1495
1496         switch (mode) {
1497         case FL_WRITING:
1498                 write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
1499                 break;
1500         case FL_OTP_WRITE:
1501                 write_cmd = CMD(0xc0);
1502                 break;
1503         default:
1504                 return -EINVAL;
1505         }
1506
1507         spin_lock(chip->mutex);
1508         ret = get_chip(map, chip, adr, mode);
1509         if (ret) {
1510                 spin_unlock(chip->mutex);
1511                 return ret;
1512         }
1513
1514         XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1515         ENABLE_VPP(map);
1516         xip_disable(map, chip, adr);
1517         map_write(map, write_cmd, adr);
1518         map_write(map, datum, adr);
1519         chip->state = mode;
1520
1521         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1522                                    adr, map_bankwidth(map),
1523                                    chip->word_write_time,
1524                                    chip->word_write_time_max);
1525         if (ret) {
1526                 xip_enable(map, chip, adr);
1527                 printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1528                 goto out;
1529         }
1530
1531         /* check for errors */
1532         status = map_read(map, adr);
1533         if (map_word_bitsset(map, status, CMD(0x1a))) {
1534                 unsigned long chipstatus = MERGESTATUS(status);
1535
1536                 /* reset status */
1537                 map_write(map, CMD(0x50), adr);
1538                 map_write(map, CMD(0x70), adr);
1539                 xip_enable(map, chip, adr);
1540
1541                 if (chipstatus & 0x02) {
1542                         ret = -EROFS;
1543                 } else if (chipstatus & 0x08) {
1544                         printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1545                         ret = -EIO;
1546                 } else {
1547                         printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1548                         ret = -EINVAL;
1549                 }
1550
1551                 goto out;
1552         }
1553
1554         xip_enable(map, chip, adr);
1555  out:   put_chip(map, chip, adr);
1556         spin_unlock(chip->mutex);
1557         return ret;
1558 }
1559
1560
static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
1562 {
1563         struct map_info *map = mtd->priv;
1564         struct cfi_private *cfi = map->fldrv_priv;
1565         int ret = 0;
1566         int chipnum;
1567         unsigned long ofs;
1568
1569         *retlen = 0;
1570         if (!len)
1571                 return 0;
1572
1573         chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
1575
1576         /* If it's not bus-aligned, do the first byte write */
1577         if (ofs & (map_bankwidth(map)-1)) {
1578                 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1579                 int gap = ofs - bus_ofs;
1580                 int n;
1581                 map_word datum;
1582
1583                 n = min_t(int, len, map_bankwidth(map)-gap);
1584                 datum = map_word_ff(map);
1585                 datum = map_word_load_partial(map, datum, buf, gap, n);
1586
1587                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1588                                                bus_ofs, datum, FL_WRITING);
1589                 if (ret)
1590                         return ret;
1591
1592                 len -= n;
1593                 ofs += n;
1594                 buf += n;
1595                 (*retlen) += n;
1596
1597                 if (ofs >> cfi->chipshift) {
			chipnum++;
1599                         ofs = 0;
1600                         if (chipnum == cfi->numchips)
1601                                 return 0;
1602                 }
1603         }
1604
	while (len >= map_bankwidth(map)) {
1606                 map_word datum = map_word_load(map, buf);
1607
1608                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1609                                        ofs, datum, FL_WRITING);
1610                 if (ret)
1611                         return ret;
1612
1613                 ofs += map_bankwidth(map);
1614                 buf += map_bankwidth(map);
1615                 (*retlen) += map_bankwidth(map);
1616                 len -= map_bankwidth(map);
1617
1618                 if (ofs >> cfi->chipshift) {
			chipnum++;
1620                         ofs = 0;
1621                         if (chipnum == cfi->numchips)
1622                                 return 0;
1623                 }
1624         }
1625
1626         if (len & (map_bankwidth(map)-1)) {
1627                 map_word datum;
1628
1629                 datum = map_word_ff(map);
1630                 datum = map_word_load_partial(map, datum, buf, 0, len);
1631
1632                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1633                                        ofs, datum, FL_WRITING);
1634                 if (ret)
1635                         return ret;
1636
1637                 (*retlen) += len;
1638         }
1639
1640         return 0;
1641 }
1642
1643
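/*
 * Write up to one write-buffer worth of data: issue Write to Buffer
 * (0xe8, or 0xe9 on command-set 0x0200 parts), wait for the buffer to
 * become available, write the word count and the data - padding any
 * unaligned head and tail bytes with 0xff - and confirm with 0xd0.
 * SR.4/SR.5 are cleared first, since the chip refuses further buffer
 * writes while either of them is set.
 */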
1644 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1645                                     unsigned long adr, const struct kvec **pvec,
1646                                     unsigned long *pvec_seek, int len)
1647 {
1648         struct cfi_private *cfi = map->fldrv_priv;
1649         map_word status, write_cmd, datum;
1650         unsigned long cmd_adr;
1651         int ret, wbufsize, word_gap, words;
1652         const struct kvec *vec;
1653         unsigned long vec_seek;
1654         unsigned long initial_adr;
1655         int initial_len = len;
1656
1657         wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1658         adr += chip->start;
1659         initial_adr = adr;
1660         cmd_adr = adr & ~(wbufsize-1);
1661
1662         /* Let's determine this according to the interleave only once */
1663         write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
1664
1665         spin_lock(chip->mutex);
1666         ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1667         if (ret) {
1668                 spin_unlock(chip->mutex);
1669                 return ret;
1670         }
1671
1672         XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
1673         ENABLE_VPP(map);
1674         xip_disable(map, chip, cmd_adr);
1675
	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1677            [...], the device will not accept any more Write to Buffer commands".
1678            So we must check here and reset those bits if they're set. Otherwise
1679            we're just pissing in the wind */
1680         if (chip->state != FL_STATUS) {
1681                 map_write(map, CMD(0x70), cmd_adr);
1682                 chip->state = FL_STATUS;
1683         }
1684         status = map_read(map, cmd_adr);
1685         if (map_word_bitsset(map, status, CMD(0x30))) {
1686                 xip_enable(map, chip, cmd_adr);
1687                 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1688                 xip_disable(map, chip, cmd_adr);
1689                 map_write(map, CMD(0x50), cmd_adr);
1690                 map_write(map, CMD(0x70), cmd_adr);
1691         }
1692
1693         chip->state = FL_WRITING_TO_BUFFER;
1694         map_write(map, write_cmd, cmd_adr);
1695         ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
1696         if (ret) {
1697                 /* Argh. Not ready for write to buffer */
1698                 map_word Xstatus = map_read(map, cmd_adr);
1699                 map_write(map, CMD(0x70), cmd_adr);
1700                 chip->state = FL_STATUS;
1701                 status = map_read(map, cmd_adr);
1702                 map_write(map, CMD(0x50), cmd_adr);
1703                 map_write(map, CMD(0x70), cmd_adr);
1704                 xip_enable(map, chip, cmd_adr);
1705                 printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1706                                 map->name, Xstatus.x[0], status.x[0]);
1707                 goto out;
1708         }
1709
1710         /* Figure out the number of words to write */
1711         word_gap = (-adr & (map_bankwidth(map)-1));
1712         words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
1713         if (!word_gap) {
1714                 words--;
1715         } else {
1716                 word_gap = map_bankwidth(map) - word_gap;
1717                 adr -= word_gap;
1718                 datum = map_word_ff(map);
1719         }
1720
1721         /* Write length of data to come */
	map_write(map, CMD(words), cmd_adr);
1723
1724         /* Write data */
1725         vec = *pvec;
1726         vec_seek = *pvec_seek;
1727         do {
1728                 int n = map_bankwidth(map) - word_gap;
1729                 if (n > vec->iov_len - vec_seek)
1730                         n = vec->iov_len - vec_seek;
1731                 if (n > len)
1732                         n = len;
1733
1734                 if (!word_gap && len < map_bankwidth(map))
1735                         datum = map_word_ff(map);
1736
1737                 datum = map_word_load_partial(map, datum,
1738                                               vec->iov_base + vec_seek,
1739                                               word_gap, n);
1740
1741                 len -= n;
1742                 word_gap += n;
1743                 if (!len || word_gap == map_bankwidth(map)) {
1744                         map_write(map, datum, adr);
1745                         adr += map_bankwidth(map);
1746                         word_gap = 0;
1747                 }
1748
1749                 vec_seek += n;
1750                 if (vec_seek == vec->iov_len) {
1751                         vec++;
1752                         vec_seek = 0;
1753                 }
1754         } while (len);
1755         *pvec = vec;
1756         *pvec_seek = vec_seek;
1757
1758         /* GO GO GO */
1759         map_write(map, CMD(0xd0), cmd_adr);
1760         chip->state = FL_WRITING;
1761
1762         ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1763                                    initial_adr, initial_len,
1764                                    chip->buffer_write_time,
1765                                    chip->buffer_write_time_max);
1766         if (ret) {
1767                 map_write(map, CMD(0x70), cmd_adr);
1768                 chip->state = FL_STATUS;
1769                 xip_enable(map, chip, cmd_adr);
1770                 printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1771                 goto out;
1772         }
1773
1774         /* check for errors */
1775         status = map_read(map, cmd_adr);
1776         if (map_word_bitsset(map, status, CMD(0x1a))) {
1777                 unsigned long chipstatus = MERGESTATUS(status);
1778
1779                 /* reset status */
1780                 map_write(map, CMD(0x50), cmd_adr);
1781                 map_write(map, CMD(0x70), cmd_adr);
1782                 xip_enable(map, chip, cmd_adr);
1783
1784                 if (chipstatus & 0x02) {
1785                         ret = -EROFS;
1786                 } else if (chipstatus & 0x08) {
1787                         printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1788                         ret = -EIO;
1789                 } else {
1790                         printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1791                         ret = -EINVAL;
1792                 }
1793
1794                 goto out;
1795         }
1796
1797         xip_enable(map, chip, cmd_adr);
1798  out:   put_chip(map, chip, cmd_adr);
1799         spin_unlock(chip->mutex);
1800         return ret;
1801 }
1802
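/*
 * MTD writev() method: feed an array of kvecs to do_write_buffer(),
 * splitting the stream so that no single buffered write crosses a
 * write-buffer or chip boundary.
 */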
1803 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1804                                 unsigned long count, loff_t to, size_t *retlen)
1805 {
1806         struct map_info *map = mtd->priv;
1807         struct cfi_private *cfi = map->fldrv_priv;
1808         int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1809         int ret = 0;
1810         int chipnum;
1811         unsigned long ofs, vec_seek, i;
1812         size_t len = 0;
1813
1814         for (i = 0; i < count; i++)
1815                 len += vecs[i].iov_len;
1816
1817         *retlen = 0;
1818         if (!len)
1819                 return 0;
1820
1821         chipnum = to >> cfi->chipshift;
1822         ofs = to - (chipnum << cfi->chipshift);
1823         vec_seek = 0;
1824
1825         do {
1826                 /* We must not cross write block boundaries */
1827                 int size = wbufsize - (ofs & (wbufsize-1));
1828
1829                 if (size > len)
1830                         size = len;
1831                 ret = do_write_buffer(map, &cfi->chips[chipnum],
1832                                       ofs, &vecs, &vec_seek, size);
1833                 if (ret)
1834                         return ret;
1835
1836                 ofs += size;
1837                 (*retlen) += size;
1838                 len -= size;
1839
1840                 if (ofs >> cfi->chipshift) {
			chipnum++;
1842                         ofs = 0;
1843                         if (chipnum == cfi->numchips)
1844                                 return 0;
1845                 }
1846
1847                 /* Be nice and reschedule with the chip in a usable state for other
1848                    processes. */
1849                 cond_resched();
1850
1851         } while (len);
1852
1853         return 0;
1854 }
1855
1856 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1857                                        size_t len, size_t *retlen, const u_char *buf)
1858 {
1859         struct kvec vec;
1860
1861         vec.iov_base = (void *) buf;
1862         vec.iov_len = len;
1863
1864         return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1865 }
1866
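/*
 * Erase one block: clear the status register, issue the Block Erase /
 * Confirm pair (0x20, 0xd0) and wait for completion.  Erase failures
 * flagged in the status register are retried up to three times before
 * giving up.
 */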
1867 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1868                                       unsigned long adr, int len, void *thunk)
1869 {
1870         struct cfi_private *cfi = map->fldrv_priv;
1871         map_word status;
1872         int retries = 3;
1873         int ret;
1874
1875         adr += chip->start;
1876
1877  retry:
1878         spin_lock(chip->mutex);
1879         ret = get_chip(map, chip, adr, FL_ERASING);
1880         if (ret) {
1881                 spin_unlock(chip->mutex);
1882                 return ret;
1883         }
1884
1885         XIP_INVAL_CACHED_RANGE(map, adr, len);
1886         ENABLE_VPP(map);
1887         xip_disable(map, chip, adr);
1888
1889         /* Clear the status register first */
1890         map_write(map, CMD(0x50), adr);
1891
1892         /* Now erase */
1893         map_write(map, CMD(0x20), adr);
1894         map_write(map, CMD(0xD0), adr);
1895         chip->state = FL_ERASING;
1896         chip->erase_suspended = 0;
1897
1898         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1899                                    adr, len,
1900                                    chip->erase_time,
1901                                    chip->erase_time_max);
1902         if (ret) {
1903                 map_write(map, CMD(0x70), adr);
1904                 chip->state = FL_STATUS;
1905                 xip_enable(map, chip, adr);
1906                 printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1907                 goto out;
1908         }
1909
1910         /* We've broken this before. It doesn't hurt to be safe */
1911         map_write(map, CMD(0x70), adr);
1912         chip->state = FL_STATUS;
1913         status = map_read(map, adr);
1914
1915         /* check for errors */
1916         if (map_word_bitsset(map, status, CMD(0x3a))) {
1917                 unsigned long chipstatus = MERGESTATUS(status);
1918
1919                 /* Reset the error bits */
1920                 map_write(map, CMD(0x50), adr);
1921                 map_write(map, CMD(0x70), adr);
1922                 xip_enable(map, chip, adr);
1923
1924                 if ((chipstatus & 0x30) == 0x30) {
1925                         printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1926                         ret = -EINVAL;
1927                 } else if (chipstatus & 0x02) {
1928                         /* Protection bit set */
1929                         ret = -EROFS;
		} else if (chipstatus & 0x08) {
1931                         /* Voltage */
1932                         printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1933                         ret = -EIO;
1934                 } else if (chipstatus & 0x20 && retries--) {
1935                         printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1936                         put_chip(map, chip, adr);
1937                         spin_unlock(chip->mutex);
1938                         goto retry;
1939                 } else {
1940                         printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1941                         ret = -EIO;
1942                 }
1943
1944                 goto out;
1945         }
1946
1947         xip_enable(map, chip, adr);
1948  out:   put_chip(map, chip, adr);
1949         spin_unlock(chip->mutex);
1950         return ret;
1951 }
1952
1953 static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1954 {
1955         unsigned long ofs, len;
1956         int ret;
1957
1958         ofs = instr->addr;
1959         len = instr->len;
1960
1961         ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1962         if (ret)
1963                 return ret;
1964
1965         instr->state = MTD_ERASE_DONE;
1966         mtd_erase_callback(instr);
1967
1968         return 0;
1969 }
1970
1971 static void cfi_intelext_sync (struct mtd_info *mtd)
1972 {
1973         struct map_info *map = mtd->priv;
1974         struct cfi_private *cfi = map->fldrv_priv;
1975         int i;
1976         struct flchip *chip;
1977         int ret = 0;
1978
1979         for (i=0; !ret && i<cfi->numchips; i++) {
1980                 chip = &cfi->chips[i];
1981
1982                 spin_lock(chip->mutex);
1983                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1984
1985                 if (!ret) {
1986                         chip->oldstate = chip->state;
1987                         chip->state = FL_SYNCING;
1988                         /* No need to wake_up() on this state change -
1989                          * as the whole point is that nobody can do anything
1990                          * with the chip now anyway.
1991                          */
1992                 }
1993                 spin_unlock(chip->mutex);
1994         }
1995
1996         /* Unlock the chips again */
1997
1998         for (i--; i >=0; i--) {
1999                 chip = &cfi->chips[i];
2000
2001                 spin_lock(chip->mutex);
2002
2003                 if (chip->state == FL_SYNCING) {
2004                         chip->state = chip->oldstate;
2005                         chip->oldstate = FL_READY;
2006                         wake_up(&chip->wq);
2007                 }
2008                 spin_unlock(chip->mutex);
2009         }
2010 }
2011
2012 static int __xipram do_getlockstatus_oneblock(struct map_info *map,
2013                                                 struct flchip *chip,
2014                                                 unsigned long adr,
2015                                                 int len, void *thunk)
2016 {
2017         struct cfi_private *cfi = map->fldrv_priv;
2018         int status, ofs_factor = cfi->interleave * cfi->device_type;
2019
2020         adr += chip->start;
2021         xip_disable(map, chip, adr+(2*ofs_factor));
2022         map_write(map, CMD(0x90), adr+(2*ofs_factor));
2023         chip->state = FL_JEDEC_QUERY;
2024         status = cfi_read_query(map, adr+(2*ofs_factor));
2025         xip_enable(map, chip, 0);
2026         return status;
2027 }
2028
2029 #ifdef DEBUG_LOCK_BITS
2030 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
2031                                                 struct flchip *chip,
2032                                                 unsigned long adr,
2033                                                 int len, void *thunk)
2034 {
2035         printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
2036                adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
2037         return 0;
2038 }
2039 #endif
2040
2041 #define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
2042 #define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)
2043
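/*
 * Lock or unlock one block depending on the thunk: the lock-bit setup
 * command (0x60) is followed by 0x01 to set or 0xd0 to clear the
 * block's lock bit.  Chips with instant individual block locking
 * complete immediately; all others are given a timer tick.
 */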
2044 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
2045                                        unsigned long adr, int len, void *thunk)
2046 {
2047         struct cfi_private *cfi = map->fldrv_priv;
2048         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2049         int udelay;
2050         int ret;
2051
2052         adr += chip->start;
2053
2054         spin_lock(chip->mutex);
2055         ret = get_chip(map, chip, adr, FL_LOCKING);
2056         if (ret) {
2057                 spin_unlock(chip->mutex);
2058                 return ret;
2059         }
2060
2061         ENABLE_VPP(map);
2062         xip_disable(map, chip, adr);
2063
2064         map_write(map, CMD(0x60), adr);
2065         if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
2066                 map_write(map, CMD(0x01), adr);
2067                 chip->state = FL_LOCKING;
2068         } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
2069                 map_write(map, CMD(0xD0), adr);
2070                 chip->state = FL_UNLOCKING;
2071         } else
2072                 BUG();
2073
	/*
	 * If Instant Individual Block Locking is supported, there is
	 * no need to delay.
	 */
2078         udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;
2079
2080         ret = WAIT_TIMEOUT(map, chip, adr, udelay, udelay * 100);
2081         if (ret) {
2082                 map_write(map, CMD(0x70), adr);
2083                 chip->state = FL_STATUS;
2084                 xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: block lock/unlock error: (status timeout)\n", map->name);
2086                 goto out;
2087         }
2088
2089         xip_enable(map, chip, adr);
2090 out:    put_chip(map, chip, adr);
2091         spin_unlock(chip->mutex);
2092         return ret;
2093 }
2094
2095 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2096 {
2097         int ret;
2098
2099 #ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llx\n",
	       __func__, (unsigned long long)ofs, (unsigned long long)len);
2102         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2103                 ofs, len, NULL);
2104 #endif
2105
2106         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2107                 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2108
2109 #ifdef DEBUG_LOCK_BITS
2110         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2111                __func__, ret);
2112         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2113                 ofs, len, NULL);
2114 #endif
2115
2116         return ret;
2117 }
2118
2119 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2120 {
2121         int ret;
2122
2123 #ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llx\n",
	       __func__, (unsigned long long)ofs, (unsigned long long)len);
2126         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2127                 ofs, len, NULL);
2128 #endif
2129
2130         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2131                                         ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2132
2133 #ifdef DEBUG_LOCK_BITS
2134         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2135                __func__, ret);
2136         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2137                 ofs, len, NULL);
2138 #endif
2139
2140         return ret;
2141 }
2142
2143 #ifdef CONFIG_MTD_OTP
2144
2145 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
2146                         u_long data_offset, u_char *buf, u_int size,
2147                         u_long prot_offset, u_int groupno, u_int groupsize);
2148
2149 static int __xipram
2150 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
2151             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2152 {
2153         struct cfi_private *cfi = map->fldrv_priv;
2154         int ret;
2155
2156         spin_lock(chip->mutex);
2157         ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
2158         if (ret) {
2159                 spin_unlock(chip->mutex);
2160                 return ret;
2161         }
2162
2163         /* let's ensure we're not reading back cached data from array mode */
2164         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2165
2166         xip_disable(map, chip, chip->start);
2167         if (chip->state != FL_JEDEC_QUERY) {
2168                 map_write(map, CMD(0x90), chip->start);
2169                 chip->state = FL_JEDEC_QUERY;
2170         }
2171         map_copy_from(map, buf, chip->start + offset, size);
2172         xip_enable(map, chip, chip->start);
2173
2174         /* then ensure we don't keep OTP data in the cache */
2175         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2176
2177         put_chip(map, chip, chip->start);
2178         spin_unlock(chip->mutex);
2179         return 0;
2180 }
2181
2182 static int
2183 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2184              u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2185 {
2186         int ret;
2187
2188         while (size) {
2189                 unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2190                 int gap = offset - bus_ofs;
2191                 int n = min_t(int, size, map_bankwidth(map)-gap);
2192                 map_word datum = map_word_ff(map);
2193
2194                 datum = map_word_load_partial(map, datum, buf, gap, n);
2195                 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2196                 if (ret)
2197                         return ret;
2198
2199                 offset += n;
2200                 buf += n;
2201                 size -= n;
2202         }
2203
2204         return 0;
2205 }
2206
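/*
 * Lock an OTP register group by programming its bit in the protection
 * lock register to zero.  The requested range must match the group
 * boundaries exactly.
 */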
2207 static int
2208 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2209             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2210 {
2211         struct cfi_private *cfi = map->fldrv_priv;
2212         map_word datum;
2213
2214         /* make sure area matches group boundaries */
2215         if (size != grpsz)
2216                 return -EXDEV;
2217
2218         datum = map_word_ff(map);
2219         datum = map_word_clr(map, datum, CMD(1 << grpno));
2220         return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2221 }
2222
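/*
 * Walk the protection (OTP) register fields described by the Intel
 * extended query table, applying 'action' to the requested byte range.
 * A NULL action turns the walk into a query that fills buf with one
 * otp_info record per register group.  The geometry fixups translate
 * the chip-relative register addresses into map-relative ones.
 */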
2223 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2224                                  size_t *retlen, u_char *buf,
2225                                  otp_op_t action, int user_regs)
2226 {
2227         struct map_info *map = mtd->priv;
2228         struct cfi_private *cfi = map->fldrv_priv;
2229         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2230         struct flchip *chip;
2231         struct cfi_intelext_otpinfo *otp;
2232         u_long devsize, reg_prot_offset, data_offset;
2233         u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2234         u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2235         int ret;
2236
2237         *retlen = 0;
2238
2239         /* Check that we actually have some OTP registers */
2240         if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2241                 return -ENODATA;
2242
2243         /* we need real chips here not virtual ones */
2244         devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2245         chip_step = devsize >> cfi->chipshift;
2246         chip_num = 0;
2247
2248         /* Some chips have OTP located in the _top_ partition only.
2249            For example: Intel 28F256L18T (T means top-parameter device) */
2250         if (cfi->mfr == CFI_MFR_INTEL) {
2251                 switch (cfi->id) {
2252                 case 0x880b:
2253                 case 0x880c:
2254                 case 0x880d:
2255                         chip_num = chip_step - 1;
2256                 }
2257         }
2258
2259         for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2260                 chip = &cfi->chips[chip_num];
2261                 otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2262
2263                 /* first OTP region */
2264                 field = 0;
2265                 reg_prot_offset = extp->ProtRegAddr;
2266                 reg_fact_groups = 1;
2267                 reg_fact_size = 1 << extp->FactProtRegSize;
2268                 reg_user_groups = 1;
2269                 reg_user_size = 1 << extp->UserProtRegSize;
2270
2271                 while (len > 0) {
2272                         /* flash geometry fixup */
2273                         data_offset = reg_prot_offset + 1;
2274                         data_offset *= cfi->interleave * cfi->device_type;
2275                         reg_prot_offset *= cfi->interleave * cfi->device_type;
2276                         reg_fact_size *= cfi->interleave;
2277                         reg_user_size *= cfi->interleave;
2278
2279                         if (user_regs) {
2280                                 groups = reg_user_groups;
2281                                 groupsize = reg_user_size;
2282                                 /* skip over factory reg area */
2283                                 groupno = reg_fact_groups;
2284                                 data_offset += reg_fact_groups * reg_fact_size;
2285                         } else {
2286                                 groups = reg_fact_groups;
2287                                 groupsize = reg_fact_size;
2288                                 groupno = 0;
2289                         }
2290
2291                         while (len > 0 && groups > 0) {
2292                                 if (!action) {
2293                                         /*
2294                                          * Special case: if action is NULL
2295                                          * we fill buf with otp_info records.
2296                                          */
2297                                         struct otp_info *otpinfo;
2298                                         map_word lockword;
					if (len <= sizeof(struct otp_info))
						return -ENOSPC;
					len -= sizeof(struct otp_info);
2302                                         ret = do_otp_read(map, chip,
2303                                                           reg_prot_offset,
2304                                                           (u_char *)&lockword,
2305                                                           map_bankwidth(map),
2306                                                           0, 0,  0);
2307                                         if (ret)
2308                                                 return ret;
2309                                         otpinfo = (struct otp_info *)buf;
2310                                         otpinfo->start = from;
2311                                         otpinfo->length = groupsize;
2312                                         otpinfo->locked =
2313                                            !map_word_bitsset(map, lockword,
2314                                                              CMD(1 << groupno));
2315                                         from += groupsize;
2316                                         buf += sizeof(*otpinfo);
2317                                         *retlen += sizeof(*otpinfo);
2318                                 } else if (from >= groupsize) {
2319                                         from -= groupsize;
2320                                         data_offset += groupsize;
2321                                 } else {
2322                                         int size = groupsize;
2323                                         data_offset += from;
2324                                         size -= from;
2325                                         from = 0;
2326                                         if (size > len)
2327                                                 size = len;
2328                                         ret = action(map, chip, data_offset,
2329                                                      buf, size, reg_prot_offset,
2330                                                      groupno, groupsize);
2331                                         if (ret < 0)
2332                                                 return ret;
2333                                         buf += size;
2334                                         len -= size;
2335                                         *retlen += size;
2336                                         data_offset += size;
2337                                 }
2338                                 groupno++;
2339                                 groups--;
2340                         }
2341
2342                         /* next OTP region */
2343                         if (++field == extp->NumProtectionFields)
2344                                 break;
2345                         reg_prot_offset = otp->ProtRegAddr;
2346                         reg_fact_groups = otp->FactGroups;
2347                         reg_fact_size = 1 << otp->FactProtRegSize;
2348                         reg_user_groups = otp->UserGroups;
2349                         reg_user_size = 1 << otp->UserProtRegSize;
2350                         otp++;
2351                 }
2352         }
2353
2354         return 0;
2355 }
2356
2357 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2358                                            size_t len, size_t *retlen,
2359                                             u_char *buf)
2360 {
2361         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2362                                      buf, do_otp_read, 0);
2363 }
2364
2365 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2366                                            size_t len, size_t *retlen,
2367                                             u_char *buf)
2368 {
2369         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2370                                      buf, do_otp_read, 1);
2371 }
2372
2373 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2374                                             size_t len, size_t *retlen,
2375                                              u_char *buf)
2376 {
2377         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2378                                      buf, do_otp_write, 1);
2379 }
2380
2381 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2382                                            loff_t from, size_t len)
2383 {
2384         size_t retlen;
2385         return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2386                                      NULL, do_otp_lock, 1);
2387 }
2388
2389 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
2390                                            struct otp_info *buf, size_t len)
2391 {
2392         size_t retlen;
2393         int ret;
2394
2395         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
2396         return ret ? : retlen;
2397 }
2398
2399 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
2400                                            struct otp_info *buf, size_t len)
2401 {
2402         size_t retlen;
2403         int ret;
2404
2405         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
2406         return ret ? : retlen;
2407 }
2408
2409 #endif
2410
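/*
 * Record the lock state of every erase block in the per-region
 * lockmaps, so that cfi_intelext_restore_locks() can re-unlock the
 * previously unlocked blocks after a power cycle on parts that lock
 * all blocks at power-up.
 */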
2411 static void cfi_intelext_save_locks(struct mtd_info *mtd)
2412 {
2413         struct mtd_erase_region_info *region;
2414         int block, status, i;
2415         unsigned long adr;
2416         size_t len;
2417
2418         for (i = 0; i < mtd->numeraseregions; i++) {
2419                 region = &mtd->eraseregions[i];
2420                 if (!region->lockmap)
2421                         continue;
2422
2423                 for (block = 0; block < region->numblocks; block++){
2424                         len = region->erasesize;
2425                         adr = region->offset + block * len;
2426
2427                         status = cfi_varsize_frob(mtd,
2428                                         do_getlockstatus_oneblock, adr, len, NULL);
2429                         if (status)
2430                                 set_bit(block, region->lockmap);
2431                         else
2432                                 clear_bit(block, region->lockmap);
2433                 }
2434         }
2435 }
2436
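/*
 * MTD suspend() method: park every idle chip in a known (read array)
 * state and mark it FL_PM_SUSPENDED.  If any chip still has an
 * operation in progress the suspend is refused with -EAGAIN and the
 * chips already suspended are woken up again.
 */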
2437 static int cfi_intelext_suspend(struct mtd_info *mtd)
2438 {
2439         struct map_info *map = mtd->priv;
2440         struct cfi_private *cfi = map->fldrv_priv;
2441         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2442         int i;
2443         struct flchip *chip;
2444         int ret = 0;
2445
2446         if ((mtd->flags & MTD_POWERUP_LOCK)
2447             && extp && (extp->FeatureSupport & (1 << 5)))
2448                 cfi_intelext_save_locks(mtd);
2449
2450         for (i=0; !ret && i<cfi->numchips; i++) {
2451                 chip = &cfi->chips[i];
2452
2453                 spin_lock(chip->mutex);
2454
2455                 switch (chip->state) {
2456                 case FL_READY:
2457                 case FL_STATUS:
2458                 case FL_CFI_QUERY:
2459                 case FL_JEDEC_QUERY:
2460                         if (chip->oldstate == FL_READY) {
2461                                 /* place the chip in a known state before suspend */
2462                                 map_write(map, CMD(0xFF), cfi->chips[i].start);
2463                                 chip->oldstate = chip->state;
2464                                 chip->state = FL_PM_SUSPENDED;
2465                                 /* No need to wake_up() on this state change -
2466                                  * as the whole point is that nobody can do anything
2467                                  * with the chip now anyway.
2468                                  */
2469                         } else {
2470                                 /* There seems to be an operation pending. We must wait for it. */
2471                                 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2472                                 ret = -EAGAIN;
2473                         }
2474                         break;
		default:
			/* Should we actually wait? Once upon a time these routines weren't
			   allowed to. Or should we return -EAGAIN, because the upper layers
			   ought to have already shut down anything which was using the device
			   anyway? The latter for now. */
			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
			ret = -EAGAIN;
			/* fall through */
		case FL_PM_SUSPENDED:
			break;
2484                 }
2485                 spin_unlock(chip->mutex);
2486         }
2487
2488         /* Unlock the chips again */
2489
2490         if (ret) {
2491                 for (i--; i >=0; i--) {
2492                         chip = &cfi->chips[i];
2493
2494                         spin_lock(chip->mutex);
2495
2496                         if (chip->state == FL_PM_SUSPENDED) {
2497                                 /* No need to force it into a known state here,
2498                                    because we're returning failure, and it didn't
2499                                    get power cycled */
2500                                 chip->state = chip->oldstate;
2501                                 chip->oldstate = FL_READY;
2502                                 wake_up(&chip->wq);
2503                         }
2504                         spin_unlock(chip->mutex);
2505                 }
2506         }
2507
2508         return ret;
2509 }
2510
2511 static void cfi_intelext_restore_locks(struct mtd_info *mtd)
2512 {
2513         struct mtd_erase_region_info *region;
2514         int block, i;
2515         unsigned long adr;
2516         size_t len;
2517
2518         for (i = 0; i < mtd->numeraseregions; i++) {
2519                 region = &mtd->eraseregions[i];
2520                 if (!region->lockmap)
2521                         continue;
2522
2523                 for (block = 0; block < region->numblocks; block++) {
2524                         len = region->erasesize;
2525                         adr = region->offset + block * len;
2526
2527                         if (!test_bit(block, region->lockmap))
2528                                 cfi_intelext_unlock(mtd, adr, len);
2529                 }
2530         }
2531 }
2532
2533 static void cfi_intelext_resume(struct mtd_info *mtd)
2534 {
2535         struct map_info *map = mtd->priv;
2536         struct cfi_private *cfi = map->fldrv_priv;
2537         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2538         int i;
2539         struct flchip *chip;
2540
2541         for (i=0; i<cfi->numchips; i++) {
2542
2543                 chip = &cfi->chips[i];
2544
2545                 spin_lock(chip->mutex);
2546
2547                 /* Go to known state. Chip may have been power cycled */
2548                 if (chip->state == FL_PM_SUSPENDED) {
2549                         map_write(map, CMD(0xFF), cfi->chips[i].start);
2550                         chip->oldstate = chip->state = FL_READY;
2551                         wake_up(&chip->wq);
2552                 }
2553
2554                 spin_unlock(chip->mutex);
2555         }
2556
2557         if ((mtd->flags & MTD_POWERUP_LOCK)
2558             && extp && (extp->FeatureSupport & (1 << 5)))
2559                 cfi_intelext_restore_locks(mtd);
2560 }
2561
2562 static int cfi_intelext_reset(struct mtd_info *mtd)
2563 {
2564         struct map_info *map = mtd->priv;
2565         struct cfi_private *cfi = map->fldrv_priv;
2566         int i, ret;
2567
2568         for (i=0; i < cfi->numchips; i++) {
2569                 struct flchip *chip = &cfi->chips[i];
2570
2571                 /* force the completion of any ongoing operation
2572                    and switch to array mode so any bootloader in
2573                    flash is accessible for soft reboot. */
2574                 spin_lock(chip->mutex);
2575                 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2576                 if (!ret) {
2577                         map_write(map, CMD(0xff), chip->start);
2578                         chip->state = FL_SHUTDOWN;
2579                         put_chip(map, chip, chip->start);
2580                 }
2581                 spin_unlock(chip->mutex);
2582         }
2583
2584         return 0;
2585 }
2586
2587 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2588                                void *v)
2589 {
2590         struct mtd_info *mtd;
2591
2592         mtd = container_of(nb, struct mtd_info, reboot_notifier);
2593         cfi_intelext_reset(mtd);
2594         return NOTIFY_DONE;
2595 }
2596
2597 static void cfi_intelext_destroy(struct mtd_info *mtd)
2598 {
2599         struct map_info *map = mtd->priv;
2600         struct cfi_private *cfi = map->fldrv_priv;
2601         struct mtd_erase_region_info *region;
2602         int i;
2603         cfi_intelext_reset(mtd);
2604         unregister_reboot_notifier(&mtd->reboot_notifier);
2605         kfree(cfi->cmdset_priv);
2606         kfree(cfi->cfiq);
2607         kfree(cfi->chips[0].priv);
2608         kfree(cfi);
2609         for (i = 0; i < mtd->numeraseregions; i++) {
2610                 region = &mtd->eraseregions[i];
		kfree(region->lockmap);
2613         }
2614         kfree(mtd->eraseregions);
2615 }
2616
2617 MODULE_LICENSE("GPL");
2618 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2619 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2620 MODULE_ALIAS("cfi_cmdset_0003");
2621 MODULE_ALIAS("cfi_cmdset_0200");