8189adfefaef5db6c26b81ae4b329d9fcee1ff16
[linux-2.6.git] / drivers / mtd / chips / cfi_cmdset_0001.c
1 /*
2  * Common Flash Interface support:
3  *   Intel Extended Vendor Command Set (ID 0x0001)
4  *
5  * (C) 2000 Red Hat. GPL'd
6  *
7  * $Id: cfi_cmdset_0001.c,v 1.186 2005/11/23 22:07:52 nico Exp $
8  *
9  *
10  * 10/10/2000   Nicolas Pitre <nico@cam.org>
11  *      - completely revamped method functions so they are aware and
12  *        independent of the flash geometry (buswidth, interleave, etc.)
13  *      - scalability vs code size is completely set at compile-time
14  *        (see include/linux/mtd/cfi.h for selection)
15  *      - optimized write buffer method
16  * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
17  *      - reworked lock/unlock/erase support for var size flash
18  * 21/03/2007   Rodolfo Giometti <giometti@linux.it>
19  *      - auto unlock sectors on resume for auto locking flash on power up
20  */
21
22 #include <linux/module.h>
23 #include <linux/types.h>
24 #include <linux/kernel.h>
25 #include <linux/sched.h>
26 #include <linux/init.h>
27 #include <asm/io.h>
28 #include <asm/byteorder.h>
29
30 #include <linux/errno.h>
31 #include <linux/slab.h>
32 #include <linux/delay.h>
33 #include <linux/interrupt.h>
34 #include <linux/reboot.h>
35 #include <linux/bitmap.h>
36 #include <linux/mtd/xip.h>
37 #include <linux/mtd/map.h>
38 #include <linux/mtd/mtd.h>
39 #include <linux/mtd/compatmac.h>
40 #include <linux/mtd/cfi.h>
41
42 /* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
43 /* #define CMDSET0001_DISABLE_WRITE_SUSPEND */
44
45 // debugging, turns off buffer write mode if set to 1
46 #define FORCE_WORD_WRITE 0
47
48 #define MANUFACTURER_INTEL      0x0089
49 #define I82802AB        0x00ad
50 #define I82802AC        0x00ac
51 #define MANUFACTURER_ST         0x0020
52 #define M50LPW080       0x002F
53 #define AT49BV640D      0x02de
54
55 static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
56 static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
57 static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
58 static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
59 static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
60 static void cfi_intelext_sync (struct mtd_info *);
61 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
62 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
63 #ifdef CONFIG_MTD_OTP
64 static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
65 static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
66 static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
67 static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
68 static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
69                                             struct otp_info *, size_t);
70 static int cfi_intelext_get_user_prot_info (struct mtd_info *,
71                                             struct otp_info *, size_t);
72 #endif
73 static int cfi_intelext_suspend (struct mtd_info *);
74 static void cfi_intelext_resume (struct mtd_info *);
75 static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);
76
77 static void cfi_intelext_destroy(struct mtd_info *);
78
79 struct mtd_info *cfi_cmdset_0001(struct map_info *, int);
80
81 static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
82 static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);
83
84 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
85                      size_t *retlen, u_char **mtdbuf);
86 static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
87                         size_t len);
88
89 static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
90 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
91 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
92 #include "fwh_lock.h"
93
94
95
96 /*
97  *  *********** SETUP AND PROBE BITS  ***********
98  */
99
/* Chip-driver registration record for this command set.  Probing is
   done by the generic CFI probe code, so .probe is intentionally NULL;
   only .destroy is reached through this structure. */
static struct mtd_chip_driver cfi_intelext_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_intelext_destroy,
	.name		= "cfi_cmdset_0001",
	.module		= THIS_MODULE
};
106
107 /* #define DEBUG_LOCK_BITS */
108 /* #define DEBUG_CFI_FEATURES */
109
110 #ifdef DEBUG_CFI_FEATURES
111 static void cfi_tell_features(struct cfi_pri_intelext *extp)
112 {
113         int i;
114         printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
115         printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
116         printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
117         printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
118         printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
119         printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
120         printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
121         printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
122         printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
123         printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
124         printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
125         printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
126         printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
127         for (i=11; i<32; i++) {
128                 if (extp->FeatureSupport & (1<<i))
129                         printk("     - Unknown Bit %X:      supported\n", i);
130         }
131
132         printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
133         printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
134         for (i=1; i<8; i++) {
135                 if (extp->SuspendCmdSupport & (1<<i))
136                         printk("     - Unknown Bit %X:               supported\n", i);
137         }
138
139         printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
140         printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
141         printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
142         for (i=2; i<3; i++) {
143                 if (extp->BlkStatusRegMask & (1<<i))
144                         printk("     - Unknown Bit %X Active: yes\n",i);
145         }
146         printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
147         printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
148         for (i=6; i<16; i++) {
149                 if (extp->BlkStatusRegMask & (1<<i))
150                         printk("     - Unknown Bit %X Active: yes\n",i);
151         }
152
153         printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
154                extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
155         if (extp->VppOptimal)
156                 printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
157                        extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
158 }
159 #endif
160
161 /* Atmel chips don't use the same PRI format as Intel chips */
162 static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
163 {
164         struct map_info *map = mtd->priv;
165         struct cfi_private *cfi = map->fldrv_priv;
166         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
167         struct cfi_pri_atmel atmel_pri;
168         uint32_t features = 0;
169
170         /* Reverse byteswapping */
171         extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
172         extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
173         extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);
174
175         memcpy(&atmel_pri, extp, sizeof(atmel_pri));
176         memset((char *)extp + 5, 0, sizeof(*extp) - 5);
177
178         printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);
179
180         if (atmel_pri.Features & 0x01) /* chip erase supported */
181                 features |= (1<<0);
182         if (atmel_pri.Features & 0x02) /* erase suspend supported */
183                 features |= (1<<1);
184         if (atmel_pri.Features & 0x04) /* program suspend supported */
185                 features |= (1<<2);
186         if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
187                 features |= (1<<9);
188         if (atmel_pri.Features & 0x20) /* page mode read supported */
189                 features |= (1<<7);
190         if (atmel_pri.Features & 0x40) /* queued erase supported */
191                 features |= (1<<4);
192         if (atmel_pri.Features & 0x80) /* Protection bits supported */
193                 features |= (1<<6);
194
195         extp->FeatureSupport = features;
196
197         /* burst write mode not supported */
198         cfi->cfiq->BufWriteTimeoutTyp = 0;
199         cfi->cfiq->BufWriteTimeoutMax = 0;
200 }
201
202 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
203 /* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
204 static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
205 {
206         struct map_info *map = mtd->priv;
207         struct cfi_private *cfi = map->fldrv_priv;
208         struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
209
210         printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
211                             "erase on write disabled.\n");
212         extp->SuspendCmdSupport &= ~1;
213 }
214 #endif
215
216 #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
217 static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
218 {
219         struct map_info *map = mtd->priv;
220         struct cfi_private *cfi = map->fldrv_priv;
221         struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
222
223         if (cfip && (cfip->FeatureSupport&4)) {
224                 cfip->FeatureSupport &= ~4;
225                 printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
226         }
227 }
228 #endif
229
230 static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
231 {
232         struct map_info *map = mtd->priv;
233         struct cfi_private *cfi = map->fldrv_priv;
234
235         cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
236         cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
237 }
238
239 static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
240 {
241         struct map_info *map = mtd->priv;
242         struct cfi_private *cfi = map->fldrv_priv;
243
244         /* Note this is done after the region info is endian swapped */
245         cfi->cfiq->EraseRegionInfo[1] =
246                 (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
247 };
248
249 static void fixup_use_point(struct mtd_info *mtd, void *param)
250 {
251         struct map_info *map = mtd->priv;
252         if (!mtd->point && map_is_linear(map)) {
253                 mtd->point   = cfi_intelext_point;
254                 mtd->unpoint = cfi_intelext_unpoint;
255         }
256 }
257
258 static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
259 {
260         struct map_info *map = mtd->priv;
261         struct cfi_private *cfi = map->fldrv_priv;
262         if (cfi->cfiq->BufWriteTimeoutTyp) {
263                 printk(KERN_INFO "Using buffer write method\n" );
264                 mtd->write = cfi_intelext_write_buffers;
265                 mtd->writev = cfi_intelext_writev;
266         }
267 }
268
269 /*
270  * Some chips power-up with all sectors locked by default.
271  */
272 static void fixup_unlock_powerup_lock(struct mtd_info *mtd, void *param)
273 {
274         struct map_info *map = mtd->priv;
275         struct cfi_private *cfi = map->fldrv_priv;
276         struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
277
278         if (cfip->FeatureSupport&32) {
279                 printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
280                 mtd->flags |= MTD_POWERUP_LOCK;
281         }
282 }
283
/* Fixups applied to chips probed with real CFI query data. */
static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
	{ MANUFACTURER_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock, NULL, },
	{ 0, 0, NULL, NULL }	/* terminator */
};

/* Fixups applied only to chips probed in JEDEC mode. */
static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }	/* terminator */
};
/* Generic fixups applied in both CFI and JEDEC modes. */
static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor ids and the JEDEC vendor IDs appear
	 * to be common.  It is like the devices id's are as
	 * well.  This table is to pick all cases where
	 * we know that is the case.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
	{ 0, 0, NULL, NULL }	/* terminator */
};
316
/*
 * Read the Intel/Sharp extended query (PRI) table at @adr and fix up
 * the endianness of its multi-byte fields.  Tables of version >= 1.3
 * carry variable-length trailing data (OTP info, burst-read info,
 * per-partition region descriptors), so the required size is
 * discovered incrementally: whenever the buffer turns out to be too
 * small, the table is re-read with the larger size (goto again),
 * capped at 4096 bytes.  Returns a kmalloc'd table the caller must
 * eventually free, or NULL on failure.
 */
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
	struct cfi_pri_intelext *extp;
	unsigned int extp_size = sizeof(*extp);

 again:
	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
	if (!extp)
		return NULL;

	/* Only versions 1.0 through 1.5 are understood. */
	if (extp->MajorVersion != '1' ||
	    (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
		printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
		       "version %c.%c.\n",  extp->MajorVersion,
		       extp->MinorVersion);
		kfree(extp);
		return NULL;
	}

	/* Do some byteswapping if necessary */
	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

	if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
		unsigned int extra_size = 0;
		int nb_parts, i;

		/* Protection Register info */
		extra_size += (extp->NumProtectionFields - 1) *
			      sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info: one length byte plus the data it
		   describes.  Each size check may jump into the
		   need_more block below to grow the buffer. */
		extra_size += 2;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		extra_size += extp->extra[extra_size-1];

		/* Number of hardware-partitions */
		extra_size += 1;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		nb_parts = extp->extra[extra_size - 1];

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			extra_size += 2;

		/* Walk the per-partition region descriptors; each has a
		   variable number of trailing block-info records. */
		for (i = 0; i < nb_parts; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
			extra_size += sizeof(*rinfo);
			if (extp_size < sizeof(*extp) + extra_size)
				goto need_more;
			rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
			extra_size += (rinfo->NumBlockTypes - 1)
				      * sizeof(struct cfi_intelext_blockinfo);
		}

		if (extp->MinorVersion >= '4')
			extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

		if (extp_size < sizeof(*extp) + extra_size) {
			/* need_more is also entered by the gotos above:
			   free the short buffer and re-read with the
			   size computed so far. */
			need_more:
			extp_size = sizeof(*extp) + extra_size;
			kfree(extp);
			if (extp_size > 4096) {
				printk(KERN_ERR
					"%s: cfi_pri_intelext is too fat\n",
					__FUNCTION__);
				return NULL;
			}
			goto again;
		}
	}

	return extp;
}
396
/*
 * Entry point for chips using the Intel/Sharp extended command set.
 * Allocates and populates the mtd_info, reads the extended query
 * table when the chip was probed via real CFI (applying vendor
 * fixups), initialises per-chip timeouts and wait queues, and hands
 * off to cfi_intelext_setup().  Returns NULL on any failure.
 * @primary selects the primary (P_ADR) vs alternate (A_ADR) table.
 */
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_intelext_erase_varsize;
	mtd->read    = cfi_intelext_read;
	mtd->write   = cfi_intelext_write_words;
	mtd->sync    = cfi_intelext_sync;
	mtd->lock    = cfi_intelext_lock;
	mtd->unlock  = cfi_intelext_unlock;
	mtd->suspend = cfi_intelext_suspend;
	mtd->resume  = cfi_intelext_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;
	mtd->writesize = 1;

	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = read_pri_intelext(map, adr);
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		if(extp->SuspendCmdSupport & 1) {
			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
		}
	}
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	/* Per-chip timing: CFI "typical" timeouts are encoded as powers
	   of two (1 << Typ); conservative defaults are used when the
	   query supplies none. */
	for (i=0; i< cfi->numchips; i++) {
		if (cfi->cfiq->WordWriteTimeoutTyp)
			cfi->chips[i].word_write_time =
				1<<cfi->cfiq->WordWriteTimeoutTyp;
		else
			cfi->chips[i].word_write_time = 50000;

		if (cfi->cfiq->BufWriteTimeoutTyp)
			cfi->chips[i].buffer_write_time =
				1<<cfi->cfiq->BufWriteTimeoutTyp;
		/* No default; if it isn't specified, we won't use it */

		if (cfi->cfiq->BlockEraseTimeoutTyp)
			cfi->chips[i].erase_time =
				1000<<cfi->cfiq->BlockEraseTimeoutTyp;
		else
			cfi->chips[i].erase_time = 2000000;

		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_intelext_chipdrv;

	return cfi_intelext_setup(mtd);
}
488 struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
489 struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
490 EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
491 EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
492 EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
493
494 static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
495 {
496         struct map_info *map = mtd->priv;
497         struct cfi_private *cfi = map->fldrv_priv;
498         unsigned long offset = 0;
499         int i,j;
500         unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
501
502         //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
503
504         mtd->size = devsize * cfi->numchips;
505
506         mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
507         mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
508                         * mtd->numeraseregions, GFP_KERNEL);
509         if (!mtd->eraseregions) {
510                 printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
511                 goto setup_err;
512         }
513
514         for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
515                 unsigned long ernum, ersize;
516                 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
517                 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
518
519                 if (mtd->erasesize < ersize) {
520                         mtd->erasesize = ersize;
521                 }
522                 for (j=0; j<cfi->numchips; j++) {
523                         mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
524                         mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
525                         mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
526                         mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
527                 }
528                 offset += (ersize * ernum);
529         }
530
531         if (offset != devsize) {
532                 /* Argh */
533                 printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
534                 goto setup_err;
535         }
536
537         for (i=0; i<mtd->numeraseregions;i++){
538                 printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
539                        i,mtd->eraseregions[i].offset,
540                        mtd->eraseregions[i].erasesize,
541                        mtd->eraseregions[i].numblocks);
542         }
543
544 #ifdef CONFIG_MTD_OTP
545         mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
546         mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
547         mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
548         mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
549         mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
550         mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
551 #endif
552
553         /* This function has the potential to distort the reality
554            a bit and therefore should be called last. */
555         if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
556                 goto setup_err;
557
558         __module_get(THIS_MODULE);
559         register_reboot_notifier(&mtd->reboot_notifier);
560         return mtd;
561
562  setup_err:
563         if(mtd) {
564                 kfree(mtd->eraseregions);
565                 kfree(mtd);
566         }
567         kfree(cfi->cmdset_priv);
568         return NULL;
569 }
570
/*
 * When the chip advertises simultaneous operations (FeatureSupport
 * bit 9, extended table >= 1.3), replace the per-chip flchip array
 * with one flchip per hardware partition.  On success *pcfi and
 * map->fldrv_priv may point to a newly allocated cfi_private (the
 * old one is freed).  Returns 0 on success or a negative errno.
 */
static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash chips.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatent code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point. This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
	 */
	if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
	    && extp->FeatureSupport & (1 << 9)) {
		struct cfi_private *newcfi;
		struct flchip *chip;
		struct flchip_shared *shared;
		int offs, numregions, numparts, partshift, numvirtchips, i, j;

		/* Walk the variable-length tail of the extended table.
		   This offset arithmetic mirrors read_pri_intelext(),
		   which already validated the sizes. */

		/* Protection Register info */
		offs = (extp->NumProtectionFields - 1) *
		       sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		offs += extp->extra[offs+1]+2;

		/* Number of partition regions */
		numregions = extp->extra[offs];
		offs += 1;

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			offs += 2;

		/* Number of hardware partitions */
		numparts = 0;
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				  sizeof(struct cfi_intelext_blockinfo);
		}

		/* Programming Region info */
		if (extp->MinorVersion >= '4') {
			struct cfi_intelext_programming_regioninfo *prinfo;
			prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
			mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
			/* Programming regions mean individual bits can't
			   be rewritten independently. */
			mtd->flags &= ~MTD_BIT_WRITEABLE;
			printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
			       map->name, mtd->writesize,
			       cfi->interleave * prinfo->ControlValid,
			       cfi->interleave * prinfo->ControlInvalid);
		}

		/*
		 * All functions below currently rely on all chips having
		 * the same geometry so we'll just assume that all hardware
		 * partitions are of the same size too.
		 */
		partshift = cfi->chipshift - __ffs(numparts);

		if ((1 << partshift) < mtd->erasesize) {
			printk( KERN_ERR
				"%s: bad number of hw partitions (%d)\n",
				__FUNCTION__, numparts);
			return -EINVAL;
		}

		numvirtchips = cfi->numchips * numparts;
		newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
		if (!newcfi)
			return -ENOMEM;
		/* One flchip_shared per physical chip, shared by all of
		   that chip's partitions via chip->priv. */
		shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
		if (!shared) {
			kfree(newcfi);
			return -ENOMEM;
		}
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			spin_lock_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				spin_lock_init(&chip->_spinlock);
				chip->mutex = &chip->_spinlock;
				chip++;
			}
		}

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
				  "--> %d partitions of %d KiB\n",
				  map->name, cfi->numchips, cfi->interleave,
				  newcfi->numchips, 1<<(newcfi->chipshift-10));

		map->fldrv_priv = newcfi;
		*pcfi = newcfi;
		kfree(cfi);
	}

	return 0;
}
690
691 /*
692  *  *********** CHIP ACCESS FUNCTIONS ***********
693  */
/*
 * Try to bring the chip into a state where the caller may start an
 * operation of type 'mode' (FL_READY, FL_POINT, FL_WRITING, ...),
 * suspending an in-progress erase when the chip supports it.
 *
 * Called with chip->mutex held; the lock may be dropped and re-taken.
 * Returns 0 when the chip is ready, -EAGAIN when the lock was dropped
 * and the caller must re-evaluate the chip state, or -EIO on failure.
 */
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	/* status_OK: WSM-ready bit; status_PWS: partition write status bit */
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	unsigned long timeo = jiffies + HZ;

	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			/* At this point we're fine with write operations
			   in other partitions as they don't conflict. */
			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
				break;

			/* Still busy: drop the lock for a microsecond, then
			   tell the caller to retry from scratch since the
			   state may have changed while it was released. */
			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			return -EAGAIN;
		}
		/* fall through - chip reported ready */

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		/* Only attempt erase suspend when the chip advertises it
		   (FeatureSupport bit 1) and 'mode' is allowed during a
		   suspended erase; otherwise just sleep until done. */
		if (!cfip ||
		    !(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
			goto sleep;


		/* Erase suspend */
		map_write(map, CMD(0xB0), adr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh. Resume and pretend we weren't here.  */
				map_write(map, CMD(0xd0), adr);
				/* Make sure we're in 'read status' mode if it had finished */
				map_write(map, CMD(0x70), adr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "%s: Chip not ready after erase "
				       "suspended: status = 0x%lx\n", map->name, status.x[0]);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_STATUS;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;
		/* fall through */

	case FL_SHUTDOWN:
		/* The machine is rebooting now,so no one can get chip anymore */
		return -EIO;
	default:
	sleep:
		/* Chip is busy with an operation we cannot interrupt:
		   sleep on the chip's wait queue, then ask the caller
		   to retry once we are woken. */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		return -EAGAIN;
	}
}
800
/*
 * Acquire the right to perform an operation of type 'mode' on this
 * chip (or, for partitioned devices, this partition).  Write/erase
 * ownership is arbitrated through the flchip_shared structure since
 * those operations are global to the physical chip.
 *
 * Called with chip->mutex held; the lock may be dropped and re-taken.
 * Returns 0 on success or a negative error from chip_ready().
 */
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	int ret;
	DECLARE_WAITQUEUE(wait, current);

 retry:
	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING
			   || mode == FL_OTP_WRITE || mode == FL_SHUTDOWN)) {
		/*
		 * OK. We have possibility for contention on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read and/or written only when
		 * its lock is taken.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		spin_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			/* trylock avoids an ABBA deadlock against a holder
			   of contender->mutex trying to take ours. */
			ret = spin_trylock(contender->mutex);
			spin_unlock(&shared->lock);
			if (!ret)
				goto retry;
			spin_unlock(chip->mutex);
			ret = chip_ready(map, contender, contender->start, mode);
			spin_lock(chip->mutex);

			if (ret == -EAGAIN) {
				spin_unlock(contender->mutex);
				goto retry;
			}
			if (ret) {
				spin_unlock(contender->mutex);
				return ret;
			}
			/* Re-take the shared lock before releasing the
			   contender so ownership cannot change in between. */
			spin_lock(&shared->lock);
			spin_unlock(contender->mutex);
		}

		/* Check if we already have suspended erase
		 * on this chip. Sleep. */
		if (mode == FL_ERASING && shared->erasing
		    && shared->erasing->oldstate == FL_ERASING) {
			spin_unlock(&shared->lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			goto retry;
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		spin_unlock(&shared->lock);
	}
	ret = chip_ready(map, chip, adr, mode);
	if (ret == -EAGAIN)
		goto retry;

	return ret;
}
887
/*
 * Release a chip previously acquired with get_chip(): hand write/erase
 * ownership back to the partition it was borrowed from, resume any
 * suspended erase, and wake up waiters.
 *
 * Called with chip->mutex held.
 */
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		spin_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back ownership to who we loaned it from */
				struct flchip *loaner = shared->writing;
				spin_lock(loaner->mutex);
				spin_unlock(&shared->lock);
				spin_unlock(chip->mutex);
				/* Recurse once onto the loaner so its own
				   suspended state gets resumed. */
				put_chip(map, loaner, loaner->start);
				spin_lock(chip->mutex);
				spin_unlock(loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			spin_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		spin_unlock(&shared->lock);
	}

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), adr);	/* resume the erase */
		map_write(map, CMD(0x70), adr);	/* back to read-status mode */
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
	case FL_JEDEC_QUERY:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
	}
	wake_up(&chip->wq);
}
961
962 #ifdef CONFIG_MTD_XIP
963
964 /*
965  * No interrupt what so ever can be serviced while the flash isn't in array
966  * mode.  This is ensured by the xip_disable() and xip_enable() functions
967  * enclosing any code path where the flash is known not to be in array mode.
968  * And within a XIP disabled code path, only functions marked with __xipram
969  * may be called and nothing else (it's a good thing to inspect generated
970  * assembly to make sure inline functions were actually inlined and that gcc
971  * didn't emit calls to its own support functions). Also configuring MTD CFI
972  * support to a single buswidth and a single interleave is also recommended.
973  */
974
/*
 * Mask interrupts before the flash leaves array mode: while the chip
 * is not in array mode, no XIP code may be fetched from it, so nothing
 * interrupt-driven may run.  The dummy read is done first so the MMU
 * mapping for 'adr' is populated before IRQs are turned off.
 */
static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}
982
/*
 * Counterpart of xip_disable(): put the flash back into read-array
 * mode (command 0xff) if needed, prime the instruction prefetch, then
 * re-enable interrupts.  Marked __xipram since it runs while the flash
 * may not yet be executable.
 */
static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), adr);	/* back to read-array mode */
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}
995
996 /*
997  * When a delay is required for the flash operation to complete, the
998  * xip_wait_for_operation() function is polling for both the given timeout
999  * and pending (but still masked) hardware interrupts.  Whenever there is an
1000  * interrupt pending then the flash erase or write operation is suspended,
1001  * array mode restored and interrupts unmasked.  Task scheduling might also
1002  * happen at that point.  The CPU eventually returns from the interrupt or
1003  * the call to schedule() and the suspended flash operation is resumed for
1004  * the remaining of the delay period.
1005  *
1006  * Warning: this function _will_ fool interrupt latency tracing tools.
1007  */
1008
/*
 * Poll for completion of a write/erase while keeping XIP alive: when a
 * (masked) interrupt becomes pending and the chip supports suspension,
 * suspend the operation, restore array mode, service the interrupt
 * (and possibly schedule), then resume.  See the block comment above.
 *
 * Called with chip->mutex held and IRQs disabled (xip_disable).
 * Returns 0 on completion, -ETIME on timeout (8x the expected
 * operation time, or 500ms when none is given), -EIO if the chip
 * refuses to suspend.
 */
static int __xipram xip_wait_for_operation(
		struct map_info *map, struct flchip *chip,
		unsigned long adr, unsigned int chip_op_time )
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long usec, suspended, start, done;
	flstate_t oldstate, newstate;

	start = xip_currtime();
	/* timeout budget: 8x the expected time, 500ms fallback */
	usec = chip_op_time * 8;
	if (usec == 0)
		usec = 500000;
	done = 0;

	do {
		cpu_relax();
		/* Suspend only when supported: FeatureSupport bit 1 for
		   erase suspend, bit 2 for program suspend; interleaved
		   chips only when nothing is already suspended. */
		if (xip_irqpending() && cfip &&
		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase or write operation when
			 * supported.  Note that we currently don't try to
			 * suspend interleaved chips if there is already
			 * another operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			usec -= done;
			map_write(map, CMD(0xb0), adr);	/* suspend */
			map_write(map, CMD(0x70), adr);	/* read status */
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return -EIO;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (oldstate == FL_ERASING) {
				/* status bit 0x40 clear: erase had already
				   completed, nothing was suspended */
				if (!map_word_bitsset(map, status, CMD(0x40)))
					break;
				newstate = FL_XIP_WHILE_ERASING;
				chip->erase_suspended = 1;
			} else {
				if (!map_word_bitsset(map, status, CMD(0x04)))
					break;
				newstate = FL_XIP_WHILE_WRITING;
				chip->write_suspended = 1;
			}
			chip->state = newstate;
			map_write(map, CMD(0xff), adr);	/* back to array mode */
			(void) map_read(map, adr);
			asm volatile (".rep 8; nop; .endr");
			local_irq_enable();
			spin_unlock(chip->mutex);
			asm volatile (".rep 8; nop; .endr");
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			spin_lock(chip->mutex);
			while (chip->state != newstate) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				spin_unlock(chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				spin_lock(chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0xd0), adr);
			map_write(map, CMD(0x70), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
		done = xip_elapsed_since(start);
	} while (!map_word_andequal(map, status, OK, OK)
		 && done < usec);

	return (done >= usec) ? -ETIME : 0;
}
1119
1120 /*
1121  * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
1122  * the flash is actively programming or erasing since we have to poll for
1123  * the operation to complete anyway.  We can't do that in a generic way with
1124  * a XIP setup so do it before the actual flash operation in this case
1125  * and stub it out from INVAL_CACHE_AND_WAIT.
1126  */
1127 #define XIP_INVAL_CACHED_RANGE(map, from, size)  \
1128         INVALIDATE_CACHED_RANGE(map, from, size)
1129
1130 #define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec) \
1131         xip_wait_for_operation(map, chip, cmd_adr, usec)
1132
1133 #else
1134
1135 #define xip_disable(map, chip, adr)
1136 #define xip_enable(map, chip, adr)
1137 #define XIP_INVAL_CACHED_RANGE(x...)
1138 #define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
1139
/*
 * Non-XIP variant: invalidate the cache over the region being modified,
 * then poll the status register until the operation completes or times
 * out.  Sleeps (msleep) for long waits, busy-waits otherwise, and
 * blocks while some other context has suspended the operation.
 *
 * Called with chip->mutex held; the lock is dropped while waiting.
 * Returns 0 on completion or -ETIME on timeout (8x the expected
 * operation time, or 500ms when no expected time is given).
 */
static int inval_cache_and_wait_for_operation(
		struct map_info *map, struct flchip *chip,
		unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
		unsigned int chip_op_time)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80);
	int chip_state = chip->state;
	unsigned int timeo, sleep_time;

	/* Drop the lock around the (possibly slow) cache invalidation. */
	spin_unlock(chip->mutex);
	if (inval_len)
		INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
	spin_lock(chip->mutex);

	/* set our timeout to 8 times the expected delay */
	timeo = chip_op_time * 8;
	if (!timeo)
		timeo = 500000;
	sleep_time = chip_op_time / 2;

	for (;;) {
		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		if (!timeo) {
			/* Timed out: leave the chip in read-status mode. */
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			return -ETIME;
		}

		/* OK Still waiting. Drop the lock, wait a while and retry. */
		spin_unlock(chip->mutex);
		if (sleep_time >= 1000000/HZ) {
			/*
			 * Half of the normal delay still remaining
			 * can be performed with a sleeping delay instead
			 * of busy waiting.
			 */
			msleep(sleep_time/1000);
			timeo -= sleep_time;
			sleep_time = 1000000/HZ;
		} else {
			udelay(1);
			cond_resched();
			timeo--;
		}
		spin_lock(chip->mutex);

		while (chip->state != chip_state) {
			/* Someone's suspended the operation: sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
		}
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	return 0;
}
1206
1207 #endif
1208
1209 #define WAIT_TIMEOUT(map, chip, adr, udelay) \
1210         INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay);
1211
1212
1213 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
1214 {
1215         unsigned long cmd_addr;
1216         struct cfi_private *cfi = map->fldrv_priv;
1217         int ret = 0;
1218
1219         adr += chip->start;
1220
1221         /* Ensure cmd read/writes are aligned. */
1222         cmd_addr = adr & ~(map_bankwidth(map)-1);
1223
1224         spin_lock(chip->mutex);
1225
1226         ret = get_chip(map, chip, cmd_addr, FL_POINT);
1227
1228         if (!ret) {
1229                 if (chip->state != FL_POINT && chip->state != FL_READY)
1230                         map_write(map, CMD(0xff), cmd_addr);
1231
1232                 chip->state = FL_POINT;
1233                 chip->ref_point_counter++;
1234         }
1235         spin_unlock(chip->mutex);
1236
1237         return ret;
1238 }
1239
1240 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
1241 {
1242         struct map_info *map = mtd->priv;
1243         struct cfi_private *cfi = map->fldrv_priv;
1244         unsigned long ofs, last_end = 0;
1245         int chipnum;
1246         int ret = 0;
1247
1248         if (!map->virt || (from + len > mtd->size))
1249                 return -EINVAL;
1250
1251         /* Now lock the chip(s) to POINT state */
1252
1253         /* ofs: offset within the first chip that the first read should start */
1254         chipnum = (from >> cfi->chipshift);
1255         ofs = from - (chipnum << cfi->chipshift);
1256
1257         *mtdbuf = (void *)map->virt + cfi->chips[chipnum].start + ofs;
1258         *retlen = 0;
1259
1260         while (len) {
1261                 unsigned long thislen;
1262
1263                 if (chipnum >= cfi->numchips)
1264                         break;
1265
1266                 /* We cannot point across chips that are virtually disjoint */
1267                 if (!last_end)
1268                         last_end = cfi->chips[chipnum].start;
1269                 else if (cfi->chips[chipnum].start != last_end)
1270                         break;
1271
1272                 if ((len + ofs -1) >> cfi->chipshift)
1273                         thislen = (1<<cfi->chipshift) - ofs;
1274                 else
1275                         thislen = len;
1276
1277                 ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
1278                 if (ret)
1279                         break;
1280
1281                 *retlen += thislen;
1282                 len -= thislen;
1283
1284                 ofs = 0;
1285                 last_end += 1 << cfi->chipshift;
1286                 chipnum++;
1287         }
1288         return 0;
1289 }
1290
1291 static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
1292 {
1293         struct map_info *map = mtd->priv;
1294         struct cfi_private *cfi = map->fldrv_priv;
1295         unsigned long ofs;
1296         int chipnum;
1297
1298         /* Now unlock the chip(s) POINT state */
1299
1300         /* ofs: offset within the first chip that the first read should start */
1301         chipnum = (from >> cfi->chipshift);
1302         ofs = from - (chipnum <<  cfi->chipshift);
1303
1304         while (len) {
1305                 unsigned long thislen;
1306                 struct flchip *chip;
1307
1308                 chip = &cfi->chips[chipnum];
1309                 if (chipnum >= cfi->numchips)
1310                         break;
1311
1312                 if ((len + ofs -1) >> cfi->chipshift)
1313                         thislen = (1<<cfi->chipshift) - ofs;
1314                 else
1315                         thislen = len;
1316
1317                 spin_lock(chip->mutex);
1318                 if (chip->state == FL_POINT) {
1319                         chip->ref_point_counter--;
1320                         if(chip->ref_point_counter == 0)
1321                                 chip->state = FL_READY;
1322                 } else
1323                         printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */
1324
1325                 put_chip(map, chip, chip->start);
1326                 spin_unlock(chip->mutex);
1327
1328                 len -= thislen;
1329                 ofs = 0;
1330                 chipnum++;
1331         }
1332 }
1333
1334 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1335 {
1336         unsigned long cmd_addr;
1337         struct cfi_private *cfi = map->fldrv_priv;
1338         int ret;
1339
1340         adr += chip->start;
1341
1342         /* Ensure cmd read/writes are aligned. */
1343         cmd_addr = adr & ~(map_bankwidth(map)-1);
1344
1345         spin_lock(chip->mutex);
1346         ret = get_chip(map, chip, cmd_addr, FL_READY);
1347         if (ret) {
1348                 spin_unlock(chip->mutex);
1349                 return ret;
1350         }
1351
1352         if (chip->state != FL_POINT && chip->state != FL_READY) {
1353                 map_write(map, CMD(0xff), cmd_addr);
1354
1355                 chip->state = FL_READY;
1356         }
1357
1358         map_copy_from(map, buf, adr, len);
1359
1360         put_chip(map, chip, cmd_addr);
1361
1362         spin_unlock(chip->mutex);
1363         return 0;
1364 }
1365
1366 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1367 {
1368         struct map_info *map = mtd->priv;
1369         struct cfi_private *cfi = map->fldrv_priv;
1370         unsigned long ofs;
1371         int chipnum;
1372         int ret = 0;
1373
1374         /* ofs: offset within the first chip that the first read should start */
1375         chipnum = (from >> cfi->chipshift);
1376         ofs = from - (chipnum <<  cfi->chipshift);
1377
1378         *retlen = 0;
1379
1380         while (len) {
1381                 unsigned long thislen;
1382
1383                 if (chipnum >= cfi->numchips)
1384                         break;
1385
1386                 if ((len + ofs -1) >> cfi->chipshift)
1387                         thislen = (1<<cfi->chipshift) - ofs;
1388                 else
1389                         thislen = len;
1390
1391                 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1392                 if (ret)
1393                         break;
1394
1395                 *retlen += thislen;
1396                 len -= thislen;
1397                 buf += thislen;
1398
1399                 ofs = 0;
1400                 chipnum++;
1401         }
1402         return ret;
1403 }
1404
/*
 * Program one bus-width word of 'datum' at chip-relative 'adr'.
 * 'mode' selects the command: FL_WRITING for normal data,
 * FL_OTP_WRITE for the protection register.
 *
 * Returns 0 on success, -EINVAL for an unknown mode or unrecognised
 * error status, -EROFS when the status says the block is locked,
 * -EIO on a VPP error, or the wait helper's error on timeout.
 */
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum, int mode)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, write_cmd;
	int ret=0;

	adr += chip->start;

	switch (mode) {
	case FL_WRITING:
		/* P_ID 0x0200 parts use 0x41 instead of the usual 0x40
		   word-program command. */
		write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
		break;
	case FL_OTP_WRITE:
		write_cmd = CMD(0xc0);
		break;
	default:
		return -EINVAL;
	}

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, mode);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
	/* command cycle followed by the data cycle */
	map_write(map, write_cmd, adr);
	map_write(map, datum, adr);
	chip->state = mode;

	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
				   adr, map_bankwidth(map),
				   chip->word_write_time);
	if (ret) {
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
		goto out;
	}

	/* check for errors */
	status = map_read(map, adr);
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if (chipstatus & 0x02) {
			/* block lock bit set */
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}
1476
1477
1478 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
1479 {
1480         struct map_info *map = mtd->priv;
1481         struct cfi_private *cfi = map->fldrv_priv;
1482         int ret = 0;
1483         int chipnum;
1484         unsigned long ofs;
1485
1486         *retlen = 0;
1487         if (!len)
1488                 return 0;
1489
1490         chipnum = to >> cfi->chipshift;
1491         ofs = to  - (chipnum << cfi->chipshift);
1492
1493         /* If it's not bus-aligned, do the first byte write */
1494         if (ofs & (map_bankwidth(map)-1)) {
1495                 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1496                 int gap = ofs - bus_ofs;
1497                 int n;
1498                 map_word datum;
1499
1500                 n = min_t(int, len, map_bankwidth(map)-gap);
1501                 datum = map_word_ff(map);
1502                 datum = map_word_load_partial(map, datum, buf, gap, n);
1503
1504                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1505                                                bus_ofs, datum, FL_WRITING);
1506                 if (ret)
1507                         return ret;
1508
1509                 len -= n;
1510                 ofs += n;
1511                 buf += n;
1512                 (*retlen) += n;
1513
1514                 if (ofs >> cfi->chipshift) {
1515                         chipnum ++;
1516                         ofs = 0;
1517                         if (chipnum == cfi->numchips)
1518                                 return 0;
1519                 }
1520         }
1521
1522         while(len >= map_bankwidth(map)) {
1523                 map_word datum = map_word_load(map, buf);
1524
1525                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1526                                        ofs, datum, FL_WRITING);
1527                 if (ret)
1528                         return ret;
1529
1530                 ofs += map_bankwidth(map);
1531                 buf += map_bankwidth(map);
1532                 (*retlen) += map_bankwidth(map);
1533                 len -= map_bankwidth(map);
1534
1535                 if (ofs >> cfi->chipshift) {
1536                         chipnum ++;
1537                         ofs = 0;
1538                         if (chipnum == cfi->numchips)
1539                                 return 0;
1540                 }
1541         }
1542
1543         if (len & (map_bankwidth(map)-1)) {
1544                 map_word datum;
1545
1546                 datum = map_word_ff(map);
1547                 datum = map_word_load_partial(map, datum, buf, 0, len);
1548
1549                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1550                                        ofs, datum, FL_WRITING);
1551                 if (ret)
1552                         return ret;
1553
1554                 (*retlen) += len;
1555         }
1556
1557         return 0;
1558 }
1559
1560
/*
 * Program one chunk of data (at most one write buffer, never crossing
 * a write-buffer boundary) using the buffered write command.  The data
 * is gathered from the kvec array at *pvec starting *pvec_seek bytes
 * in; both cursors are advanced past the consumed bytes so the caller
 * can issue the next chunk.
 *
 * Returns 0 on success or a negative errno: -EROFS for a locked
 * block, -EIO on bad VPP, -EINVAL on other status errors, or the
 * error from get_chip()/WAIT_TIMEOUT()/INVAL_CACHE_AND_WAIT().
 */
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const struct kvec **pvec,
				    unsigned long *pvec_seek, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, write_cmd, datum;
	unsigned long cmd_adr;
	int ret, wbufsize, word_gap, words;
	const struct kvec *vec;
	unsigned long vec_seek;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	/* commands are issued at the write-buffer-aligned address */
	cmd_adr = adr & ~(wbufsize-1);

	/* Let's determine this according to the interleave only once.
	   (0xe8 normally, 0xe9 when the primary command set ID is 0x0200) */
	write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	/* Section 4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
	   [...], the device will not accept any more Write to Buffer commands".
	   So we must check here and reset those bits if they're set. Otherwise
	   we're just pissing in the wind */
	if (chip->state != FL_STATUS) {
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
	}
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x30))) {
		xip_enable(map, chip, cmd_adr);
		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
		xip_disable(map, chip, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
	}

	chip->state = FL_WRITING_TO_BUFFER;
	map_write(map, write_cmd, cmd_adr);
	ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0);
	if (ret) {
		/* Argh. Not ready for write to buffer */
		map_word Xstatus = map_read(map, cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
		status = map_read(map, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		xip_enable(map, chip, cmd_adr);
		printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
				map->name, Xstatus.x[0], status.x[0]);
		goto out;
	}

	/* Figure out the number of words to write */
	word_gap = (-adr & (map_bankwidth(map)-1));
	words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
	if (!word_gap) {
		words--;
	} else {
		/* 'adr' is not bus-aligned: back up to the aligned word and
		   pre-fill the leading gap with 0xff (no-op bits) */
		word_gap = map_bankwidth(map) - word_gap;
		adr -= word_gap;
		datum = map_word_ff(map);
	}

	/* Write length of data to come */
	map_write(map, CMD(words), cmd_adr );

	/* Write data */
	vec = *pvec;
	vec_seek = *pvec_seek;
	do {
		/* bytes to take this round: bounded by the word gap, the
		   current iovec, and the remaining request length */
		int n = map_bankwidth(map) - word_gap;
		if (n > vec->iov_len - vec_seek)
			n = vec->iov_len - vec_seek;
		if (n > len)
			n = len;

		/* a short final word must also be padded with 0xff */
		if (!word_gap && len < map_bankwidth(map))
			datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum,
					      vec->iov_base + vec_seek,
					      word_gap, n);

		len -= n;
		word_gap += n;
		/* flush the assembled word once it is full or we're done */
		if (!len || word_gap == map_bankwidth(map)) {
			map_write(map, datum, adr);
			adr += map_bankwidth(map);
			word_gap = 0;
		}

		vec_seek += n;
		if (vec_seek == vec->iov_len) {
			vec++;
			vec_seek = 0;
		}
	} while (len);
	*pvec = vec;
	*pvec_seek = vec_seek;

	/* GO GO GO */
	map_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
				   adr, len,
				   chip->buffer_write_time);
	if (ret) {
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, cmd_adr);
		printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
		goto out;
	}

	/* check for errors: SR.1 (lock), SR.3 (VPP) or SR.4 (program fail) */
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		xip_enable(map, chip, cmd_adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, cmd_adr);
 out:	put_chip(map, chip, cmd_adr);
	spin_unlock(chip->mutex);
	return ret;
}
1715
1716 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1717                                 unsigned long count, loff_t to, size_t *retlen)
1718 {
1719         struct map_info *map = mtd->priv;
1720         struct cfi_private *cfi = map->fldrv_priv;
1721         int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1722         int ret = 0;
1723         int chipnum;
1724         unsigned long ofs, vec_seek, i;
1725         size_t len = 0;
1726
1727         for (i = 0; i < count; i++)
1728                 len += vecs[i].iov_len;
1729
1730         *retlen = 0;
1731         if (!len)
1732                 return 0;
1733
1734         chipnum = to >> cfi->chipshift;
1735         ofs = to - (chipnum << cfi->chipshift);
1736         vec_seek = 0;
1737
1738         do {
1739                 /* We must not cross write block boundaries */
1740                 int size = wbufsize - (ofs & (wbufsize-1));
1741
1742                 if (size > len)
1743                         size = len;
1744                 ret = do_write_buffer(map, &cfi->chips[chipnum],
1745                                       ofs, &vecs, &vec_seek, size);
1746                 if (ret)
1747                         return ret;
1748
1749                 ofs += size;
1750                 (*retlen) += size;
1751                 len -= size;
1752
1753                 if (ofs >> cfi->chipshift) {
1754                         chipnum ++;
1755                         ofs = 0;
1756                         if (chipnum == cfi->numchips)
1757                                 return 0;
1758                 }
1759
1760                 /* Be nice and reschedule with the chip in a usable state for other
1761                    processes. */
1762                 cond_resched();
1763
1764         } while (len);
1765
1766         return 0;
1767 }
1768
1769 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1770                                        size_t len, size_t *retlen, const u_char *buf)
1771 {
1772         struct kvec vec;
1773
1774         vec.iov_base = (void *) buf;
1775         vec.iov_len = len;
1776
1777         return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1778 }
1779
/*
 * Erase the single erase block at 'adr' (chip-relative) on 'chip'.
 *
 * Clears the status register (0x50), issues Block Erase (0x20/0xD0)
 * and waits for completion.  An erase failure reported in SR.5 alone
 * is retried up to 3 times.  'thunk' is unused; it matches the
 * cfi_varsize_frob() callback signature.
 *
 * Returns 0 on success or a negative errno (-EROFS locked block,
 * -EIO bad VPP or repeated erase failure, -EINVAL bad command
 * sequence, or the error from get_chip()/INVAL_CACHE_AND_WAIT()).
 */
static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
				      unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status;
	int retries = 3;
	int ret;

	adr += chip->start;

 retry:
	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	/* Clear the status register first */
	map_write(map, CMD(0x50), adr);

	/* Now erase */
	map_write(map, CMD(0x20), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_ERASING;
	chip->erase_suspended = 0;

	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
				   adr, len,
				   chip->erase_time);
	if (ret) {
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
		goto out;
	}

	/* We've broken this before. It doesn't hurt to be safe */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);

	/* check for errors: SR.1, SR.3, SR.4 or SR.5 set */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* Reset the error bits */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if ((chipstatus & 0x30) == 0x30) {
			/* SR.4 and SR.5 both set: command sequence error */
			printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		} else if (chipstatus & 0x02) {
			/* Protection bit set */
			ret = -EROFS;
		} else if (chipstatus & 0x8) {
			/* Voltage */
			printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
			ret = -EIO;
		} else if (chipstatus & 0x20 && retries--) {
			/* SR.5: erase failed -- release the chip and retry */
			printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
			put_chip(map, chip, adr);
			spin_unlock(chip->mutex);
			goto retry;
		} else {
			printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
			ret = -EIO;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}
1864
1865 static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1866 {
1867         unsigned long ofs, len;
1868         int ret;
1869
1870         ofs = instr->addr;
1871         len = instr->len;
1872
1873         ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1874         if (ret)
1875                 return ret;
1876
1877         instr->state = MTD_ERASE_DONE;
1878         mtd_erase_callback(instr);
1879
1880         return 0;
1881 }
1882
1883 static void cfi_intelext_sync (struct mtd_info *mtd)
1884 {
1885         struct map_info *map = mtd->priv;
1886         struct cfi_private *cfi = map->fldrv_priv;
1887         int i;
1888         struct flchip *chip;
1889         int ret = 0;
1890
1891         for (i=0; !ret && i<cfi->numchips; i++) {
1892                 chip = &cfi->chips[i];
1893
1894                 spin_lock(chip->mutex);
1895                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1896
1897                 if (!ret) {
1898                         chip->oldstate = chip->state;
1899                         chip->state = FL_SYNCING;
1900                         /* No need to wake_up() on this state change -
1901                          * as the whole point is that nobody can do anything
1902                          * with the chip now anyway.
1903                          */
1904                 }
1905                 spin_unlock(chip->mutex);
1906         }
1907
1908         /* Unlock the chips again */
1909
1910         for (i--; i >=0; i--) {
1911                 chip = &cfi->chips[i];
1912
1913                 spin_lock(chip->mutex);
1914
1915                 if (chip->state == FL_SYNCING) {
1916                         chip->state = chip->oldstate;
1917                         chip->oldstate = FL_READY;
1918                         wake_up(&chip->wq);
1919                 }
1920                 spin_unlock(chip->mutex);
1921         }
1922 }
1923
/*
 * Read the raw lock status for the block at 'adr' (chip-relative).
 *
 * Issues the Read Identifier/Query command (0x90) and reads the value
 * two device words into the block's query area, leaving the chip in
 * FL_JEDEC_QUERY state.  'len' and 'thunk' are unused; they match the
 * cfi_varsize_frob() callback signature.  Returns the raw query byte.
 */
static int __xipram do_getlockstatus_oneblock(struct map_info *map,
						struct flchip *chip,
						unsigned long adr,
						int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int status, ofs_factor = cfi->interleave * cfi->device_type;

	adr += chip->start;
	xip_disable(map, chip, adr+(2*ofs_factor));
	map_write(map, CMD(0x90), adr+(2*ofs_factor));
	chip->state = FL_JEDEC_QUERY;
	status = cfi_read_query(map, adr+(2*ofs_factor));
	xip_enable(map, chip, 0);
	return status;
}
1940
1941 #ifdef DEBUG_LOCK_BITS
1942 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
1943                                                 struct flchip *chip,
1944                                                 unsigned long adr,
1945                                                 int len, void *thunk)
1946 {
1947         printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
1948                adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
1949         return 0;
1950 }
1951 #endif
1952
1953 #define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
1954 #define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)
1955
1956 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
1957                                        unsigned long adr, int len, void *thunk)
1958 {
1959         struct cfi_private *cfi = map->fldrv_priv;
1960         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
1961         int udelay;
1962         int ret;
1963
1964         adr += chip->start;
1965
1966         spin_lock(chip->mutex);
1967         ret = get_chip(map, chip, adr, FL_LOCKING);
1968         if (ret) {
1969                 spin_unlock(chip->mutex);
1970                 return ret;
1971         }
1972
1973         ENABLE_VPP(map);
1974         xip_disable(map, chip, adr);
1975
1976         map_write(map, CMD(0x60), adr);
1977         if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1978                 map_write(map, CMD(0x01), adr);
1979                 chip->state = FL_LOCKING;
1980         } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
1981                 map_write(map, CMD(0xD0), adr);
1982                 chip->state = FL_UNLOCKING;
1983         } else
1984                 BUG();
1985
1986         /*
1987          * If Instant Individual Block Locking supported then no need
1988          * to delay.
1989          */
1990         udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;
1991
1992         ret = WAIT_TIMEOUT(map, chip, adr, udelay);
1993         if (ret) {
1994                 map_write(map, CMD(0x70), adr);
1995                 chip->state = FL_STATUS;
1996                 xip_enable(map, chip, adr);
1997                 printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
1998                 goto out;
1999         }
2000
2001         xip_enable(map, chip, adr);
2002 out:    put_chip(map, chip, adr);
2003         spin_unlock(chip->mutex);
2004         return ret;
2005 }
2006
2007 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
2008 {
2009         int ret;
2010
2011 #ifdef DEBUG_LOCK_BITS
2012         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
2013                __FUNCTION__, ofs, len);
2014         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2015                 ofs, len, NULL);
2016 #endif
2017
2018         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2019                 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2020
2021 #ifdef DEBUG_LOCK_BITS
2022         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2023                __FUNCTION__, ret);
2024         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2025                 ofs, len, NULL);
2026 #endif
2027
2028         return ret;
2029 }
2030
2031 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
2032 {
2033         int ret;
2034
2035 #ifdef DEBUG_LOCK_BITS
2036         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
2037                __FUNCTION__, ofs, len);
2038         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2039                 ofs, len, NULL);
2040 #endif
2041
2042         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2043                                         ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2044
2045 #ifdef DEBUG_LOCK_BITS
2046         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2047                __FUNCTION__, ret);
2048         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2049                 ofs, len, NULL);
2050 #endif
2051
2052         return ret;
2053 }
2054
2055 #ifdef CONFIG_MTD_OTP
2056
2057 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
2058                         u_long data_offset, u_char *buf, u_int size,
2059                         u_long prot_offset, u_int groupno, u_int groupsize);
2060
/*
 * Read 'size' bytes of protection-register (OTP) data at 'offset'
 * (chip-relative) into 'buf'.
 *
 * Switches the chip into JEDEC query mode (0x90) if needed and copies
 * straight from the mapping.  The cache is invalidated both before the
 * copy (so no stale array-mode data is returned) and after it (so no
 * OTP data lingers in the cache).  'prot', 'grpno' and 'grpsz' are
 * unused here; they exist to match the otp_op_t callback signature.
 *
 * Returns 0, or a negative errno from get_chip().
 */
static int __xipram
do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	/* let's ensure we're not reading back cached data from array mode */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	xip_disable(map, chip, chip->start);
	if (chip->state != FL_JEDEC_QUERY) {
		map_write(map, CMD(0x90), chip->start);
		chip->state = FL_JEDEC_QUERY;
	}
	map_copy_from(map, buf, chip->start + offset, size);
	xip_enable(map, chip, chip->start);

	/* then ensure we don't keep OTP data in the cache */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	put_chip(map, chip, chip->start);
	spin_unlock(chip->mutex);
	return 0;
}
2093
2094 static int
2095 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2096              u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2097 {
2098         int ret;
2099
2100         while (size) {
2101                 unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2102                 int gap = offset - bus_ofs;
2103                 int n = min_t(int, size, map_bankwidth(map)-gap);
2104                 map_word datum = map_word_ff(map);
2105
2106                 datum = map_word_load_partial(map, datum, buf, gap, n);
2107                 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2108                 if (ret)
2109                         return ret;
2110
2111                 offset += n;
2112                 buf += n;
2113                 size -= n;
2114         }
2115
2116         return 0;
2117 }
2118
2119 static int
2120 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2121             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2122 {
2123         struct cfi_private *cfi = map->fldrv_priv;
2124         map_word datum;
2125
2126         /* make sure area matches group boundaries */
2127         if (size != grpsz)
2128                 return -EXDEV;
2129
2130         datum = map_word_ff(map);
2131         datum = map_word_clr(map, datum, CMD(1 << grpno));
2132         return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2133 }
2134
2135 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2136                                  size_t *retlen, u_char *buf,
2137                                  otp_op_t action, int user_regs)
2138 {
2139         struct map_info *map = mtd->priv;
2140         struct cfi_private *cfi = map->fldrv_priv;
2141         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2142         struct flchip *chip;
2143         struct cfi_intelext_otpinfo *otp;
2144         u_long devsize, reg_prot_offset, data_offset;
2145         u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2146         u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2147         int ret;
2148
2149         *retlen = 0;
2150
2151         /* Check that we actually have some OTP registers */
2152         if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2153                 return -ENODATA;
2154
2155         /* we need real chips here not virtual ones */
2156         devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2157         chip_step = devsize >> cfi->chipshift;
2158         chip_num = 0;
2159
2160         /* Some chips have OTP located in the _top_ partition only.
2161            For example: Intel 28F256L18T (T means top-parameter device) */
2162         if (cfi->mfr == MANUFACTURER_INTEL) {
2163                 switch (cfi->id) {
2164                 case 0x880b:
2165                 case 0x880c:
2166                 case 0x880d:
2167                         chip_num = chip_step - 1;
2168                 }
2169         }
2170
2171         for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2172                 chip = &cfi->chips[chip_num];
2173                 otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2174
2175                 /* first OTP region */
2176                 field = 0;
2177                 reg_prot_offset = extp->ProtRegAddr;
2178                 reg_fact_groups = 1;
2179                 reg_fact_size = 1 << extp->FactProtRegSize;
2180                 reg_user_groups = 1;
2181                 reg_user_size = 1 << extp->UserProtRegSize;
2182
2183                 while (len > 0) {
2184                         /* flash geometry fixup */
2185                         data_offset = reg_prot_offset + 1;
2186                         data_offset *= cfi->interleave * cfi->device_type;
2187                         reg_prot_offset *= cfi->interleave * cfi->device_type;
2188                         reg_fact_size *= cfi->interleave;
2189                         reg_user_size *= cfi->interleave;
2190
2191                         if (user_regs) {
2192                                 groups = reg_user_groups;
2193                                 groupsize = reg_user_size;
2194                                 /* skip over factory reg area */
2195                                 groupno = reg_fact_groups;
2196                                 data_offset += reg_fact_groups * reg_fact_size;
2197                         } else {
2198                                 groups = reg_fact_groups;
2199                                 groupsize = reg_fact_size;
2200                                 groupno = 0;
2201                         }
2202
2203                         while (len > 0 && groups > 0) {
2204                                 if (!action) {
2205                                         /*
2206                                          * Special case: if action is NULL
2207                                          * we fill buf with otp_info records.
2208                                          */
2209                                         struct otp_info *otpinfo;
2210                                         map_word lockword;
2211                                         len -= sizeof(struct otp_info);
2212                                         if (len <= 0)
2213                                                 return -ENOSPC;
2214                                         ret = do_otp_read(map, chip,
2215                                                           reg_prot_offset,
2216                                                           (u_char *)&lockword,
2217                                                           map_bankwidth(map),
2218                                                           0, 0,  0);
2219                                         if (ret)
2220                                                 return ret;
2221                                         otpinfo = (struct otp_info *)buf;
2222                                         otpinfo->start = from;
2223                                         otpinfo->length = groupsize;
2224                                         otpinfo->locked =
2225                                            !map_word_bitsset(map, lockword,
2226                                                              CMD(1 << groupno));
2227                                         from += groupsize;
2228                                         buf += sizeof(*otpinfo);
2229                                         *retlen += sizeof(*otpinfo);
2230                                 } else if (from >= groupsize) {
2231                                         from -= groupsize;
2232                                         data_offset += groupsize;
2233                                 } else {
2234                                         int size = groupsize;
2235                                         data_offset += from;
2236                                         size -= from;
2237                                         from = 0;
2238                                         if (size > len)
2239                                                 size = len;
2240                                         ret = action(map, chip, data_offset,
2241                                                      buf, size, reg_prot_offset,
2242                                                      groupno, groupsize);
2243                                         if (ret < 0)
2244                                                 return ret;
2245                                         buf += size;
2246                                         len -= size;
2247                                         *retlen += size;
2248                                         data_offset += size;
2249                                 }
2250                                 groupno++;
2251                                 groups--;
2252                         }
2253
2254                         /* next OTP region */
2255                         if (++field == extp->NumProtectionFields)
2256                                 break;
2257                         reg_prot_offset = otp->ProtRegAddr;
2258                         reg_fact_groups = otp->FactGroups;
2259                         reg_fact_size = 1 << otp->FactProtRegSize;
2260                         reg_user_groups = otp->UserGroups;
2261                         reg_user_size = 1 << otp->UserProtRegSize;
2262                         otp++;
2263                 }
2264         }
2265
2266         return 0;
2267 }
2268
2269 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2270                                            size_t len, size_t *retlen,
2271                                             u_char *buf)
2272 {
2273         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2274                                      buf, do_otp_read, 0);
2275 }
2276
2277 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2278                                            size_t len, size_t *retlen,
2279                                             u_char *buf)
2280 {
2281         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2282                                      buf, do_otp_read, 1);
2283 }
2284
2285 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2286                                             size_t len, size_t *retlen,
2287                                              u_char *buf)
2288 {
2289         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2290                                      buf, do_otp_write, 1);
2291 }
2292
2293 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2294                                            loff_t from, size_t len)
2295 {
2296         size_t retlen;
2297         return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2298                                      NULL, do_otp_lock, 1);
2299 }
2300
2301 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
2302                                            struct otp_info *buf, size_t len)
2303 {
2304         size_t retlen;
2305         int ret;
2306
2307         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
2308         return ret ? : retlen;
2309 }
2310
2311 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
2312                                            struct otp_info *buf, size_t len)
2313 {
2314         size_t retlen;
2315         int ret;
2316
2317         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
2318         return ret ? : retlen;
2319 }
2320
2321 #endif
2322
2323 static void cfi_intelext_save_locks(struct mtd_info *mtd)
2324 {
2325         struct mtd_erase_region_info *region;
2326         int block, status, i;
2327         unsigned long adr;
2328         size_t len;
2329
2330         for (i = 0; i < mtd->numeraseregions; i++) {
2331                 region = &mtd->eraseregions[i];
2332                 if (!region->lockmap)
2333                         continue;
2334
2335                 for (block = 0; block < region->numblocks; block++){
2336                         len = region->erasesize;
2337                         adr = region->offset + block * len;
2338
2339                         status = cfi_varsize_frob(mtd,
2340                                         do_getlockstatus_oneblock, adr, len, NULL);
2341                         if (status)
2342                                 set_bit(block, region->lockmap);
2343                         else
2344                                 clear_bit(block, region->lockmap);
2345                 }
2346         }
2347 }
2348
2349 static int cfi_intelext_suspend(struct mtd_info *mtd)
2350 {
2351         struct map_info *map = mtd->priv;
2352         struct cfi_private *cfi = map->fldrv_priv;
2353         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2354         int i;
2355         struct flchip *chip;
2356         int ret = 0;
2357
2358         if ((mtd->flags & MTD_POWERUP_LOCK)
2359             && extp && (extp->FeatureSupport & (1 << 5)))
2360                 cfi_intelext_save_locks(mtd);
2361
2362         for (i=0; !ret && i<cfi->numchips; i++) {
2363                 chip = &cfi->chips[i];
2364
2365                 spin_lock(chip->mutex);
2366
2367                 switch (chip->state) {
2368                 case FL_READY:
2369                 case FL_STATUS:
2370                 case FL_CFI_QUERY:
2371                 case FL_JEDEC_QUERY:
2372                         if (chip->oldstate == FL_READY) {
2373                                 /* place the chip in a known state before suspend */
2374                                 map_write(map, CMD(0xFF), cfi->chips[i].start);
2375                                 chip->oldstate = chip->state;
2376                                 chip->state = FL_PM_SUSPENDED;
2377                                 /* No need to wake_up() on this state change -
2378                                  * as the whole point is that nobody can do anything
2379                                  * with the chip now anyway.
2380                                  */
2381                         } else {
2382                                 /* There seems to be an operation pending. We must wait for it. */
2383                                 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2384                                 ret = -EAGAIN;
2385                         }
2386                         break;
2387                 default:
2388                         /* Should we actually wait? Once upon a time these routines weren't
2389                            allowed to. Or should we return -EAGAIN, because the upper layers
2390                            ought to have already shut down anything which was using the device
2391                            anyway? The latter for now. */
2392                         printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->oldstate);
2393                         ret = -EAGAIN;
2394                 case FL_PM_SUSPENDED:
2395                         break;
2396                 }
2397                 spin_unlock(chip->mutex);
2398         }
2399
2400         /* Unlock the chips again */
2401
2402         if (ret) {
2403                 for (i--; i >=0; i--) {
2404                         chip = &cfi->chips[i];
2405
2406                         spin_lock(chip->mutex);
2407
2408                         if (chip->state == FL_PM_SUSPENDED) {
2409                                 /* No need to force it into a known state here,
2410                                    because we're returning failure, and it didn't
2411                                    get power cycled */
2412                                 chip->state = chip->oldstate;
2413                                 chip->oldstate = FL_READY;
2414                                 wake_up(&chip->wq);
2415                         }
2416                         spin_unlock(chip->mutex);
2417                 }
2418         }
2419
2420         return ret;
2421 }
2422
2423 static void cfi_intelext_restore_locks(struct mtd_info *mtd)
2424 {
2425         struct mtd_erase_region_info *region;
2426         int block, i;
2427         unsigned long adr;
2428         size_t len;
2429
2430         for (i = 0; i < mtd->numeraseregions; i++) {
2431                 region = &mtd->eraseregions[i];
2432                 if (!region->lockmap)
2433                         continue;
2434
2435                 for (block = 0; block < region->numblocks; block++) {
2436                         len = region->erasesize;
2437                         adr = region->offset + block * len;
2438
2439                         if (!test_bit(block, region->lockmap))
2440                                 cfi_intelext_unlock(mtd, adr, len);
2441                 }
2442         }
2443 }
2444
2445 static void cfi_intelext_resume(struct mtd_info *mtd)
2446 {
2447         struct map_info *map = mtd->priv;
2448         struct cfi_private *cfi = map->fldrv_priv;
2449         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2450         int i;
2451         struct flchip *chip;
2452
2453         for (i=0; i<cfi->numchips; i++) {
2454
2455                 chip = &cfi->chips[i];
2456
2457                 spin_lock(chip->mutex);
2458
2459                 /* Go to known state. Chip may have been power cycled */
2460                 if (chip->state == FL_PM_SUSPENDED) {
2461                         map_write(map, CMD(0xFF), cfi->chips[i].start);
2462                         chip->oldstate = chip->state = FL_READY;
2463                         wake_up(&chip->wq);
2464                 }
2465
2466                 spin_unlock(chip->mutex);
2467         }
2468
2469         if ((mtd->flags & MTD_POWERUP_LOCK)
2470             && extp && (extp->FeatureSupport & (1 << 5)))
2471                 cfi_intelext_restore_locks(mtd);
2472 }
2473
2474 static int cfi_intelext_reset(struct mtd_info *mtd)
2475 {
2476         struct map_info *map = mtd->priv;
2477         struct cfi_private *cfi = map->fldrv_priv;
2478         int i, ret;
2479
2480         for (i=0; i < cfi->numchips; i++) {
2481                 struct flchip *chip = &cfi->chips[i];
2482
2483                 /* force the completion of any ongoing operation
2484                    and switch to array mode so any bootloader in
2485                    flash is accessible for soft reboot. */
2486                 spin_lock(chip->mutex);
2487                 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2488                 if (!ret) {
2489                         map_write(map, CMD(0xff), chip->start);
2490                         chip->state = FL_SHUTDOWN;
2491                 }
2492                 spin_unlock(chip->mutex);
2493         }
2494
2495         return 0;
2496 }
2497
2498 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2499                                void *v)
2500 {
2501         struct mtd_info *mtd;
2502
2503         mtd = container_of(nb, struct mtd_info, reboot_notifier);
2504         cfi_intelext_reset(mtd);
2505         return NOTIFY_DONE;
2506 }
2507
2508 static void cfi_intelext_destroy(struct mtd_info *mtd)
2509 {
2510         struct map_info *map = mtd->priv;
2511         struct cfi_private *cfi = map->fldrv_priv;
2512         struct mtd_erase_region_info *region;
2513         int i;
2514         cfi_intelext_reset(mtd);
2515         unregister_reboot_notifier(&mtd->reboot_notifier);
2516         kfree(cfi->cmdset_priv);
2517         kfree(cfi->cfiq);
2518         kfree(cfi->chips[0].priv);
2519         kfree(cfi);
2520         for (i = 0; i < mtd->numeraseregions; i++) {
2521                 region = &mtd->eraseregions[i];
2522                 if (region->lockmap)
2523                         kfree(region->lockmap);
2524         }
2525         kfree(mtd->eraseregions);
2526 }
2527
2528 MODULE_LICENSE("GPL");
2529 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2530 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2531 MODULE_ALIAS("cfi_cmdset_0003");
2532 MODULE_ALIAS("cfi_cmdset_0200");