// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains an ECC algorithm that detects and corrects 1 bit
 * errors in a 256 byte block of data.
 *
 * Copyright © 2008 Koninklijke Philips Electronics NV.
 *                  Author: Frans Meulenbroeks
 *
 * Completely replaces the previous ECC implementation which was written by:
 *   Steven J. Hill (sjhill@realitydiluted.com)
 *   Thomas Gleixner (tglx@linutronix.de)
 *
 * Information on how this algorithm works and how it was developed
 * can be found in Documentation/mtd/nand_ecc.txt
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/nand_ecc.h>
#include <asm/byteorder.h>

/*
 * invparity is a 256 byte table that contains the odd parity
 * for each byte. If the number of set bits in a byte is even,
 * the array element is 1; if the number of set bits is odd,
 * the array element is 0.
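 * E.g. invparity[0x03] is 1 (two bits set) and invparity[0x01] is 0.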
 */
static const char invparity[256] = {
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1
};

/*
 * bitsperbyte contains the number of set bits for each byte value.
 * It is only used for testing and repairing parity
 * (a precalculated table slightly improves performance).
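 * E.g. bitsperbyte[0x0f] is 4.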
 */
static const char bitsperbyte[256] = {
	0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8,
};

/*
 * addressbits is a lookup table to filter out the bits from the xor-ed
 * ECC data that identify the faulty location.
 * this is only used for repairing parity
 * see the comments in nand_correct_data for more details
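 * (addressbits[b] gathers bits 1, 3, 5 and 7 of b into bits 0..3,
 * e.g. addressbits[0x80] is 0x08 and addressbits[0x0a] is 0x03)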
 */
static const char addressbits[256] = {
	0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
	0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
	0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
	0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
	0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
	0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
	0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
	0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
	0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
	0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
	0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
	0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
	0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
	0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
	0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
	0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
	0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
	0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
	0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
	0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
	0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
	0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
	0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
	0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
	0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
	0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
	0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
	0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
	0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
	0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
	0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
	0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f
};

/**
 * __nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte
 *			 block
 * @buf:	input buffer with raw data
 * @eccsize:	data bytes per ECC step (256 or 512)
 * @code:	output buffer with ECC
 * @sm_order:	Smart Media byte ordering
 */
void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize,
			  unsigned char *code, bool sm_order)
{
	int i;
	const uint32_t *bp = (uint32_t *)buf;
	/* 256 or 512 bytes/ecc */
	const uint32_t eccsize_mult = eccsize >> 8;
	uint32_t cur;		/* current value in buffer */
	/* rp0..rp15..rp17 are the various accumulated parities (per byte) */
	uint32_t rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7;
	uint32_t rp8, rp9, rp10, rp11, rp12, rp13, rp14, rp15, rp16;
	uint32_t uninitialized_var(rp17);	/* to make compiler happy */
	uint32_t par;		/* the cumulative parity for all data */
| 136 | uint32_t tmppar; /* the cumulative parity for this iteration; |
Singh, Vimal | d68156c | 2008-08-23 18:18:34 +0200 | [diff] [blame] | 137 | for rp12, rp14 and rp16 at the end of the |
| 138 | loop */ |

	par = 0;
	rp4 = 0;
	rp6 = 0;
	rp8 = 0;
	rp10 = 0;
	rp12 = 0;
	rp14 = 0;
	rp16 = 0;

	/*
	 * The loop is unrolled a number of times; this avoids if statements
	 * deciding which rp value to update.
	 * The data is also processed one longword at a time.
	 * Note: passing unaligned data might give a performance penalty.
	 * It is assumed that the buffers are aligned.
	 * tmppar is the cumulative parity of this iteration; it is needed
	 * for calculating rp12, rp14, rp16 and par, and is also used as a
	 * performance improvement for rp6, rp8 and rp10.
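	 * Each pass reads 16 longwords (64 bytes), so the loop runs four
	 * times for a 256 byte block and eight times for a 512 byte block.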
	 */
	for (i = 0; i < eccsize_mult << 2; i++) {
		cur = *bp++;
		tmppar = cur;
		rp4 ^= cur;
		cur = *bp++;
		tmppar ^= cur;
		rp6 ^= tmppar;
		cur = *bp++;
		tmppar ^= cur;
		rp4 ^= cur;
		cur = *bp++;
		tmppar ^= cur;
		rp8 ^= tmppar;

		cur = *bp++;
		tmppar ^= cur;
		rp4 ^= cur;
		rp6 ^= cur;
		cur = *bp++;
		tmppar ^= cur;
		rp6 ^= cur;
		cur = *bp++;
		tmppar ^= cur;
		rp4 ^= cur;
		cur = *bp++;
		tmppar ^= cur;
		rp10 ^= tmppar;

		cur = *bp++;
		tmppar ^= cur;
		rp4 ^= cur;
		rp6 ^= cur;
		rp8 ^= cur;
		cur = *bp++;
		tmppar ^= cur;
		rp6 ^= cur;
		rp8 ^= cur;
		cur = *bp++;
		tmppar ^= cur;
		rp4 ^= cur;
		rp8 ^= cur;
		cur = *bp++;
		tmppar ^= cur;
		rp8 ^= cur;

		cur = *bp++;
		tmppar ^= cur;
		rp4 ^= cur;
		rp6 ^= cur;
		cur = *bp++;
		tmppar ^= cur;
		rp6 ^= cur;
		cur = *bp++;
		tmppar ^= cur;
		rp4 ^= cur;
		cur = *bp++;
		tmppar ^= cur;

		par ^= tmppar;
		if ((i & 0x1) == 0)
			rp12 ^= tmppar;
		if ((i & 0x2) == 0)
			rp14 ^= tmppar;
		if (eccsize_mult == 2 && (i & 0x4) == 0)
			rp16 ^= tmppar;
	}

	/*
	 * handle the fact that we use longword operations:
	 * we'll bring rp4..rp14 and rp16 back to single byte entities by
	 * shifting and xoring; first fold the upper and lower 16 bits,
	 * then the upper and lower 8 bits.
	 */
	rp4 ^= (rp4 >> 16);
	rp4 ^= (rp4 >> 8);
	rp4 &= 0xff;
	rp6 ^= (rp6 >> 16);
	rp6 ^= (rp6 >> 8);
	rp6 &= 0xff;
	rp8 ^= (rp8 >> 16);
	rp8 ^= (rp8 >> 8);
	rp8 &= 0xff;
	rp10 ^= (rp10 >> 16);
	rp10 ^= (rp10 >> 8);
	rp10 &= 0xff;
	rp12 ^= (rp12 >> 16);
	rp12 ^= (rp12 >> 8);
	rp12 &= 0xff;
	rp14 ^= (rp14 >> 16);
	rp14 ^= (rp14 >> 8);
	rp14 &= 0xff;
	if (eccsize_mult == 2) {
		rp16 ^= (rp16 >> 16);
		rp16 ^= (rp16 >> 8);
		rp16 &= 0xff;
	}

	/*
	 * we also need to calculate the row parity for rp0..rp3
	 * This is present in par, because par is now
	 * rp3 rp3 rp2 rp2 in little endian and
	 * rp2 rp2 rp3 rp3 in big endian
	 * as well as
	 * rp1 rp0 rp1 rp0 in little endian and
	 * rp0 rp1 rp0 rp1 in big endian
	 * First calculate rp2 and rp3
	 */
#ifdef __BIG_ENDIAN
	rp2 = (par >> 16);
	rp2 ^= (rp2 >> 8);
	rp2 &= 0xff;
	rp3 = par & 0xffff;
	rp3 ^= (rp3 >> 8);
	rp3 &= 0xff;
#else
	rp3 = (par >> 16);
	rp3 ^= (rp3 >> 8);
	rp3 &= 0xff;
	rp2 = par & 0xffff;
	rp2 ^= (rp2 >> 8);
	rp2 &= 0xff;
#endif

	/* reduce par to 16 bits then calculate rp1 and rp0 */
	par ^= (par >> 16);
#ifdef __BIG_ENDIAN
	rp0 = (par >> 8) & 0xff;
	rp1 = (par & 0xff);
#else
	rp1 = (par >> 8) & 0xff;
	rp0 = (par & 0xff);
#endif

	/* finally reduce par to 8 bits */
	par ^= (par >> 8);
	par &= 0xff;
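	/* par now holds the xor of all data bytes, i.e. the column parities */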

	/*
	 * and calculate rp5..rp15..rp17
	 * note that par = rp4 ^ rp5 and due to the commutative property
	 * of the ^ operator we can say:
	 * rp5 = (par ^ rp4);
	 * The & 0xff seems superfluous, but benchmarking showed that
	 * leaving it out gives slightly worse results. No idea why; it
	 * probably has to do with the way the Pentium pipeline is organized.
	 */
	rp5 = (par ^ rp4) & 0xff;
	rp7 = (par ^ rp6) & 0xff;
	rp9 = (par ^ rp8) & 0xff;
	rp11 = (par ^ rp10) & 0xff;
	rp13 = (par ^ rp12) & 0xff;
	rp15 = (par ^ rp14) & 0xff;
	if (eccsize_mult == 2)
		rp17 = (par ^ rp16) & 0xff;

	/*
	 * Finally calculate the ECC bits.
	 * Again it might seem that there are performance optimisations
	 * possible, but benchmarks showed that, on the system this was
	 * developed on, the code below is the fastest.
	 */
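	/*
	 * Resulting layout (each bit is an inverted parity):
	 * code[0] = rp7..rp0 and code[1] = rp15..rp8 (the two bytes are
	 * swapped when sm_order is not set); code[2] holds the column
	 * parities for the masks 0xf0, 0x0f, 0xcc, 0x33, 0xaa and 0x55 in
	 * bits 7..2, plus either two fixed 1 bits (256 byte blocks) or
	 * rp17/rp16 (512 byte blocks) in bits 1..0.
	 */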
	if (sm_order) {
		code[0] = (invparity[rp7] << 7) | (invparity[rp6] << 6) |
			  (invparity[rp5] << 5) | (invparity[rp4] << 4) |
			  (invparity[rp3] << 3) | (invparity[rp2] << 2) |
			  (invparity[rp1] << 1) | (invparity[rp0]);
		code[1] = (invparity[rp15] << 7) | (invparity[rp14] << 6) |
			  (invparity[rp13] << 5) | (invparity[rp12] << 4) |
			  (invparity[rp11] << 3) | (invparity[rp10] << 2) |
			  (invparity[rp9] << 1) | (invparity[rp8]);
	} else {
		code[1] = (invparity[rp7] << 7) | (invparity[rp6] << 6) |
			  (invparity[rp5] << 5) | (invparity[rp4] << 4) |
			  (invparity[rp3] << 3) | (invparity[rp2] << 2) |
			  (invparity[rp1] << 1) | (invparity[rp0]);
		code[0] = (invparity[rp15] << 7) | (invparity[rp14] << 6) |
			  (invparity[rp13] << 5) | (invparity[rp12] << 4) |
			  (invparity[rp11] << 3) | (invparity[rp10] << 2) |
			  (invparity[rp9] << 1) | (invparity[rp8]);
	}

	if (eccsize_mult == 1)
		code[2] =
		    (invparity[par & 0xf0] << 7) |
		    (invparity[par & 0x0f] << 6) |
		    (invparity[par & 0xcc] << 5) |
		    (invparity[par & 0x33] << 4) |
		    (invparity[par & 0xaa] << 3) |
		    (invparity[par & 0x55] << 2) |
		    3;
	else
		code[2] =
		    (invparity[par & 0xf0] << 7) |
		    (invparity[par & 0x0f] << 6) |
		    (invparity[par & 0xcc] << 5) |
		    (invparity[par & 0x33] << 4) |
		    (invparity[par & 0xaa] << 3) |
		    (invparity[par & 0x55] << 2) |
		    (invparity[rp17] << 1) |
		    (invparity[rp16] << 0);
}
EXPORT_SYMBOL(__nand_calculate_ecc);

/**
 * nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte
 *			block
 * @chip:	NAND chip object
 * @buf:	input buffer with raw data
 * @code:	output buffer with ECC
 */
int nand_calculate_ecc(struct nand_chip *chip, const unsigned char *buf,
		       unsigned char *code)
{
	bool sm_order = chip->ecc.options & NAND_ECC_SOFT_HAMMING_SM_ORDER;

	__nand_calculate_ecc(buf, chip->ecc.size, code, sm_order);

	return 0;
}
EXPORT_SYMBOL(nand_calculate_ecc);
| 379 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 380 | /** |
Atsushi Nemoto | be2f092 | 2009-09-05 01:20:43 +0900 | [diff] [blame] | 381 | * __nand_correct_data - [NAND Interface] Detect and correct bit error(s) |
Alexey Korolev | 17c1d2be | 2008-08-20 22:32:08 +0100 | [diff] [blame] | 382 | * @buf: raw data read from the chip |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 383 | * @read_ecc: ECC from the chip |
| 384 | * @calc_ecc: the ECC calculated from raw data |
Brian Norris | 7854d3f | 2011-06-23 14:12:08 -0700 | [diff] [blame] | 385 | * @eccsize: data bytes per ECC step (256 or 512) |
Boris Brezillon | 309600c | 2018-09-04 16:23:28 +0200 | [diff] [blame] | 386 | * @sm_order: Smart Media byte order |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 387 | * |
Atsushi Nemoto | be2f092 | 2009-09-05 01:20:43 +0900 | [diff] [blame] | 388 | * Detect and correct a 1 bit error for eccsize byte block |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 389 | */ |
int __nand_correct_data(unsigned char *buf,
			unsigned char *read_ecc, unsigned char *calc_ecc,
			unsigned int eccsize, bool sm_order)
{
	unsigned char b0, b1, b2, bit_addr;
	unsigned int byte_addr;
	/* 256 or 512 bytes/ecc */
	const uint32_t eccsize_mult = eccsize >> 8;

	/*
	 * b0 to b2 indicate which bit is faulty (if any)
	 * we might need the xor result more than once,
	 * so keep them in a local var
	 */
	if (sm_order) {
		b0 = read_ecc[0] ^ calc_ecc[0];
		b1 = read_ecc[1] ^ calc_ecc[1];
	} else {
		b0 = read_ecc[1] ^ calc_ecc[1];
		b1 = read_ecc[0] ^ calc_ecc[0];
	}

	b2 = read_ecc[2] ^ calc_ecc[2];

	/* check if there are any bitfaults */

	/* repeated if statements are slightly more efficient than switch ... */
	/* ordered by likelihood */

	if ((b0 | b1 | b2) == 0)
		return 0;	/* no error */

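	/*
	 * For a correctable single bit error every pair of bits in b0 and
	 * b1 (and in b2, except for the two lowest bits, which are fixed
	 * to 1 in the 256 byte case) must contain exactly one set bit;
	 * that is what the (b ^ (b >> 1)) tests below check.
	 */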
	if ((((b0 ^ (b0 >> 1)) & 0x55) == 0x55) &&
	    (((b1 ^ (b1 >> 1)) & 0x55) == 0x55) &&
	    ((eccsize_mult == 1 && ((b2 ^ (b2 >> 1)) & 0x54) == 0x54) ||
	     (eccsize_mult == 2 && ((b2 ^ (b2 >> 1)) & 0x55) == 0x55))) {
		/* single bit error */
		/*
		 * rp17/rp15/13/11/9/7/5/3/1 indicate which byte is the
		 * faulty byte, cp 5/3/1 indicate the faulty bit.
		 * A lookup table (called addressbits) is used to filter
		 * the bits from the byte they are in.
		 * A marginal optimisation is possible by having three
		 * different lookup tables.
		 * One as we have now (for b0), one for b2
		 * (that would avoid the >> 1), and one for b1 (with all
		 * values << 4). However it was felt that introducing two
		 * more tables hardly justifies the gain.
		 *
		 * The b2 shift is there to get rid of the lowest two bits.
		 * We could also do addressbits[b2] >> 1 but performance-wise
		 * it does not make any difference.
		 */
		if (eccsize_mult == 1)
			byte_addr = (addressbits[b1] << 4) + addressbits[b0];
		else
			byte_addr = (addressbits[b2 & 0x3] << 8) +
				    (addressbits[b1] << 4) + addressbits[b0];
		bit_addr = addressbits[b2 >> 2];
		/* flip the bit */
		buf[byte_addr] ^= (1 << bit_addr);
		return 1;

	}
	/* count nr of bits; use table lookup, faster than calculating it */
	if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1)
		return 1;	/* error in ECC data; no action needed */

	pr_err("%s: uncorrectable ECC error\n", __func__);
	return -EBADMSG;
}
EXPORT_SYMBOL(__nand_correct_data);

/**
 * nand_correct_data - [NAND Interface] Detect and correct bit error(s)
 * @chip:	NAND chip object
 * @buf:	raw data read from the chip
 * @read_ecc:	ECC from the chip
 * @calc_ecc:	the ECC calculated from raw data
 *
 * Detect and correct a 1 bit error for a 256/512 byte block
 */
int nand_correct_data(struct nand_chip *chip, unsigned char *buf,
		      unsigned char *read_ecc, unsigned char *calc_ecc)
{
	bool sm_order = chip->ecc.options & NAND_ECC_SOFT_HAMMING_SM_ORDER;

	return __nand_correct_data(buf, read_ecc, calc_ecc, chip->ecc.size,
				   sm_order);
}
EXPORT_SYMBOL(nand_correct_data);
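
/*
 * Typical use from a raw NAND driver (a minimal sketch, assuming the chip
 * uses software Hamming ECC with a 256 or 512 byte ECC step; "databuf" and
 * the surrounding error handling are hypothetical):
 *
 *	unsigned char calc_ecc[3], read_ecc[3];
 *	int ret;
 *
 *	// read the data bytes into databuf and the stored ECC bytes into
 *	// read_ecc[] (e.g. from the OOB area), then:
 *	nand_calculate_ecc(chip, databuf, calc_ecc);
 *	ret = nand_correct_data(chip, databuf, read_ecc, calc_ecc);
 *	if (ret < 0)
 *		return ret;	// -EBADMSG: uncorrectable error
 *	// ret is 0 (no error) or 1 (single bit error corrected or ignored)
 */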

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Frans Meulenbroeks <fransmeulenbroeks@gmail.com>");
MODULE_DESCRIPTION("Generic NAND ECC support");