@@ -33,6 +33,8 @@
 #include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/reboot.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
 #include <linux/mtd/map.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/cfi.h>
@@ -74,6 +76,10 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
 static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
 static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
 
+static int cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+
 static struct mtd_chip_driver cfi_amdstd_chipdrv = {
 	.probe		= NULL, /* Not usable directly */
 	.destroy	= cfi_amdstd_destroy,
@@ -496,6 +502,7 @@ static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi)
 struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
 {
 	struct cfi_private *cfi = map->fldrv_priv;
+	struct device_node __maybe_unused *np = map->device_node;
 	struct mtd_info *mtd;
 	int i;
 
@@ -570,6 +577,17 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
 		cfi_tell_features(extp);
 #endif
 
+#ifdef CONFIG_OF
+		if (np && of_property_read_bool(
+			    np, "use-advanced-sector-protection")
+		    && extp->BlkProtUnprot == 8) {
+			printk(KERN_INFO " Advanced Sector Protection (PPB Locking) supported\n");
+			mtd->_lock = cfi_ppb_lock;
+			mtd->_unlock = cfi_ppb_unlock;
+			mtd->_is_locked = cfi_ppb_is_locked;
+		}
+#endif
+
 		bootloc = extp->TopBottom;
 		if ((bootloc < 2) || (bootloc > 5)) {
 			printk(KERN_WARNING "%s: CFI contains unrecognised boot "
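The hunk above only takes effect when the map driver has filled in map->device_node, the flash node in the device tree sets the boolean property use-advanced-sector-protection, and the primary extended query reports BlkProtUnprot == 8. A minimal sketch of that wiring, assuming an illustrative platform driver (example_of_flash_probe and the omitted resource setup are not part of this patch); an OF-aware map driver such as physmap_of is expected to pass the node in an equivalent way:

/*
 * Sketch only (not part of this patch): an OF-aware map driver handing the
 * flash node to the chip driver so the CONFIG_OF check in cfi_cmdset_0002()
 * can evaluate "use-advanced-sector-protection". The probe function name and
 * the omitted resource setup are illustrative.
 */
#include <linux/mtd/map.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

static int example_of_flash_probe(struct platform_device *pdev)
{
	struct map_info *map;

	map = devm_kzalloc(&pdev->dev, sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	/* ... fill in map->name, map->phys, map->size, map->bankwidth, map->virt ... */

	map->device_node = pdev->dev.of_node;	/* consumed by cfi_cmdset_0002() */

	/* do_map_probe("cfi_probe", map) would then detect the chip(s) */
	return 0;
}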
@@ -2172,6 +2190,205 @@ static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 	return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
 }
 
+/*
+ * Advanced Sector Protection - PPB (Persistent Protection Bit) locking
+ */
+
+struct ppb_lock {
+	struct flchip *chip;
+	loff_t offset;
+	int locked;
+};
+
+#define MAX_SECTORS			512
+
+#define DO_XXLOCK_ONEBLOCK_LOCK		((void *)1)
+#define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *)2)
+#define DO_XXLOCK_ONEBLOCK_GETLOCK	((void *)3)
+
+static int __maybe_unused do_ppb_xxlock(struct map_info *map,
+					struct flchip *chip,
+					unsigned long adr, int len, void *thunk)
+{
+	struct cfi_private *cfi = map->fldrv_priv;
+	unsigned long timeo;
+	int ret;
+
+	mutex_lock(&chip->mutex);
+	ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
+	if (ret) {
+		mutex_unlock(&chip->mutex);
+		return ret;
+	}
+
+	pr_debug("MTD %s(): XXLOCK 0x%08lx len %d\n", __func__, adr, len);
+
+	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
+			 cfi->device_type, NULL);
+	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
+			 cfi->device_type, NULL);
+	/* PPB entry command */
+	cfi_send_gen_cmd(0xC0, cfi->addr_unlock1, chip->start, map, cfi,
+			 cfi->device_type, NULL);
+
+	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
+		chip->state = FL_LOCKING;
+		map_write(map, CMD(0xA0), chip->start + adr);
+		map_write(map, CMD(0x00), chip->start + adr);
+	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
+		/*
+		 * Unlocking of one specific sector is not supported, so we
+		 * have to unlock all sectors of this device instead
+		 */
+		chip->state = FL_UNLOCKING;
+		map_write(map, CMD(0x80), chip->start);
+		map_write(map, CMD(0x30), chip->start);
+	} else if (thunk == DO_XXLOCK_ONEBLOCK_GETLOCK) {
+		chip->state = FL_JEDEC_QUERY;
+		/* Return locked status: 0->locked, 1->unlocked */
+		ret = !cfi_read_query(map, adr);
+	} else
+		BUG();
+
+	/*
+	 * Wait for some time as unlocking of all sectors takes quite long
+	 */
+	timeo = jiffies + msecs_to_jiffies(2000);	/* 2s max (un)locking */
+	for (;;) {
+		if (chip_ready(map, adr))
+			break;
+
+		if (time_after(jiffies, timeo)) {
+			printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
+			ret = -EIO;
+			break;
+		}
+
+		UDELAY(map, chip, adr, 1);
+	}
+
+	/* Exit BC commands */
+	map_write(map, CMD(0x90), chip->start);
+	map_write(map, CMD(0x00), chip->start);
+
+	chip->state = FL_READY;
+	put_chip(map, chip, adr + chip->start);
+	mutex_unlock(&chip->mutex);
+
+	return ret;
+}
+
+static int __maybe_unused cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs,
+				       uint64_t len)
+{
+	return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
+				DO_XXLOCK_ONEBLOCK_LOCK);
+}
+
+static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
+					 uint64_t len)
+{
+	struct mtd_erase_region_info *regions = mtd->eraseregions;
+	struct map_info *map = mtd->priv;
+	struct cfi_private *cfi = map->fldrv_priv;
+	struct ppb_lock *sect;
+	unsigned long adr;
+	loff_t offset;
+	uint64_t length;
+	int chipnum;
+	int i;
+	int sectors;
+	int ret;
+
+	/*
+	 * PPB unlocking always unlocks all sectors of the flash chip.
+	 * We need to re-lock all previously locked sectors. So lets
+	 * first check the locking status of all sectors and save
+	 * it for future use.
+	 */
+	sect = kzalloc(MAX_SECTORS * sizeof(struct ppb_lock), GFP_KERNEL);
+	if (!sect)
+		return -ENOMEM;
+
+	/*
+	 * This code to walk all sectors is a slightly modified version
+	 * of the cfi_varsize_frob() code.
+	 */
+	i = 0;
+	chipnum = 0;
+	adr = 0;
+	sectors = 0;
+	offset = 0;
+	length = mtd->size;
+
+	while (length) {
+		int size = regions[i].erasesize;
+
+		/*
+		 * Only test sectors that shall not be unlocked. The other
+		 * sectors shall be unlocked, so lets keep their locking
+		 * status at "unlocked" (locked=0) for the final re-locking.
+		 */
+		if ((adr < ofs) || (adr >= (ofs + len))) {
+			sect[sectors].chip = &cfi->chips[chipnum];
+			sect[sectors].offset = offset;
+			sect[sectors].locked = do_ppb_xxlock(
+				map, &cfi->chips[chipnum], adr, 0,
+				DO_XXLOCK_ONEBLOCK_GETLOCK);
+		}
+
+		adr += size;
+		offset += size;
+		length -= size;
+
+		if (offset == regions[i].offset + size * regions[i].numblocks)
+			i++;
+
+		if (adr >> cfi->chipshift) {
+			adr = 0;
+			chipnum++;
+
+			if (chipnum >= cfi->numchips)
+				break;
+		}
+
+		sectors++;
+		if (sectors >= MAX_SECTORS) {
+			printk(KERN_ERR "Only %d sectors for PPB locking supported!\n",
+			       MAX_SECTORS);
+			kfree(sect);
+			return -EINVAL;
+		}
+	}
+
+	/* Now unlock the whole chip */
+	ret = cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
+			       DO_XXLOCK_ONEBLOCK_UNLOCK);
+	if (ret) {
+		kfree(sect);
+		return ret;
+	}
+
+	/*
+	 * PPB unlocking always unlocks all sectors of the flash chip.
+	 * We need to re-lock all previously locked sectors.
+	 */
+	for (i = 0; i < sectors; i++) {
+		if (sect[i].locked)
+			do_ppb_xxlock(map, sect[i].chip, sect[i].offset, 0,
+				      DO_XXLOCK_ONEBLOCK_LOCK);
+	}
+
+	kfree(sect);
+	return ret;
+}
+
+static int __maybe_unused cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs,
+					    uint64_t len)
+{
+	return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
+				DO_XXLOCK_ONEBLOCK_GETLOCK) ? 1 : 0;
+}
 
 static void cfi_amdstd_sync (struct mtd_info *mtd)
 {
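Once mtd->_lock, mtd->_unlock and mtd->_is_locked point at the PPB handlers, callers reach them through the generic accessors in <linux/mtd/mtd.h> (or, from userspace, the MEMLOCK/MEMUNLOCK/MEMISLOCKED ioctls on /dev/mtdX). A hedged kernel-side sketch; the function name and the chosen range are illustrative only:

/*
 * Sketch only (not part of this patch): exercising the PPB handlers through
 * the generic MTD accessors. The function name and the protected range are
 * illustrative.
 */
#include <linux/mtd/mtd.h>

static int example_protect_boot_sectors(struct mtd_info *mtd)
{
	loff_t ofs = 0;
	uint64_t len = 4ULL * mtd->erasesize;	/* e.g. a boot area */
	int ret;

	ret = mtd_lock(mtd, ofs, len);		/* dispatches to cfi_ppb_lock() */
	if (ret)
		return ret;

	/* returns 1 if a sector in the range reports locked, 0 otherwise */
	return mtd_is_locked(mtd, ofs, len);
}

Note that, as the comments in cfi_ppb_unlock() above explain, a PPB unlock always clears the protection bits of the whole chip and the driver then re-locks every sector that was locked before, so unlocking a sub-range is comparatively slow.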
|
|