/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright (C) 2001-2003 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 * $Id: scan.c,v 1.121 2005/07/20 15:32:28 dedekind Exp $
 *
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/pagemap.h>
#include <linux/crc32.h>
#include <linux/compiler.h>
#include "nodelist.h"

#define DEFAULT_EMPTY_SCAN_SIZE 1024
/* 'typeof' is a GCC extension. For compilers without it, fall back to
 * uint32_t, which is wide enough for every argument passed to the
 * space-accounting macros below. */
#if !defined(__GNUC__)
#define typeof(x) uint32_t
#endif
#define DIRTY_SPACE(x) do { typeof(x) _x = (x); \
		c->free_size -= _x; c->dirty_size += _x; \
		jeb->free_size -= _x ; jeb->dirty_size += _x; \
		}while(0)
#define USED_SPACE(x) do { typeof(x) _x = (x); \
		c->free_size -= _x; c->used_size += _x; \
		jeb->free_size -= _x ; jeb->used_size += _x; \
		}while(0)
#define UNCHECKED_SPACE(x) do { typeof(x) _x = (x); \
		c->free_size -= _x; c->unchecked_size += _x; \
		jeb->free_size -= _x ; jeb->unchecked_size += _x; \
		}while(0)
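
/*
 * Each of the macros above moves '_x' bytes of an eraseblock's budget out
 * of free space and into one other bucket, in both the per-block (jeb) and
 * filesystem-wide (c) counters, so that
 *
 *	free + dirty + used + unchecked + wasted == sector_size
 *
 * holds for every block throughout the scan (this is what
 * jffs2_dbg_acct_paranoia_check_nolock() verifies). For illustration,
 * skipping one garbage word in the scan loop is simply
 *
 *	DIRTY_SPACE(4);
 *	ofs += 4;
 *
 * i.e. the space is written off as dirty and the scan moves on.
 */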
#if defined(__GNUC__)
#define noisy_printk(noise, args...) do { \
	if (*(noise)) { \
		printk(KERN_NOTICE args); \
		(*(noise))--; \
		if (!(*(noise))) { \
			printk(KERN_NOTICE "Further such events for this erase block will not be printed\n"); \
		} \
	} \
} while(0)
#else
/* C99 variadic form for compilers without GCC's named variadic macros.
 * Note: no '##' before __VA_ARGS__ here; KERN_NOTICE and the caller's
 * format string are adjacent string literals and simply concatenate. */
#define noisy_printk(noise, ...) do { \
	if (*(noise)) { \
		printk(KERN_NOTICE __VA_ARGS__); \
		(*(noise))--; \
		if (!(*(noise))) { \
			printk(KERN_NOTICE "Further such events for this erase block will not be printed\n"); \
		} \
	} \
} while(0)
#endif
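
/*
 * noisy_printk() rate-limits complaints: it prints at most '*noise'
 * messages and then announces that further ones are suppressed. Typical
 * use, as in the scan loop below (illustrative):
 *
 *	int noise = 10;
 *	noisy_printk(&noise, "jffs2_scan_eraseblock(): ...\n", ...);
 *
 * so at most ten such warnings are logged per eraseblock scanned.
 */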
static uint32_t pseudo_random;

static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				  unsigned char *buf, uint32_t buf_size);

/* These helper functions _must_ increase ofs and also do the dirty/used space accounting.
 * Returning an error will abort the mount - bad checksums etc. should just mark the space
 * as dirty.
 */
static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				 struct jffs2_raw_inode *ri, uint32_t ofs);
static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				  struct jffs2_raw_dirent *rd, uint32_t ofs);

#define BLK_STATE_ALLFF		0
#define BLK_STATE_CLEAN		1
#define BLK_STATE_PARTDIRTY	2
#define BLK_STATE_CLEANMARKER	3
#define BLK_STATE_ALLDIRTY	4
#define BLK_STATE_BADBLOCK	5
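
/*
 * Quick reference for where jffs2_scan_medium() puts a block for each of
 * these states (the switch in jffs2_scan_medium() is authoritative):
 *
 *	ALLFF        -> erase_pending_list (queued for a fresh erase)
 *	CLEAN        -> clean_list
 *	PARTDIRTY    -> dirty_list/very_dirty_list, or kept as c->nextblock
 *	CLEANMARKER  -> free_list (or erase_pending_list if any dirty space)
 *	ALLDIRTY     -> erase_pending_list
 *	BADBLOCK     -> bad_list
 */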
static inline int min_free(struct jffs2_sb_info *c)
{
	uint32_t min = 2 * sizeof(struct jffs2_raw_inode);
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	if (!jffs2_can_mark_obsolete(c) && min < c->wbuf_pagesize)
		return c->wbuf_pagesize;
#endif
	return min;
}

static inline uint32_t EMPTY_SCAN_SIZE(uint32_t sector_size) {
	if (sector_size < DEFAULT_EMPTY_SCAN_SIZE)
		return sector_size;
	else
		return DEFAULT_EMPTY_SCAN_SIZE;
}
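
/*
 * For orientation: 2 * sizeof(struct jffs2_raw_inode) is roughly 136 bytes,
 * and on write-buffered flash the min_free() threshold grows to one
 * write-buffer page (the NAND page size). EMPTY_SCAN_SIZE() caps the
 * empty-block probe at DEFAULT_EMPTY_SCAN_SIZE (1KiB) unless the sector
 * itself is smaller than that.
 */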
int jffs2_scan_medium(struct jffs2_sb_info *c)
{
	int i, ret;
	uint32_t empty_blocks = 0, bad_blocks = 0;
	unsigned char *flashbuf = NULL;
	uint32_t buf_size = 0;
#ifndef __ECOS
	size_t pointlen;

	if (c->mtd->point) {
		ret = c->mtd->point (c->mtd, 0, c->mtd->size, &pointlen, &flashbuf);
		if (!ret && pointlen < c->mtd->size) {
			/* Don't muck about if it won't let us point to the whole flash */
			D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", pointlen));
			c->mtd->unpoint(c->mtd, flashbuf, 0, c->mtd->size);
			flashbuf = NULL;
		}
		if (ret)
			D1(printk(KERN_DEBUG "MTD point failed %d\n", ret));
	}
#endif
	if (!flashbuf) {
		/* For NAND it's quicker to read a whole eraseblock at a time,
		   apparently */
		if (jffs2_cleanmarker_oob(c))
			buf_size = c->sector_size;
		else
			buf_size = PAGE_SIZE;

		/* Respect kmalloc limitations */
		if (buf_size > 128*1024)
			buf_size = 128*1024;

		D1(printk(KERN_DEBUG "Allocating readbuf of %d bytes\n", buf_size));
		flashbuf = kmalloc(buf_size, GFP_KERNEL);
		if (!flashbuf)
			return -ENOMEM;
	}

	for (i=0; i<c->nr_blocks; i++) {
		struct jffs2_eraseblock *jeb = &c->blocks[i];

		ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset), buf_size);

		if (ret < 0)
			goto out;

		jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

		/* Now decide which list to put it on */
		switch(ret) {
		case BLK_STATE_ALLFF:
			/*
			 * Empty block. Since we can't be sure it
			 * was entirely erased, we just queue it for erase
			 * again. It will be marked as such when the erase
			 * is complete. Meanwhile we still count it as empty
			 * for later checks.
			 */
			empty_blocks++;
			list_add(&jeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			break;

		case BLK_STATE_CLEANMARKER:
			/* Only a CLEANMARKER node is valid */
			if (!jeb->dirty_size) {
				/* It's actually free */
				list_add(&jeb->list, &c->free_list);
				c->nr_free_blocks++;
			} else {
				/* Dirt */
				D1(printk(KERN_DEBUG "Adding all-dirty block at 0x%08x to erase_pending_list\n", jeb->offset));
				list_add(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
			}
			break;

		case BLK_STATE_CLEAN:
			/* Full (or almost full) of clean data. Clean list */
			list_add(&jeb->list, &c->clean_list);
			break;

		case BLK_STATE_PARTDIRTY:
			/* Some data, but not full. Dirty list. */
			/* We want to remember the block with most free space
			   and stick it in the 'nextblock' position to start writing to it. */
			if (jeb->free_size > min_free(c) &&
			    (!c->nextblock || c->nextblock->free_size < jeb->free_size)) {
				/* Better candidate for the next writes to go to */
				if (c->nextblock) {
					c->nextblock->dirty_size += c->nextblock->free_size + c->nextblock->wasted_size;
					c->dirty_size += c->nextblock->free_size + c->nextblock->wasted_size;
					c->free_size -= c->nextblock->free_size;
					c->wasted_size -= c->nextblock->wasted_size;
					c->nextblock->free_size = c->nextblock->wasted_size = 0;
					if (VERYDIRTY(c, c->nextblock->dirty_size)) {
						list_add(&c->nextblock->list, &c->very_dirty_list);
					} else {
						list_add(&c->nextblock->list, &c->dirty_list);
					}
				}
				c->nextblock = jeb;
			} else {
				jeb->dirty_size += jeb->free_size + jeb->wasted_size;
				c->dirty_size += jeb->free_size + jeb->wasted_size;
				c->free_size -= jeb->free_size;
				c->wasted_size -= jeb->wasted_size;
				jeb->free_size = jeb->wasted_size = 0;
				if (VERYDIRTY(c, jeb->dirty_size)) {
					list_add(&jeb->list, &c->very_dirty_list);
				} else {
					list_add(&jeb->list, &c->dirty_list);
				}
			}
			break;

		case BLK_STATE_ALLDIRTY:
			/* Nothing valid - not even a clean marker. Needs erasing. */
			/* For now we just put it on the erasing list. We'll start the erases later */
			D1(printk(KERN_NOTICE "JFFS2: Erase block at 0x%08x is not formatted. It will be erased\n", jeb->offset));
			list_add(&jeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			break;

		case BLK_STATE_BADBLOCK:
			D1(printk(KERN_NOTICE "JFFS2: Block at 0x%08x is bad\n", jeb->offset));
			list_add(&jeb->list, &c->bad_list);
			c->bad_size += c->sector_size;
			c->free_size -= c->sector_size;
			bad_blocks++;
			break;

		default:
			printk(KERN_WARNING "jffs2_scan_medium(): unknown block state\n");
			BUG();
		}
	}

	/* Nextblock dirty is always seen as wasted, because we cannot recycle it now */
	if (c->nextblock && (c->nextblock->dirty_size)) {
		c->nextblock->wasted_size += c->nextblock->dirty_size;
		c->wasted_size += c->nextblock->dirty_size;
		c->dirty_size -= c->nextblock->dirty_size;
		c->nextblock->dirty_size = 0;
	}
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	if (!jffs2_can_mark_obsolete(c) && c->nextblock && (c->nextblock->free_size & (c->wbuf_pagesize-1))) {
		/* If we're going to start writing into a block which already
		   contains data, and the end of the data isn't page-aligned,
		   skip a little and align it. */

		uint32_t skip = c->nextblock->free_size & (c->wbuf_pagesize-1);

		D1(printk(KERN_DEBUG "jffs2_scan_medium(): Skipping %d bytes in nextblock to ensure page alignment\n",
			  skip));
		c->nextblock->wasted_size += skip;
		c->wasted_size += skip;

		c->nextblock->free_size -= skip;
		c->free_size -= skip;
	}
#endif
	if (c->nr_erasing_blocks) {
		if ( !c->used_size && ((c->nr_free_blocks+empty_blocks+bad_blocks)!= c->nr_blocks || bad_blocks == c->nr_blocks) ) {
			printk(KERN_NOTICE "Cowardly refusing to erase blocks on filesystem with no valid JFFS2 nodes\n");
			printk(KERN_NOTICE "empty_blocks %d, bad_blocks %d, c->nr_blocks %d\n",empty_blocks,bad_blocks,c->nr_blocks);
			ret = -EIO;
			goto out;
		}
		jffs2_erase_pending_trigger(c);
	}
	ret = 0;
 out:
	if (buf_size)
		kfree(flashbuf);
#ifndef __ECOS
	else
		c->mtd->unpoint(c->mtd, flashbuf, 0, c->mtd->size);
#endif
	return ret;
}
static int jffs2_fill_scan_buf (struct jffs2_sb_info *c, unsigned char *buf,
				uint32_t ofs, uint32_t len)
{
	int ret;
	size_t retlen;

	ret = jffs2_flash_read(c, ofs, len, &retlen, buf);
	if (ret) {
		D1(printk(KERN_WARNING "mtd->read(0x%x bytes from 0x%x) returned %d\n", len, ofs, ret));
		return ret;
	}
	if (retlen < len) {
		D1(printk(KERN_WARNING "Read at 0x%x gave only 0x%zx bytes\n", ofs, retlen));
		return -EIO;
	}
	D2(printk(KERN_DEBUG "Read 0x%x bytes from 0x%08x into buf\n", len, ofs));
	D2(printk(KERN_DEBUG "000: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
		  buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7], buf[8], buf[9], buf[10], buf[11], buf[12], buf[13], buf[14], buf[15]));
	return 0;
}
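
/*
 * For reference while reading the scan loop below: every JFFS2 node starts
 * with the common header (struct jffs2_unknown_node),
 *
 *	magic    (16 bits)  JFFS2_MAGIC_BITMASK
 *	nodetype (16 bits)
 *	totlen   (32 bits)  length of the whole node, header included
 *	hdr_crc  (32 bits)  crc32 of the 8 bytes above
 *
 * The scan recomputes hdr_crc with JFFS2_NODE_ACCURATE forced back on in
 * 'nodetype', because obsoleting a node in place clears that bit without
 * updating the stored CRC.
 */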
static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				  unsigned char *buf, uint32_t buf_size) {
	struct jffs2_unknown_node *node;
	struct jffs2_unknown_node crcnode;
	uint32_t ofs, prevofs;
	uint32_t hdr_crc, buf_ofs, buf_len;
	int err;
	int noise = 0;
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	int cleanmarkerfound = 0;
#endif

	ofs = jeb->offset;
	prevofs = jeb->offset - 1;

	D1(printk(KERN_DEBUG "jffs2_scan_eraseblock(): Scanning block at 0x%x\n", ofs));

#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	if (jffs2_cleanmarker_oob(c)) {
		int ret = jffs2_check_nand_cleanmarker(c, jeb);
		D2(printk(KERN_NOTICE "jffs2_check_nand_cleanmarker returned %d\n",ret));
		/* Even if it's not found, we still scan to see
		   if the block is empty. We use this information
		   to decide whether to erase it or not. */
		switch (ret) {
		case 0:		cleanmarkerfound = 1; break;
		case 1:		break;
		case 2:		return BLK_STATE_BADBLOCK;
		case 3:		return BLK_STATE_ALLDIRTY; /* Block has failed to erase min. once */
		default:	return ret;
		}
	}
#endif
	buf_ofs = jeb->offset;

	if (!buf_size) {
		buf_len = c->sector_size;
	} else {
		buf_len = EMPTY_SCAN_SIZE(c->sector_size);
		err = jffs2_fill_scan_buf(c, buf, buf_ofs, buf_len);
		if (err)
			return err;
	}

	/* We temporarily use 'ofs' as a pointer into the buffer/jeb */
	ofs = 0;
	/* Scan only the first EMPTY_SCAN_SIZE bytes of 0xFF before declaring it empty */
	while(ofs < EMPTY_SCAN_SIZE(c->sector_size) && *(uint32_t *)(&buf[ofs]) == 0xFFFFFFFF)
		ofs += 4;

	if (ofs == EMPTY_SCAN_SIZE(c->sector_size)) {
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
		if (jffs2_cleanmarker_oob(c)) {
			/* scan oob, take care of cleanmarker */
			int ret = jffs2_check_oob_empty(c, jeb, cleanmarkerfound);
			D2(printk(KERN_NOTICE "jffs2_check_oob_empty returned %d\n",ret));
			switch (ret) {
			case 0:		return cleanmarkerfound ? BLK_STATE_CLEANMARKER : BLK_STATE_ALLFF;
			case 1:		return BLK_STATE_ALLDIRTY;
			default:	return ret;
			}
		}
#endif
		D1(printk(KERN_DEBUG "Block at 0x%08x is empty (erased)\n", jeb->offset));
		if (c->cleanmarker_size == 0)
			return BLK_STATE_CLEANMARKER;	/* don't bother with re-erase */
		else
			return BLK_STATE_ALLFF;	/* OK to erase if all blocks are like this */
	}
	if (ofs) {
		D1(printk(KERN_DEBUG "Free space at %08x ends at %08x\n", jeb->offset,
			  jeb->offset + ofs));
		DIRTY_SPACE(ofs);
	}

	/* Now ofs is a complete physical flash offset as it always was... */
	ofs += jeb->offset;

	noise = 10;

scan_more:
	while(ofs < jeb->offset + c->sector_size) {

		jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

		cond_resched();

		if (ofs & 3) {
			printk(KERN_WARNING "Eep. ofs 0x%08x not word-aligned!\n", ofs);
			ofs = PAD(ofs);
			continue;
		}
		if (ofs == prevofs) {
			printk(KERN_WARNING "ofs 0x%08x has already been seen. Skipping\n", ofs);
			DIRTY_SPACE(4);
			ofs += 4;
			continue;
		}
		prevofs = ofs;

		if (jeb->offset + c->sector_size < ofs + sizeof(*node)) {
			D1(printk(KERN_DEBUG "Fewer than %zd bytes left to end of block. (%x+%x<%x+%zx) Not reading\n", sizeof(struct jffs2_unknown_node),
				  jeb->offset, c->sector_size, ofs, sizeof(*node)));
			DIRTY_SPACE((jeb->offset + c->sector_size)-ofs);
			break;
		}

		if (buf_ofs + buf_len < ofs + sizeof(*node)) {
			buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
			D1(printk(KERN_DEBUG "Fewer than %zd bytes (node header) left to end of buf. Reading 0x%x at 0x%08x\n",
				  sizeof(struct jffs2_unknown_node), buf_len, ofs));
			err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
			if (err)
				return err;
			buf_ofs = ofs;
		}

		node = (struct jffs2_unknown_node *)&buf[ofs-buf_ofs];

		if (*(uint32_t *)(&buf[ofs-buf_ofs]) == 0xffffffff) {
			uint32_t inbuf_ofs;
			uint32_t empty_start;

			empty_start = ofs;
			ofs += 4;

			D1(printk(KERN_DEBUG "Found empty flash at 0x%08x\n", ofs));
		more_empty:
			inbuf_ofs = ofs - buf_ofs;
			while (inbuf_ofs < buf_len) {
				if (*(uint32_t *)(&buf[inbuf_ofs]) != 0xffffffff) {
					printk(KERN_WARNING "Empty flash at 0x%08x ends at 0x%08x\n",
					       empty_start, ofs);
					DIRTY_SPACE(ofs-empty_start);
					goto scan_more;
				}

				inbuf_ofs+=4;
				ofs += 4;
			}
			/* Ran off end. */
			D1(printk(KERN_DEBUG "Empty flash to end of buffer at 0x%08x\n", ofs));

			/* If we're only checking the beginning of a block with a cleanmarker,
			   bail now */
			if (buf_ofs == jeb->offset && jeb->used_size == PAD(c->cleanmarker_size) &&
			    c->cleanmarker_size && !jeb->dirty_size && !jeb->first_node->next_phys) {
				D1(printk(KERN_DEBUG "%d bytes at start of block seems clean... assuming all clean\n", EMPTY_SCAN_SIZE(c->sector_size)));
				return BLK_STATE_CLEANMARKER;
			}

			/* See how much more there is to read in this eraseblock... */
			buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
			if (!buf_len) {
				/* No more to read. Break out of main loop without marking
				   this range of empty space as dirty (because it's not) */
				D1(printk(KERN_DEBUG "Empty flash at %08x runs to end of block. Treating as free_space\n",
					  empty_start));
				break;
			}
			D1(printk(KERN_DEBUG "Reading another 0x%x at 0x%08x\n", buf_len, ofs));
			err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
			if (err)
				return err;
			buf_ofs = ofs;
			goto more_empty;
		}

		if (ofs == jeb->offset && je16_to_cpu(node->magic) == KSAMTIB_CIGAM_2SFFJ) {
			printk(KERN_WARNING "Magic bitmask is backwards at offset 0x%08x. Wrong endian filesystem?\n", ofs);
			DIRTY_SPACE(4);
			ofs += 4;
			continue;
		}
		if (je16_to_cpu(node->magic) == JFFS2_DIRTY_BITMASK) {
			D1(printk(KERN_DEBUG "Dirty bitmask at 0x%08x\n", ofs));
			DIRTY_SPACE(4);
			ofs += 4;
			continue;
		}
		if (je16_to_cpu(node->magic) == JFFS2_OLD_MAGIC_BITMASK) {
			printk(KERN_WARNING "Old JFFS2 bitmask found at 0x%08x\n", ofs);
			printk(KERN_WARNING "You cannot use older JFFS2 filesystems with newer kernels\n");
			DIRTY_SPACE(4);
			ofs += 4;
			continue;
		}
		if (je16_to_cpu(node->magic) != JFFS2_MAGIC_BITMASK) {
			/* OK. We're out of possibilities. Whinge and move on */
			noisy_printk(&noise, "jffs2_scan_eraseblock(): Magic bitmask 0x%04x not found at 0x%08x: 0x%04x instead\n",
				     JFFS2_MAGIC_BITMASK, ofs,
				     je16_to_cpu(node->magic));
			DIRTY_SPACE(4);
			ofs += 4;
			continue;
		}
		/* We seem to have a node of sorts. Check the CRC */
		crcnode.magic = node->magic;
		crcnode.nodetype = cpu_to_je16( je16_to_cpu(node->nodetype) | JFFS2_NODE_ACCURATE);
		crcnode.totlen = node->totlen;
		hdr_crc = crc32(0, &crcnode, sizeof(crcnode)-4);

		if (hdr_crc != je32_to_cpu(node->hdr_crc)) {
			noisy_printk(&noise, "jffs2_scan_eraseblock(): Node at 0x%08x {0x%04x, 0x%04x, 0x%08x) has invalid CRC 0x%08x (calculated 0x%08x)\n",
				     ofs, je16_to_cpu(node->magic),
				     je16_to_cpu(node->nodetype),
				     je32_to_cpu(node->totlen),
				     je32_to_cpu(node->hdr_crc),
				     hdr_crc);
			DIRTY_SPACE(4);
			ofs += 4;
			continue;
		}

		if (ofs + je32_to_cpu(node->totlen) >
		    jeb->offset + c->sector_size) {
			/* Eep. Node goes over the end of the erase block. */
			printk(KERN_WARNING "Node at 0x%08x with length 0x%08x would run over the end of the erase block\n",
			       ofs, je32_to_cpu(node->totlen));
			printk(KERN_WARNING "Perhaps the file system was created with the wrong erase size?\n");
			DIRTY_SPACE(4);
			ofs += 4;
			continue;
		}

		if (!(je16_to_cpu(node->nodetype) & JFFS2_NODE_ACCURATE)) {
			/* Wheee. This is an obsoleted node */
			D2(printk(KERN_DEBUG "Node at 0x%08x is obsolete. Skipping\n", ofs));
			DIRTY_SPACE(PAD(je32_to_cpu(node->totlen)));
			ofs += PAD(je32_to_cpu(node->totlen));
			continue;
		}

		switch(je16_to_cpu(node->nodetype)) {
		case JFFS2_NODETYPE_INODE:
			if (buf_ofs + buf_len < ofs + sizeof(struct jffs2_raw_inode)) {
				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
				D1(printk(KERN_DEBUG "Fewer than %zd bytes (inode node) left to end of buf. Reading 0x%x at 0x%08x\n",
					  sizeof(struct jffs2_raw_inode), buf_len, ofs));
				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
				if (err)
					return err;
				buf_ofs = ofs;
				node = (void *)buf;
			}
			err = jffs2_scan_inode_node(c, jeb, (void *)node, ofs);
			if (err) return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			break;

		case JFFS2_NODETYPE_DIRENT:
			if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
				D1(printk(KERN_DEBUG "Fewer than %d bytes (dirent node) left to end of buf. Reading 0x%x at 0x%08x\n",
					  je32_to_cpu(node->totlen), buf_len, ofs));
				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
				if (err)
					return err;
				buf_ofs = ofs;
				node = (void *)buf;
			}
			err = jffs2_scan_dirent_node(c, jeb, (void *)node, ofs);
			if (err) return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			break;

		case JFFS2_NODETYPE_CLEANMARKER:
			D1(printk(KERN_DEBUG "CLEANMARKER node found at 0x%08x\n", ofs));
			if (je32_to_cpu(node->totlen) != c->cleanmarker_size) {
				printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x has totlen 0x%x != normal 0x%x\n",
				       ofs, je32_to_cpu(node->totlen), c->cleanmarker_size);
				DIRTY_SPACE(PAD(sizeof(struct jffs2_unknown_node)));
				ofs += PAD(sizeof(struct jffs2_unknown_node));
			} else if (jeb->first_node) {
				printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x, not first node in block (0x%08x)\n", ofs, jeb->offset);
				DIRTY_SPACE(PAD(sizeof(struct jffs2_unknown_node)));
				ofs += PAD(sizeof(struct jffs2_unknown_node));
			} else {
				struct jffs2_raw_node_ref *marker_ref = jffs2_alloc_raw_node_ref();
				if (!marker_ref) {
					printk(KERN_NOTICE "Failed to allocate node ref for clean marker\n");
					return -ENOMEM;
				}
				marker_ref->next_in_ino = NULL;
				marker_ref->next_phys = NULL;
				marker_ref->flash_offset = ofs | REF_NORMAL;
				marker_ref->__totlen = c->cleanmarker_size;

				jeb->first_node = jeb->last_node = marker_ref;

				USED_SPACE(PAD(c->cleanmarker_size));
				ofs += PAD(c->cleanmarker_size);
			}
			break;

		case JFFS2_NODETYPE_PADDING:
			DIRTY_SPACE(PAD(je32_to_cpu(node->totlen)));
			ofs += PAD(je32_to_cpu(node->totlen));
			break;

		default:
			switch (je16_to_cpu(node->nodetype) & JFFS2_COMPAT_MASK) {
			case JFFS2_FEATURE_ROCOMPAT:
				printk(KERN_NOTICE "Read-only compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs);
				c->flags |= JFFS2_SB_FLAG_RO;
				if (!(jffs2_is_readonly(c)))
					return -EROFS;
				DIRTY_SPACE(PAD(je32_to_cpu(node->totlen)));
				ofs += PAD(je32_to_cpu(node->totlen));
				break;

			case JFFS2_FEATURE_INCOMPAT:
				printk(KERN_NOTICE "Incompatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs);
				return -EINVAL;

			case JFFS2_FEATURE_RWCOMPAT_DELETE:
				D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs));
				DIRTY_SPACE(PAD(je32_to_cpu(node->totlen)));
				ofs += PAD(je32_to_cpu(node->totlen));
				break;

			case JFFS2_FEATURE_RWCOMPAT_COPY:
				D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs));
				USED_SPACE(PAD(je32_to_cpu(node->totlen)));
				ofs += PAD(je32_to_cpu(node->totlen));
				break;
			}
		}
	}

	D1(printk(KERN_DEBUG "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x\n", jeb->offset,
		  jeb->free_size, jeb->dirty_size, jeb->unchecked_size, jeb->used_size));

	/* mark_node_obsolete can add to wasted !! */
	if (jeb->wasted_size) {
		jeb->dirty_size += jeb->wasted_size;
		c->dirty_size += jeb->wasted_size;
		c->wasted_size -= jeb->wasted_size;
		jeb->wasted_size = 0;
	}

	if ((jeb->used_size + jeb->unchecked_size) == PAD(c->cleanmarker_size) && !jeb->dirty_size
	    && (!jeb->first_node || !jeb->first_node->next_phys) )
		return BLK_STATE_CLEANMARKER;

	/* move blocks with max 4 byte dirty space to cleanlist */
	else if (!ISDIRTY(c->sector_size - (jeb->used_size + jeb->unchecked_size))) {
		c->dirty_size -= jeb->dirty_size;
		c->wasted_size += jeb->dirty_size;
		jeb->wasted_size += jeb->dirty_size;
		jeb->dirty_size = 0;
		return BLK_STATE_CLEAN;
	} else if (jeb->used_size || jeb->unchecked_size)
		return BLK_STATE_PARTDIRTY;
	else
		return BLK_STATE_ALLDIRTY;
}
static struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uint32_t ino)
{
	struct jffs2_inode_cache *ic;

	ic = jffs2_get_ino_cache(c, ino);
	if (ic)
		return ic;

	if (ino > c->highest_ino)
		c->highest_ino = ino;

	ic = jffs2_alloc_inode_cache();
	if (!ic) {
		printk(KERN_NOTICE "jffs2_scan_make_ino_cache(): allocation of inode cache failed\n");
		return NULL;
	}
	memset(ic, 0, sizeof(*ic));

	ic->ino = ino;
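	/* The 'nodes' chain is circular through the cache itself: the last raw
	   node ref's ->next_in_ino points back at the jffs2_inode_cache.
	   Pointing 'nodes' at the cache therefore means "no nodes yet". */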
	ic->nodes = (void *)ic;
	jffs2_add_ino_cache(c, ic);
	if (ino == 1)
		ic->nlink = 1;
	return ic;
}
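
/*
 * A note on the bookkeeping done by the two node scanners below: each valid
 * node gets a jffs2_raw_node_ref which is threaded onto two chains at once,
 *
 *  - the eraseblock's physical chain (jeb->first_node/last_node, linked via
 *    ->next_phys) in on-flash order, and
 *  - the owning inode's chain (ic->nodes, linked via ->next_in_ino), which
 *    is how the node's data is found again when the inode is read.
 *
 * Inode nodes are recorded as REF_UNCHECKED (their data CRC is verified
 * later) and charged to unchecked_size; dirents are fully checked here, so
 * they are recorded as REF_PRISTINE and charged to used_size.
 */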
static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				 struct jffs2_raw_inode *ri, uint32_t ofs)
{
	struct jffs2_raw_node_ref *raw;
	struct jffs2_inode_cache *ic;
	uint32_t ino = je32_to_cpu(ri->ino);

	D1(printk(KERN_DEBUG "jffs2_scan_inode_node(): Node at 0x%08x\n", ofs));

	/* We do very little here now. Just check the ino# to which we should attribute
	   this node; we can do all the CRC checking etc. later. There's a tradeoff here --
	   we used to scan the flash once only, reading everything we want from it into
	   memory, then building all our in-core data structures and freeing the extra
	   information. Now we allow the first part of the mount to complete a lot quicker,
	   but we have to go _back_ to the flash in order to finish the CRC checking, etc.
	   Which means that the _full_ amount of time to get to proper write mode with GC
	   operational may actually be _longer_ than before. Sucks to be me. */

	raw = jffs2_alloc_raw_node_ref();
	if (!raw) {
		printk(KERN_NOTICE "jffs2_scan_inode_node(): allocation of node reference failed\n");
		return -ENOMEM;
	}

	ic = jffs2_get_ino_cache(c, ino);
	if (!ic) {
		/* Inocache get failed. Either we read a bogus ino# or it's just genuinely the
		   first node we found for this inode. Do a CRC check to protect against the former
		   case */
		uint32_t crc = crc32(0, ri, sizeof(*ri)-8);

		if (crc != je32_to_cpu(ri->node_crc)) {
			printk(KERN_NOTICE "jffs2_scan_inode_node(): CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
			       ofs, je32_to_cpu(ri->node_crc), crc);
			/* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */
			DIRTY_SPACE(PAD(je32_to_cpu(ri->totlen)));
			jffs2_free_raw_node_ref(raw);
			return 0;
		}

		ic = jffs2_scan_make_ino_cache(c, ino);
		if (!ic) {
			jffs2_free_raw_node_ref(raw);
			return -ENOMEM;
		}
	}

	/* Wheee. It worked */

	raw->flash_offset = ofs | REF_UNCHECKED;
	raw->__totlen = PAD(je32_to_cpu(ri->totlen));
	raw->next_phys = NULL;
	raw->next_in_ino = ic->nodes;

	ic->nodes = raw;
	if (!jeb->first_node)
		jeb->first_node = raw;
	if (jeb->last_node)
		jeb->last_node->next_phys = raw;
	jeb->last_node = raw;

	D1(printk(KERN_DEBUG "Node is ino #%u, version %d. Range 0x%x-0x%x\n",
		  je32_to_cpu(ri->ino), je32_to_cpu(ri->version),
		  je32_to_cpu(ri->offset),
		  je32_to_cpu(ri->offset)+je32_to_cpu(ri->dsize)));

	pseudo_random += je32_to_cpu(ri->version);

	UNCHECKED_SPACE(PAD(je32_to_cpu(ri->totlen)));

	return 0;
}
static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				  struct jffs2_raw_dirent *rd, uint32_t ofs)
{
	struct jffs2_raw_node_ref *raw;
	struct jffs2_full_dirent *fd;
	struct jffs2_inode_cache *ic;
	uint32_t crc;

	D1(printk(KERN_DEBUG "jffs2_scan_dirent_node(): Node at 0x%08x\n", ofs));

	/* We don't get here unless the node is still valid, so we don't have to
	   mask in the ACCURATE bit any more. */
	crc = crc32(0, rd, sizeof(*rd)-8);

	if (crc != je32_to_cpu(rd->node_crc)) {
		printk(KERN_NOTICE "jffs2_scan_dirent_node(): Node CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
		       ofs, je32_to_cpu(rd->node_crc), crc);
		/* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */
		DIRTY_SPACE(PAD(je32_to_cpu(rd->totlen)));
		return 0;
	}

	pseudo_random += je32_to_cpu(rd->version);

	fd = jffs2_alloc_full_dirent(rd->nsize+1);
	if (!fd) {
		return -ENOMEM;
	}
	memcpy(&fd->name, rd->name, rd->nsize);
	fd->name[rd->nsize] = 0;

	crc = crc32(0, fd->name, rd->nsize);
	if (crc != je32_to_cpu(rd->name_crc)) {
		printk(KERN_NOTICE "jffs2_scan_dirent_node(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
		       ofs, je32_to_cpu(rd->name_crc), crc);
		D1(printk(KERN_NOTICE "Name for which CRC failed is (now) '%s', ino #%d\n", fd->name, je32_to_cpu(rd->ino)));
		jffs2_free_full_dirent(fd);
		/* FIXME: Why do we believe totlen? */
		/* We believe totlen because the CRC on the node _header_ was OK, just the name failed. */
		DIRTY_SPACE(PAD(je32_to_cpu(rd->totlen)));
		return 0;
	}
	raw = jffs2_alloc_raw_node_ref();
	if (!raw) {
		jffs2_free_full_dirent(fd);
		printk(KERN_NOTICE "jffs2_scan_dirent_node(): allocation of node reference failed\n");
		return -ENOMEM;
	}
	ic = jffs2_scan_make_ino_cache(c, je32_to_cpu(rd->pino));
	if (!ic) {
		jffs2_free_full_dirent(fd);
		jffs2_free_raw_node_ref(raw);
		return -ENOMEM;
	}

	raw->__totlen = PAD(je32_to_cpu(rd->totlen));
	raw->flash_offset = ofs | REF_PRISTINE;
	raw->next_phys = NULL;
	raw->next_in_ino = ic->nodes;
	ic->nodes = raw;
	if (!jeb->first_node)
		jeb->first_node = raw;
	if (jeb->last_node)
		jeb->last_node->next_phys = raw;
	jeb->last_node = raw;

	fd->raw = raw;
	fd->next = NULL;
	fd->version = je32_to_cpu(rd->version);
	fd->ino = je32_to_cpu(rd->ino);
	fd->nhash = full_name_hash(fd->name, rd->nsize);
	fd->type = rd->type;
	USED_SPACE(PAD(je32_to_cpu(rd->totlen)));
	jffs2_add_fd_to_list(c, fd, &ic->scan_dents);

	return 0;
}
static int count_list(struct list_head *l)
{
	uint32_t count = 0;
	struct list_head *tmp;

	list_for_each(tmp, l) {
		count++;
	}
	return count;
}

/* Note: This breaks if list_empty(head). I don't care. You
   might, if you copy this code and use it elsewhere :) */
static void rotate_list(struct list_head *head, uint32_t count)
{
	struct list_head *n = head->next;

	list_del(head);
	while(count--) {
		n = n->next;
	}
	list_add(head, n);
}
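
/*
 * For illustration: with elements (A B C D) on the list, rotate_list(head, 2)
 * walks the insertion point forward from A twice (landing on C) and re-links
 * the head there, so a subsequent traversal sees (D A B C). Note that
 * rotate_list(head, 0) still rotates by one element; the callers below only
 * care that the starting point is randomised, not by exactly how much.
 */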
void jffs2_rotate_lists(struct jffs2_sb_info *c)
{
	uint32_t x;
	uint32_t rotateby;

	x = count_list(&c->clean_list);
	if (x) {
		rotateby = pseudo_random % x;
		D1(printk(KERN_DEBUG "Rotating clean_list by %d\n", rotateby));

		rotate_list((&c->clean_list), rotateby);

		D1(printk(KERN_DEBUG "Erase block at front of clean_list is at %08x\n",
			  list_entry(c->clean_list.next, struct jffs2_eraseblock, list)->offset));
	} else {
		D1(printk(KERN_DEBUG "Not rotating empty clean_list\n"));
	}

	x = count_list(&c->very_dirty_list);
	if (x) {
		rotateby = pseudo_random % x;
		D1(printk(KERN_DEBUG "Rotating very_dirty_list by %d\n", rotateby));

		rotate_list((&c->very_dirty_list), rotateby);

		D1(printk(KERN_DEBUG "Erase block at front of very_dirty_list is at %08x\n",
			  list_entry(c->very_dirty_list.next, struct jffs2_eraseblock, list)->offset));
	} else {
		D1(printk(KERN_DEBUG "Not rotating empty very_dirty_list\n"));
	}

	x = count_list(&c->dirty_list);
	if (x) {
		rotateby = pseudo_random % x;
		D1(printk(KERN_DEBUG "Rotating dirty_list by %d\n", rotateby));

		rotate_list((&c->dirty_list), rotateby);

		D1(printk(KERN_DEBUG "Erase block at front of dirty_list is at %08x\n",
			  list_entry(c->dirty_list.next, struct jffs2_eraseblock, list)->offset));
	} else {
		D1(printk(KERN_DEBUG "Not rotating empty dirty_list\n"));
	}

	x = count_list(&c->erasable_list);
	if (x) {
		rotateby = pseudo_random % x;
		D1(printk(KERN_DEBUG "Rotating erasable_list by %d\n", rotateby));

		rotate_list((&c->erasable_list), rotateby);

		D1(printk(KERN_DEBUG "Erase block at front of erasable_list is at %08x\n",
			  list_entry(c->erasable_list.next, struct jffs2_eraseblock, list)->offset));
	} else {
		D1(printk(KERN_DEBUG "Not rotating empty erasable_list\n"));
	}

	if (c->nr_erasing_blocks) {
		rotateby = pseudo_random % c->nr_erasing_blocks;
		D1(printk(KERN_DEBUG "Rotating erase_pending_list by %d\n", rotateby));

		rotate_list((&c->erase_pending_list), rotateby);

		D1(printk(KERN_DEBUG "Erase block at front of erase_pending_list is at %08x\n",
			  list_entry(c->erase_pending_list.next, struct jffs2_eraseblock, list)->offset));
	} else {
		D1(printk(KERN_DEBUG "Not rotating empty erase_pending_list\n"));
	}

	if (c->nr_free_blocks) {
		rotateby = pseudo_random % c->nr_free_blocks;
		D1(printk(KERN_DEBUG "Rotating free_list by %d\n", rotateby));

		rotate_list((&c->free_list), rotateby);

		D1(printk(KERN_DEBUG "Erase block at front of free_list is at %08x\n",
			  list_entry(c->free_list.next, struct jffs2_eraseblock, list)->offset));
	} else {
		D1(printk(KERN_DEBUG "Not rotating empty free_list\n"));
	}
}