Merge tag 'mtd/fixes-for-4.17-rc6' of git://git.infradead.org/linux-mtd

Pull mtd fixes from Boris Brezillon:
 "NAND fixes:
   - Fix read path of the Marvell NAND driver
   - Make sure we don't pass a u64 to ndelay()

  CFI fix:
   - Fix the map_word_andequal() implementation"

* tag 'mtd/fixes-for-4.17-rc6' of git://git.infradead.org/linux-mtd:
  mtd: rawnand: Fix return type of __DIVIDE() when called with 32-bit
  mtd: rawnand: marvell: Fix read logic for layouts with ->nchunks > 2
  mtd: Fix comparison in map_word_andequal()
commit 163ced613c
Author: Linus Torvalds
Date:   2018-05-18 09:58:29 -07:00

3 changed files with 17 additions and 9 deletions

@@ -1194,11 +1194,13 @@ static void marvell_nfc_hw_ecc_bch_read_chunk(struct nand_chip *chip, int chunk,
 			  NDCB0_CMD2(NAND_CMD_READSTART);
 
 	/*
-	 * Trigger the naked read operation only on the last chunk.
-	 * Otherwise, use monolithic read.
+	 * Trigger the monolithic read on the first chunk, then naked read on
+	 * intermediate chunks and finally a last naked read on the last chunk.
 	 */
-	if (lt->nchunks == 1 || (chunk < lt->nchunks - 1))
+	if (chunk == 0)
+		nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);
+	else if (chunk < lt->nchunks - 1)
 		nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_NAKED_RW);
 	else
 		nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
 
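For reference, a minimal standalone sketch of the dispatch the fix introduces (plain userspace C, not the driver code; the XTYPE_* names are borrowed for illustration only):

/*
 * Monolithic read on the first chunk, naked reads on intermediate
 * chunks, and a last naked read on the final chunk.
 */
#include <stdio.h>

enum xtype { XTYPE_MONOLITHIC_RW, XTYPE_NAKED_RW, XTYPE_LAST_NAKED_RW };

static enum xtype xtype_for_chunk(int chunk, int nchunks)
{
	if (chunk == 0)
		return XTYPE_MONOLITHIC_RW;
	else if (chunk < nchunks - 1)
		return XTYPE_NAKED_RW;
	return XTYPE_LAST_NAKED_RW;
}

int main(void)
{
	static const char * const names[] = {
		"MONOLITHIC_RW", "NAKED_RW", "LAST_NAKED_RW",
	};
	int chunk;

	/*
	 * With four chunks (an ->nchunks > 2 layout), the removed test
	 * "nchunks == 1 || chunk < nchunks - 1" treated chunk 0 like an
	 * intermediate chunk; the new test singles it out.
	 */
	for (chunk = 0; chunk < 4; chunk++)
		printf("chunk %d -> %s\n", chunk,
		       names[xtype_for_chunk(chunk, 4)]);
	return 0;
}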

@@ -312,7 +312,7 @@ void map_destroy(struct mtd_info *mtd);
 ({									\
 	int i, ret = 1;							\
 	for (i = 0; i < map_words(map); i++) {				\
-		if (((val1).x[i] & (val2).x[i]) != (val2).x[i]) {	\
+		if (((val1).x[i] & (val2).x[i]) != (val3).x[i]) {	\
 			ret = 0;					\
 			break;						\
 		}							\
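A minimal sketch of the mixup, using a simplified single-word stand-in for the kernel's map_word: map_word_andequal(map, val1, val2, val3) is meant to report whether (val1 & val2) == val3, but the broken macro compared against the mask val2 itself:

#include <stdio.h>

struct map_word { unsigned long x[1]; };

/* Buggy variant: only tells you whether all of val2's bits are set. */
static int andequal_buggy(struct map_word v1, struct map_word v2,
			  struct map_word v3)
{
	(void)v3;	/* expected value ignored, that was the bug */
	return (v1.x[0] & v2.x[0]) == v2.x[0];
}

/* Fixed variant: compares the masked value against val3. */
static int andequal_fixed(struct map_word v1, struct map_word v2,
			  struct map_word v3)
{
	return (v1.x[0] & v2.x[0]) == v3.x[0];
}

int main(void)
{
	struct map_word val1 = { { 0x50 } };	/* word read back */
	struct map_word val2 = { { 0xF0 } };	/* mask: bits of interest */
	struct map_word val3 = { { 0x50 } };	/* expected masked value */

	printf("buggy: %d, fixed: %d\n",
	       andequal_buggy(val1, val2, val3),	/* 0: 0x50 != 0xF0 */
	       andequal_fixed(val1, val2, val3));	/* 1: 0x50 == 0x50 */
	return 0;
}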

@@ -867,12 +867,18 @@ struct nand_op_instr {
  * tBERS (during an erase) which all of them are u64 values that cannot be
  * divided by usual kernel macros and must be handled with the special
  * DIV_ROUND_UP_ULL() macro.
+ *
+ * Cast to type of dividend is needed here to guarantee that the result won't
+ * be an unsigned long long when the dividend is an unsigned long (or smaller),
+ * which is what the compiler does when it sees ternary operator with 2
+ * different return types (picks the largest type to make sure there's no
+ * loss).
  */
-#define __DIVIDE(dividend, divisor) ({					\
-	sizeof(dividend) == sizeof(u32) ?				\
-		DIV_ROUND_UP(dividend, divisor) :			\
-		DIV_ROUND_UP_ULL(dividend, divisor);			\
-	})
+#define __DIVIDE(dividend, divisor) ({						\
+	(__typeof__(dividend))(sizeof(dividend) <= sizeof(unsigned long) ?	\
+			       DIV_ROUND_UP(dividend, divisor) :		\
+			       DIV_ROUND_UP_ULL(dividend, divisor));		\
+	})
 #define PSEC_TO_NSEC(x)	__DIVIDE(x, 1000)
 #define PSEC_TO_MSEC(x)	__DIVIDE(x, 1000000000)
 
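The promotion the new cast guards against can be shown in plain C (uint32_t/uint64_t standing in for the kernel's u32/u64): a ternary expression takes the common type of both arms, so the old macro produced a 64-bit result even for 32-bit dividends, which is how a u64 could end up being passed to ndelay():

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t psecs = 1000;

	/* The ternary's type is the common type of both arms: 8 bytes,
	 * even though the 32-bit arm is the one taken. */
	printf("%zu\n", sizeof(1 ? psecs : (uint64_t)0));		/* 8 */

	/* Casting the whole expression back to the dividend's type, as
	 * the fixed macro does with (__typeof__(dividend))(...),
	 * restores the 32-bit result callers expect. */
	printf("%zu\n", sizeof((uint32_t)(1 ? psecs : (uint64_t)0)));	/* 4 */
	return 0;
}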