Rafał Miłecki 5e885c09c6 bcm53xx: switch back to standalone ASM entry flushing cache
Over a year ago, in commit ac96a1665ac7 ("bcm53xx: update Disable MMU
and Dcache during decompression"), we switched to Florian's patch for
working around the CFE bug. The main difference was that it reused part
of the existing __armv7_mmu_cache_flush instead of implementing the
flush separately.

This worked well for Northstar devices, but it doesn't work for
BCM53573: with Florian's patch these devices simply don't start booting.
The culprit is the ldmfd instruction in __armv7_mmu_cache_flush.
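
The hierarchical-cache path of __armv7_mmu_cache_flush brackets its
set/way loop with a stack save/restore, roughly as sketched below
(simplified, not a verbatim quote of head.S). The final ldmfd pops the
saved registers back off the stack, which is presumably what goes wrong
this early on BCM53573; the standalone entry used here only clobbers its
work registers and never touches the stack.

	stmfd	sp!, {r0-r7, r9-r11}	@ push work registers; needs a usable stack
	@ ... set/way clean & invalidate loop runs here ...
	ldmfd	sp!, {r0-r7, r9-r11}	@ pop them back; this is the ldmfd in question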

So this commit switches back to a standalone implementation. This time,
instead of copying Broadcom's copy of cache-v7.S, we make our own copy
of the original file. Unfortunately we can't compile cache-v7.S across
directories from ../../mm/, as that file also pulls in __INITDATA and
define_cache_functions v7, which just triggers:
> Error: unrecognized/unsupported machine ID (r1 = 0x0000007f)
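
For context, the way the decompressor is expected to use this standalone
entry looks roughly like the sketch below; the call site, register
handling and exact ordering are illustrative assumptions, not copied
from the actual patch.

	bl	v7_flush_dcache_all	@ write back & invalidate whatever CFE left in D-cache
	mrc	p15, 0, r0, c1, c0, 0	@ read SCTLR
	bic	r0, r0, #(1 << 2)	@ clear C bit: D-cache off
	bic	r0, r0, #(1 << 0)	@ clear M bit: MMU off
	mcr	p15, 0, r0, c1, c0, 0	@ write SCTLR back
	isb				@ make the change visible before continuing

In real code lr and the scratch registers corrupted by
v7_flush_dcache_all would of course need saving around the call.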

The only real change Broadcom made in its copy of the .S file was
modifying the mcr instruction to use c6 instead of c14. It isn't clear
to me whether we really need that, but let's use it for now.
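
For reference, the two encodings differ only in whether dirty lines are
written back before being invalidated; these are the architectural CP15
operations, shown here just for comparison and not taken from the patch:

	mcr	p15, 0, r11, c7, c6, 2	@ DCISW: invalidate D-cache line by set/way
	mcr	p15, 0, r11, c7, c14, 2	@ DCCISW: clean & invalidate D-cache line by set/way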

By the way, we also update the commit message of
[PATCH] ARM: BCM5301X: Disable MMU and Dcache during decompression

This makes the kernel boot successfully on BCM53573.

Signed-off-by: Rafał Miłecki <rafal@milecki.pl>
2016-08-19 11:38:44 +02:00

/*
 * This is the D-cache flushing entry extracted from mm/cache-v7.S. We need it
 * for Broadcom devices with a broken bootloader that leaves the cache enabled.
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2005 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <linux/init.h>

__INIT

/*
 *	v7_flush_dcache_all()
 *
 *	Flush the whole D-cache.
 *
 *	Corrupted registers: r0-r5, r7, r9-r11
 *
 *	- mm	- mm_struct describing address space
 */
ENTRY(v7_flush_dcache_all)
	mrc	p15, 1, r0, c0, c0, 1		@ read clidr
	ands	r3, r0, #0x7000000		@ extract loc from clidr
	mov	r3, r3, lsr #23			@ left align loc bit field
	beq	finished			@ if loc is 0, then no need to clean
	mov	r10, #0				@ start clean at cache level 0
loop1:
	add	r2, r10, r10, lsr #1		@ work out 3x current cache level
	mov	r1, r0, lsr r2			@ extract cache type bits from clidr
	and	r1, r1, #7			@ mask of the bits for current cache only
	cmp	r1, #2				@ see what cache we have at this level
	blt	skip				@ skip if no cache, or just i-cache
	mcr	p15, 2, r10, c0, c0, 0		@ select current cache level in cssr
	isb					@ isb to sync the new cssr&csidr
	mrc	p15, 1, r1, c0, c0, 0		@ read the new csidr
	and	r2, r1, #7			@ extract the length of the cache lines
	add	r2, r2, #4			@ add 4 (line length offset)
	ldr	r4, =0x3ff
	ands	r4, r4, r1, lsr #3		@ find maximum number on the way size
	clz	r5, r4				@ find bit position of way size increment
	ldr	r7, =0x7fff
	ands	r7, r7, r1, lsr #13		@ extract max number of the index size
loop2:
	mov	r9, r4				@ create working copy of max way size
loop3:
	orr	r11, r10, r9, lsl r5		@ factor way and cache number into r11
	orr	r11, r11, r7, lsl r2		@ factor index number into r11
	mcr	p15, 0, r11, c7, c14, 2		@ clean & invalidate by set/way
	subs	r9, r9, #1			@ decrement the way
	bge	loop3
	subs	r7, r7, #1			@ decrement the index
	bge	loop2
skip:
	add	r10, r10, #2			@ increment cache number
	cmp	r3, r10
	bgt	loop1
finished:
	mov	r10, #0				@ switch back to cache level 0
	mcr	p15, 2, r10, c0, c0, 0		@ select current cache level in cssr
	isb
	mov	pc, lr
ENDPROC(v7_flush_dcache_all)