This patch changes the ENTRY() macro in asm.h to create a new section
for every assembler function, providing dcache_clean/invalidate_all and
friends with the same --gc-sections goodness that our C functions have.
This requires a few minor changes: moving some data around (to make
sure it ends up in the right section) and changing some libgcc
functions (which apparently need to have two names?), but nothing
serious.

(You may note that some of our assembly functions keep data, sometimes
even writable data, within the same .text section. It has been this way
for a while and I'm not looking to change it now, although it's not
totally clean. Since we don't enforce read-only sections through
paging, it doesn't really hurt.)

BUG=None
TEST=Nyan and Snow still boot. Confirmed that dcache_invalidate_all is
no longer output into any binary, since no one actually uses it.

Original-Change-Id: I247b29d6173ba516c8dff59126c93b66f7dc4b8d
Original-Signed-off-by: Julius Werner <jwerner@chromium.org>
Original-Reviewed-on: https://chromium-review.googlesource.com/183891
(cherry picked from commit 4a3f2e45e06cc8592d56c3577f41ff879f10e9cc)
Signed-off-by: Marc Jones <marc.jones@se-eng.com>
Change-Id: Ieaa4f2ea9d81c5b9e2b36a772ff9610bdf6446f9
Reviewed-on: http://review.coreboot.org/7451
Tested-by: build bot (Jenkins)
Reviewed-by: David Hendricks <dhendrix@chromium.org>
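To illustrate the mechanism, here is a minimal sketch of such a macro
pair (the exact coreboot definitions in asm.h may differ; the section
attributes below are an assumption). Each ENTRY() opens a uniquely
named .text.<name> section, the assembler-level counterpart of
compiling C with -ffunction-sections, so that linking with
--gc-sections can discard any function that nothing references:

	/* Sketch only, not the literal asm.h contents. */
	#define ENTRY(name) \
		.section .text.name, "ax", %progbits; \
		.global name; \
		name:

	#define ENDPROC(name) \
		.type name, %function; \
		.size name, . - name

Because each function then lives in its own section, an unused symbol
such as dcache_invalidate_all can be garbage-collected by the linker
instead of being kept alive as part of one monolithic .text section.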
/*
 *  linux/arch/arm/lib/memset.S
 *
 *  Copyright (C) 1995-2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  ASM optimised string functions
 */

#include <arch/asm.h>
#include "asmlib.h"

ENTRY(memset)
	ands	r3, r0, #3		@ 1 unaligned?
	mov	ip, r0			@ preserve r0 as return value
	bne	6f			@ 1
/*
 * we know that the pointer in ip is aligned to a word boundary.
 */
1:	orr	r1, r1, r1, lsl #8
	orr	r1, r1, r1, lsl #16
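	@ r1 now has the fill byte replicated into all four byte lanes
	@ (e.g. 0x000000AB becomes 0xABABABAB), so whole words of the
	@ pattern can be stored at once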
	mov	r3, r1
	cmp	r2, #16
	blt	4f

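/*
 * CALGN() comes from asmlib.h; depending on configuration it selects
 * either the straightforward loop below or the cache-line-aligning
 * variant in the #else branch.
 */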
#if ! CALGN(1)+0

/*
 * We need 2 extra registers for this loop - use r8 and the LR
 */
	stmfd	sp!, {r8, lr}
	mov	r8, r1
	mov	lr, r1

2:	subs	r2, r2, #64
	stmgeia	ip!, {r1, r3, r8, lr}	@ 64 bytes at a time.
	stmgeia	ip!, {r1, r3, r8, lr}
	stmgeia	ip!, {r1, r3, r8, lr}
	stmgeia	ip!, {r1, r3, r8, lr}
	bgt	2b
	ldmeqfd	sp!, {r8, pc}		@ Now <64 bytes to go.
/*
 * No need to correct the count; we're only testing bits from now on
 */
	tst	r2, #32
	stmneia	ip!, {r1, r3, r8, lr}
	stmneia	ip!, {r1, r3, r8, lr}
	tst	r2, #16
	stmneia	ip!, {r1, r3, r8, lr}
	ldmfd	sp!, {r8, lr}

#else

/*
 * This version aligns the destination pointer in order to write
 * whole cache lines at once.
 */

	stmfd	sp!, {r4-r8, lr}
	mov	r4, r1
	mov	r5, r1
	mov	r6, r1
	mov	r7, r1
	mov	r8, r1
	mov	lr, r1

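	@ only go through the 32-byte alignment preamble for buffers
	@ larger than 96 bytes whose destination is not already aligned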
	cmp	r2, #96
	tstgt	ip, #31
	ble	3f

	and	r8, ip, #31
	rsb	r8, r8, #32
	sub	r2, r2, r8
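	@ r8 holds the 1-31 bytes needed for alignment; the shift below
	@ moves its bit 4 into C and bit 3 into N, so the conditional
	@ stores that follow peel off 16-, 8- and 4-byte chunks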
	movs	r8, r8, lsl #(32 - 4)
	stmcsia	ip!, {r4, r5, r6, r7}
	stmmiia	ip!, {r4, r5}
	tst	r8, #(1 << 30)
	mov	r8, r1
	strne	r1, [ip], #4

3:	subs	r2, r2, #64
	stmgeia	ip!, {r1, r3-r8, lr}
	stmgeia	ip!, {r1, r3-r8, lr}
	bgt	3b
	ldmeqfd	sp!, {r4-r8, pc}

	tst	r2, #32
	stmneia	ip!, {r1, r3-r8, lr}
	tst	r2, #16
	stmneia	ip!, {r4-r7}
	ldmfd	sp!, {r4-r8, lr}

#endif

4:	tst	r2, #8
	stmneia	ip!, {r1, r3}
	tst	r2, #4
	strne	r1, [ip], #4
/*
 * When we get here, we've got less than 4 bytes to zero.  We
 * may have an unaligned pointer as well.
 */
5:	tst	r2, #2
	strneb	r1, [ip], #1
	strneb	r1, [ip], #1
	tst	r2, #1
	strneb	r1, [ip], #1
	mov	pc, lr

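/*
 * The destination was not word aligned: store the 1-3 bytes needed to
 * align ip (r3 = r0 & 3), then rejoin the word-aligned path at 1:.
 */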
6:	subs	r2, r2, #4		@ 1 do we have enough
	blt	5b			@ 1 bytes to align with?
	cmp	r3, #2			@ 1
	strltb	r1, [ip], #1		@ 1
	strleb	r1, [ip], #1		@ 1
	strb	r1, [ip], #1		@ 1
	add	r2, r2, r3		@ 1 (r2 = r2 - (4 - r3))
	b	1b
ENDPROC(memset)