author      Kevin Hao <haokexin@gmail.com>          2013-12-24 15:12:06 +0800
committer   Scott Wood <scottwood@freescale.com>    2014-01-09 17:52:16 -0600
commit      dd189692d40948d6445bbaeb8cb9bf9d15f54dc6 (patch)
tree        a52bf15f5aaa9cecb53efe0cd2bdf3bcc9c463ef /arch/powerpc/kernel/head_fsl_booke.S
parent      1c49abec677c7ff495837dbaafad8e12f09c29fc (diff)
powerpc: enable the relocatable support for the fsl booke 32bit kernel
This is based on the code in head_44x.S. The difference is that the initial TLB size we use is 64M. With this patch we can only load the kernel at an address between memstart_addr and memstart_addr + 64M. We will remove this restriction in the following patches.

Signed-off-by: Kevin Hao <haokexin@gmail.com>
Signed-off-by: Scott Wood <scottwood@freescale.com>
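A minimal C sketch of the address calculation added in the first hunk below, assuming KERNELBASE = 0xc0000000 and a hypothetical helper name (illustration only, not kernel code): the relocation target keeps the kernel's offset within its 64M page and rebases that offset on KERNELBASE.

#include <stdint.h>
#include <stdio.h>

#define SZ_64M     0x04000000u
#define KERNELBASE 0xc0000000u                      /* assumed value, sketch only */

/* Mirrors the r3..r6 arithmetic in the hunk: r6 = PHYS_START % 64M,
 * r5 = KERNELBASE % 64M, r3 = KERNELBASE + (r6 - r5). */
static uint32_t reloc_virt_base(uint32_t phys_start /* r25 */)
{
	uint32_t phys_off = phys_start & (SZ_64M - 1);  /* r6 = PHYS_START % 64M */
	uint32_t virt_off = KERNELBASE & (SZ_64M - 1);  /* r5 = KERNELBASE % 64M */

	return KERNELBASE + (phys_off - virt_off);      /* required virtual address */
}

int main(void)
{
	/* Kernel loaded 16M into its 64M page -> relocate to 0xc1000000 */
	printf("0x%08x\n", reloc_virt_base(0x01000000));
	return 0;
}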
Diffstat (limited to 'arch/powerpc/kernel/head_fsl_booke.S')
-rw-r--r--   arch/powerpc/kernel/head_fsl_booke.S   34
1 file changed, 34 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 196950f29c00..19bd574bda9d 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -73,6 +73,30 @@ _ENTRY(_start);
li r24,0 /* CPU number */
li r23,0 /* phys kernel start (high) */
+#ifdef CONFIG_RELOCATABLE
+ LOAD_REG_ADDR_PIC(r3, _stext) /* Get our current runtime base */
+
+ /* Translate _stext address to physical, save in r23/r25 */
+ bl get_phys_addr
+ mr r23,r3
+ mr r25,r4
+
+ /*
+ * We have the runtime (virtual) address of our base.
+ * We calculate our offset within a 64M page.
+ * We could map the 64M page we belong to at PAGE_OFFSET and
+ * get going from there.
+ */
+ lis r4,KERNELBASE@h
+ ori r4,r4,KERNELBASE@l
+ rlwinm r6,r25,0,0x3ffffff /* r6 = PHYS_START % 64M */
+ rlwinm r5,r4,0,0x3ffffff /* r5 = KERNELBASE % 64M */
+ subf r3,r5,r6 /* r3 = r6 - r5 */
+ add r3,r4,r3 /* Required Virtual Address */
+
+ bl relocate
+#endif
+
/* We try not to make any assumptions about how the boot loader
* set up or used the TLBs. We invalidate all mappings from the
* boot loader and load a single entry in TLB1[0] to map the
@@ -182,6 +206,16 @@ _ENTRY(__early_start)
bl early_init
+#ifdef CONFIG_RELOCATABLE
+#ifdef CONFIG_PHYS_64BIT
+ mr r3,r23
+ mr r4,r25
+#else
+ mr r3,r25
+#endif
+ bl relocate_init
+#endif
+
#ifdef CONFIG_DYNAMIC_MEMSTART
lis r3,kernstart_addr@ha
la r3,kernstart_addr@l(r3)
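In the second hunk, the physical kernel start saved earlier (r23 = high word, r25 = low word) is handed to relocate_init(): with CONFIG_PHYS_64BIT the full 64-bit address is passed in r3/r4, otherwise only the low 32 bits in r3. A rough C view of that argument passing, using a hypothetical stand-in prototype since relocate_init() itself is not part of this diff:

#include <stdint.h>

/* Hypothetical stand-in for the real relocate_init(), which this patch
 * series adds elsewhere; its exact signature is not shown in this diff. */
static void relocate_init(uint64_t phys_start)
{
	(void)phys_start;
}

/* r23 holds the high word and r25 the low word of the physical kernel
 * start captured by get_phys_addr in the first hunk. */
static void call_relocate_init(uint32_t r23_high, uint32_t r25_low)
{
#ifdef CONFIG_PHYS_64BIT
	relocate_init(((uint64_t)r23_high << 32) | r25_low);  /* r3 = high, r4 = low */
#else
	relocate_init((uint64_t)r25_low);                     /* r3 = low 32 bits */
#endif
}

int main(void)
{
	call_relocate_init(0x0u, 0x01000000u);  /* e.g. physical start at 16M */
	return 0;
}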