path: root/core/minute-ia/switch.S
/* Copyright 2016 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 *
 * x86 task switching and interrupt handling
 */

#include "config.h"
#include "registers.h"
#include "task_defs.h"

#ifdef CONFIG_TASK_PROFILING
#define task_start_irq_handler_call call task_start_irq_handler
#else
#define task_start_irq_handler_call
#endif

.text

.extern current_task
.extern next_task

.global __task_start
.global __switchto
.global default_int_handler

# Start task scheduling by launching current_task (hook_task).
# This function is not an ISR but imitates the return sequence of one.
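# The stack pointer loaded below (the first field of *current_task) is
# expected to point at a frame laid out for the popa/iret sequence
# (a sketch, lowest address first):
#
#	edi, esi, ebp, esp (discarded), ebx, edx, ecx, eax	<- popa
#	eip, cs, eflags						<- iret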
.align 4
.func __task_start
__task_start:
	movl 0x4(%esp), %ecx
	movl current_task, %eax
	movl (%eax), %esp
#ifdef CONFIG_FPU
	movl USE_FPU_OFFSET(%eax), %ebx
	test %ebx, %ebx
	jz 1f
	frstor FPU_CTX_OFFSET(%eax)
	1:
#endif
	movl $0x1, (%ecx)       # first task is ready. set start_called = 1
	popa
	iret
.endfunc

# Default interrupt handler - handles exceptions and unregistered
# interrupts and prints an error.
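# The handler switches to the dedicated system stack (stack_end) before
# calling into C, then signals EOI to the IOAPIC and LAPIC unless the
# vector was the LAPIC spurious interrupt.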
.align 4
.func default_int_handler
default_int_handler:

	pusha
	ASM_LOCK_PREFIX addl $1, __in_isr

	movl %esp, %eax
	movl $stack_end, %esp		# use system stack
	push %eax			# push sp of preempted context

	call unhandled_vector		# Handle system interrupts and
					# unregistered user interrupts
	pop %esp			# restore sp of preempted context
	# The unhandled_vector call loads %eax with the vector number for
	# comparison
	cmpl $LAPIC_SPURIOUS_INT_VECTOR, %eax
	je 1f				# No EOI for LAPIC_SPURIOUS_INT_VECTOR

	movl %eax, IOAPIC_EOI_REG	# Indicate completion of servicing the
					# interrupt to IOAPIC first
	movl $0x00, LAPIC_EOI_REG	# Indicate completion of servicing the
					# interrupt to LAPIC next
	1:
	# Ensure we balance the __in_isr counter
	ASM_LOCK_PREFIX subl $1, __in_isr
	popa
	iret

.endfunc

# Switch from the current task to the next task if a switch is required.
# __schedule() triggers the software interrupt ISH_TS_VECTOR, which is
# handled by __switchto.
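#
# A minimal sketch of the trigger side, based on the register use described
# below (the real __schedule() lives in the C code; <desched>/<resched> are
# placeholders, not actual operands):
#
#	movl	<desched>, %edx
#	movl	<resched>, %ecx
#	int	$ISH_TS_VECTOR		# vectors here, to __switchto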
.align 4
.func __switchto
__switchto:

	# Save current task
	pusha
	ASM_LOCK_PREFIX addl $1, __in_isr

	movl %esp, %eax
	movl $stack_end, %esp		# use system stack
	push %eax			# push sp of preempted context

	# __schedule() copies 'resched' to %ecx and 'desched' to %edx before
	# triggering ISH_TS_VECTOR.
	#
	# Push %ecx and %edx onto the stack to pass them as function parameters
	# to switch_handler(desched, resched). After the call, we clean up the
	# stack pointer. Note, we do this now, before task_start_irq_handler
	# has a chance to clobber these caller-saved registers.
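	#
	# With the cdecl convention the last value pushed becomes the first
	# argument, so pushing %ecx then %edx hands 'desched' (%edx) and
	# 'resched' (%ecx) to switch_handler() in the expected order.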
	push %ecx
	push %edx

	# We don't push anything on the stack for task_start_irq_handler since
	# its parameter is unused.
	task_start_irq_handler_call

	# Stack is already set up from previous pushes
	call switch_handler
	addl $0x8, %esp		# Clean up stack
	pop %esp		# restore sp of preempted context

	test %eax, %eax		# Check if task switch required
	jz 1f

	movl current_task, %eax

#ifdef CONFIG_FPU
	movl USE_FPU_OFFSET(%eax), %ebx
	test %ebx, %ebx
	jz 2f
	fnsave FPU_CTX_OFFSET(%eax) # Save current FPU context (current->fp_ctx)
	2:
#endif

	# Save SP of current task and switch to new task
	movl %esp, (%eax)
	movl next_task, %eax
	movl %eax, current_task
	movl (%eax), %esp

#ifdef CONFIG_FPU
	movl USE_FPU_OFFSET(%eax), %ebx
	test %ebx, %ebx
	jz 1f
	frstor FPU_CTX_OFFSET(%eax)		# Restore next FPU context
#endif

	1:

	# Indicate completion of servicing the interrupt to LAPIC.
	# No IOAPIC EOI needed as this is SW triggered.
	movl $0x00, LAPIC_EOI_REG

	# Decrement ISR counter and restore general purpose registers.
	ASM_LOCK_PREFIX subl $1, __in_isr
	popa
	iret

.endfunc