1use core::{arch::asm, hint::spin_loop, ops::Index, ptr::NonNull};
2
3use super::IntId;
4use aarch64_cpu::registers::MPIDR_EL1;
5use alloc::boxed::Box;
6use rdif_intc::*;
7use tock_registers::{interfaces::*, register_bitfields, register_structs, registers::*};
8
9use super::*;
10
// Bit masks for the ICC_SRE_EL1 system register (System Register Enable),
// used in `GicCpu::new` when turning on the system-register interface.
const GICC_SRE_SRE: usize = 1 << 0; // SRE: system-register interface enabled
const GICC_SRE_DFB: usize = 1 << 1; // DFB: disable FIQ bypass
const GICC_SRE_DIB: usize = 1 << 2; // DIB: disable IRQ bypass
14
/// GIC security-state configuration.
///
/// `Two` (the default) runs the distributor with two security states;
/// `OneNS` selects a single, non-secure state (GICD_CTLR.DS is set in
/// `Gic::open`).
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub enum Security {
    /// Two security states (Secure + Non-secure).
    #[default]
    Two,
    /// Single security state, Non-secure only.
    OneNS,
}
26
/// GICv3 driver handle: distributor (GICD) plus redistributor (GICR) region.
pub struct Gic {
    gicd: NonNull<Distributor>, // base of the memory-mapped distributor block
    gicr: NonNull<u8>,          // base of the redistributor region (array of frames)
    security: Security,         // security model chosen at construction
    max_spi_num: usize,         // SPI capacity, read from the distributor in `open`
}
33
34impl Gic {
35 pub fn new(gicd: NonNull<u8>, gicr: NonNull<u8>, security: Security) -> Self {
36 Self {
37 gicd: gicd.cast(),
38 gicr,
39 security,
40 max_spi_num: 0,
41 }
42 }
43
44 fn reg(&self) -> &Distributor {
45 unsafe { self.gicd.as_ref() }
46 }
47
48 fn reg_mut(&mut self) -> &mut Distributor {
49 unsafe { self.gicd.as_mut() }
50 }
51
52 fn wait_ctlr(&self) {
53 while self.reg().CTLR.is_set(CTLR::RWP) {
54 spin_loop();
55 }
56 }
57 fn rd_slice(&self) -> RDv3Slice {
58 RDv3Slice::new(self.gicr)
59 }
60 fn current_rd(&self) -> NonNull<RedistributorV3> {
61 let want = (MPIDR_EL1.get() & 0xFFF) as u32;
62
63 for rd in self.rd_slice().iter() {
64 let affi = unsafe { rd.as_ref() }.lpi_ref().TYPER.read(TYPER::Affinity) as u32;
65 if affi == want {
66 return rd;
67 }
68 }
69 panic!("No current redistributor")
70 }
71
72 fn rd_mut(&mut self) -> &mut RedistributorV3 {
73 unsafe { self.current_rd().as_mut() }
74 }
75}
76
// SAFETY: `Gic` only stores raw pointers to memory-mapped registers plus
// plain data. NOTE(review): soundness assumes register access is externally
// serialized when the driver moves between threads — confirm at call sites.
unsafe impl Send for Gic {}
78
79impl DriverGeneric for Gic {
80 fn open(&mut self) -> DriverResult {
81 self.reg_mut().CTLR.set(0);
83 self.wait_ctlr();
84
85 self.max_spi_num = self.reg().max_spi_num();
86
87 if matches!(self.security, Security::OneNS) {
88 self.reg_mut().CTLR.modify(CTLR::DS::SET);
89 }
90
91 for reg in self.reg_mut().ICENABLER.iter_mut() {
93 reg.set(u32::MAX);
94 }
95
96 for reg in self.reg_mut().ICPENDR.iter_mut() {
97 reg.set(u32::MAX);
98 }
99
100 for reg in self.reg_mut().IGROUPR.iter_mut() {
101 reg.set(u32::MAX);
102 }
103
104 for reg in self.reg_mut().IGRPMODR.iter() {
105 reg.set(u32::MAX);
106 }
107
108 self.wait_ctlr();
109
110 for reg in self.reg_mut().IPRIORITYR.iter_mut() {
111 reg.set(0xa0);
112 }
113
114 for reg in self.reg_mut().ICFGR.iter_mut() {
115 reg.set(0x0);
116 }
117
118 match self.security {
119 Security::Two => self
120 .reg_mut()
121 .CTLR
122 .write(CTLR::ARE_NS::SET + CTLR::EnableGrp1NS::SET),
123 Security::OneNS => self
124 .reg_mut()
125 .CTLR
126 .write(CTLR::ARE_S::SET + CTLR::EnableGrp1S::SET),
127 }
128 Ok(())
129 }
130
131 fn close(&mut self) -> DriverResult {
132 Ok(())
133 }
134}
135
/// Reads an AArch64 system register by name, e.g.
/// `cpu_read!("ICC_SRE_EL1")`. Expands to a single `mrs` instruction and
/// yields the value as `usize`.
macro_rules! cpu_read {
    ($name: expr) => {{
        let x: usize;
        unsafe {
            core::arch::asm!(concat!("mrs {}, ", $name), out(reg) x);
        }
        x
    }};
}
145
/// Writes a value to an AArch64 system register by name, e.g.
/// `cpu_write!("ICC_PMR_EL1", 0xFF)`. Expands to a single `msr`
/// instruction; the value is bound to a local first so `$value` is
/// evaluated exactly once.
macro_rules! cpu_write {
    ($name: expr, $value: expr) => {{
        let x = $value;
        unsafe {
            core::arch::asm!(concat!("msr ", $name, ", {0:x}"), in(reg) x);
        }
    }};
}
154impl Interface for Gic {
155 fn current_cpu_setup(&self) -> HardwareCPU {
156 let rd = self.current_rd();
157 Box::new(GicCpu::new(rd))
158 }
159
160 fn irq_enable(&mut self, irq: IrqId) {
161 let id = IntId::from(irq);
162 if id.is_private() {
163 self.rd_mut().sgi.set_enable_interrupt(id, true);
164 } else {
165 self.reg_mut().set_enable_interrupt(id, true);
166 }
167 }
168
169 fn irq_disable(&mut self, irq: IrqId) {
170 let intid = IntId::from(irq);
171 if intid.is_private() {
172 self.rd_mut().sgi.set_enable_interrupt(intid, false);
173 } else {
174 self.reg_mut().set_enable_interrupt(intid, false);
175 }
176 }
177
178 fn set_priority(&mut self, irq: IrqId, priority: usize) {
179 let intid = IntId::from(irq);
180 if intid.is_private() {
181 self.rd_mut().sgi.set_priority(intid, priority as _);
182 } else {
183 self.reg_mut().set_priority(intid, priority as _);
184 }
185 }
186
187 fn set_trigger(&mut self, irq: IrqId, trigger: Trigger) {
188 let intid = IntId::from(irq);
189 if intid.is_private() {
190 self.rd_mut().sgi.set_cfgr(intid, trigger);
191 } else {
192 self.reg_mut().set_cfgr(intid, trigger);
193 }
194 }
195
196 fn set_target_cpu(&mut self, irq: IrqId, cpu: CpuId) {
197 let intid = IntId::from(irq);
198 if intid.is_private() {
199 return;
200 }
202
203 let mpid: usize = cpu.into();
204 let target = CPUTarget::from(MPID::from(mpid as u64));
205 self.reg_mut().set_route(intid, target);
206 }
207}
208
/// Per-CPU interface handle returned by `Gic::current_cpu_setup`.
/// Stateless: all operations go through ICC_* system registers.
pub struct GicCpu {}

// SAFETY: `GicCpu` carries no data; its methods only touch the calling
// CPU's own system registers. NOTE(review): confirm callers invoke it on
// the CPU it was created for.
unsafe impl Send for GicCpu {}
unsafe impl Sync for GicCpu {}
213
impl GicCpu {
    /// Wakes the CPU's redistributor and brings the CPU interface
    /// (ICC_* system registers) into a usable state.
    ///
    /// `rd` must point at the calling CPU's own redistributor frame.
    fn new(mut rd: NonNull<RedistributorV3>) -> Self {
        // SAFETY: caller supplies a pointer to a valid, mapped frame.
        let rd = unsafe { rd.as_mut() };

        // Clear ProcessorSleep and wait for the redistributor to wake.
        rd.lpi.wake();
        // Mask all private interrupts, clear pending state, and place them
        // in Group 1.
        rd.sgi.ICENABLER0.set(u32::MAX);
        rd.sgi.ICPENDR0.set(u32::MAX);
        rd.sgi.IGROUPR0.set(u32::MAX);
        rd.sgi.IGRPMODR0.set(u32::MAX);

        // Enable the system-register interface if it is not already on,
        // also disabling IRQ/FIQ bypass.
        let mut reg = cpu_read!("ICC_SRE_EL1");
        if (reg & GICC_SRE_SRE) == 0 {
            reg |= GICC_SRE_SRE | GICC_SRE_DFB | GICC_SRE_DIB;
            cpu_write!("ICC_SRE_EL1", reg);
        }

        // Priority mask 0xFF: accept interrupts of any priority.
        cpu_write!("ICC_PMR_EL1", 0xFF);
        enable_group1();
        // CBPR: use a single binary point register for both groups.
        const GICC_CTLR_CBPR: usize = 1 << 0;
        cpu_write!("ICC_CTLR_EL1", GICC_CTLR_CBPR);

        Self {}
    }
}
238
239impl InterfaceCPU for GicCpu {
240 fn get_and_acknowledge_interrupt(&self) -> Option<IrqId> {
241 let intid = cpu_read!("icc_iar1_el1");
242
243 if intid == SPECIAL_RANGE.start as usize {
244 None
245 } else {
246 Some(intid.into())
247 }
248 }
249
250 fn end_interrupt(&self, irq: IrqId) {
251 let intid: usize = irq.into();
252 cpu_write!("icc_eoir1_el1", intid);
253 }
254}
255
// GICv3: each CPU owns one LPI (RD_base) frame plus one SGI frame.
#[allow(unused)]
type RDv3Slice = RedistributorSlice<RedistributorV3>;
// GICv4 appends two further frames for virtual LPIs/SGIs.
#[allow(unused)]
type RDv4Slice = RedistributorSlice<RedistributorV4>;

/// A redistributor frame layout. `lpi_ref` exposes the control (RD_base)
/// page that is common to all layout variants.
pub trait RedistributorItem {
    fn lpi_ref(&self) -> &LPI;
}
264
/// GICv3 redistributor frame: RD_base (LPI) page followed by the SGI page.
pub(crate) struct RedistributorV3 {
    pub lpi: LPI,
    pub sgi: SGI,
}

/// GICv4 redistributor frame: the v3 pair plus VLPI and VSGI pages.
#[allow(unused)]
pub(crate) struct RedistributorV4 {
    pub lpi: LPI,
    pub sgi: SGI,
    pub _vlpi: LPI,
    pub _vsgi: SGI,
}
impl RedistributorItem for RedistributorV3 {
    fn lpi_ref(&self) -> &LPI {
        &self.lpi
    }
}
impl RedistributorItem for RedistributorV4 {
    fn lpi_ref(&self) -> &LPI {
        &self.lpi
    }
}
287pub struct RedistributorSlice<T: RedistributorItem> {
288 ptr: NonNull<T>,
289}
290
291impl<T: RedistributorItem> RedistributorSlice<T> {
292 pub fn new(ptr: NonNull<u8>) -> Self {
293 Self { ptr: ptr.cast() }
294 }
295
296 pub fn iter(&self) -> RedistributorIter<T> {
297 RedistributorIter::new(self.ptr)
298 }
299}
300
/// Iterator over redistributor frames; terminates after yielding the frame
/// whose GICR_TYPER.Last bit is set.
pub struct RedistributorIter<T: RedistributorItem> {
    ptr: NonNull<T>,     // next frame to yield
    is_last: bool,       // set once the Last-marked frame has been yielded
}

impl<T: RedistributorItem> RedistributorIter<T> {
    pub fn new(p: NonNull<T>) -> Self {
        Self {
            ptr: p,
            is_last: false,
        }
    }
}
314
impl<T: RedistributorItem> Iterator for RedistributorIter<T> {
    type Item = NonNull<T>;

    /// Yields the current frame and advances. After the frame marked
    /// `Last` has been yielded, every subsequent call returns `None`.
    fn next(&mut self) -> Option<Self::Item> {
        if self.is_last {
            return None;
        }
        // SAFETY: `ptr` walks a contiguous, mapped array of frames and we
        // stop yielding at the frame whose TYPER.Last bit is set. The
        // pointer is advanced once past that frame, but the advanced value
        // is never dereferenced.
        unsafe {
            let ptr = self.ptr;
            let rd = ptr.as_ref();
            let lpi = rd.lpi_ref();
            if lpi.TYPER.read(TYPER::Last) > 0 {
                self.is_last = true;
            }
            self.ptr = self.ptr.add(1);
            Some(ptr)
        }
    }
}
334
impl<T: RedistributorItem> Index<CPUTarget> for RedistributorSlice<T> {
    type Output = T;

    /// Looks up the frame whose GICR_TYPER.Affinity matches the target
    /// CPU's affinity.
    ///
    /// # Panics
    /// Hits `unreachable!` if no frame matches the requested target.
    fn index(&self, index: CPUTarget) -> &Self::Output {
        let affinity = index.affinity();
        for rd in self.iter() {
            let affi = unsafe { rd.as_ref() }.lpi_ref().TYPER.read(TYPER::Affinity) as u32;
            if affi == affinity {
                // SAFETY: frame pointers from `iter` reference mapped MMIO
                // that outlives `self`.
                return unsafe { rd.as_ref() };
            }
        }
        unreachable!()
    }
}
349
// Redistributor RD_base (control/LPI) page layout.
register_structs! {
    #[allow(non_snake_case)]
    pub LPI {
        (0x0000 => CTLR: ReadWrite<u32, RCtrl::Register>),   // GICR_CTLR
        (0x0004 => IIDR: ReadOnly<u32>),                     // implementer ID
        (0x0008 => TYPER: ReadOnly<u64, TYPER::Register>),   // GICR_TYPER (affinity, Last)
        (0x0010 => STATUSR: ReadWrite<u32>),                 // error status
        (0x0014 => WAKER: ReadWrite<u32, WAKER::Register>),  // GICR_WAKER (sleep control)
        (0x0018 => _rsv0),
        (0x0fe8 => PIDR2 : ReadOnly<u32, PIDR2::Register>),  // peripheral ID (arch revision)
        (0x0fec => _rsv1),
        (0x10000 => @END),
    }
}
// Bitfields of GICR_CTLR (named RCtrl to avoid clashing with GICD's CTLR).
register_bitfields! [
    u32,
    RCtrl [
        EnableLPIs OFFSET(0) NUMBITS(1) [],
        CES OFFSET(1) NUMBITS(1) [],
        IR OFFSET(2) NUMBITS(1) [],
        RWP OFFSET(3) NUMBITS(1) [],     // register write pending
        DPG OFFSET(24) NUMBITS(1) [],
        DPG1NS OFFSET(25) NUMBITS(1) [],
        DPG1S OFFSET(26) NUMBITS(1) [],
        UWP OFFSET(31) NUMBITS(1) [],    // upstream write pending
    ],
];
378
impl LPI {
    /// Wakes this redistributor: clears GICR_WAKER.ProcessorSleep, then
    /// busy-waits until ChildrenAsleep clears and any pending register
    /// writes (GICR_CTLR.RWP) complete.
    pub fn wake(&self) {
        // Whole-register write: ProcessorSleep -> 0 (other bits cleared).
        self.WAKER.write(WAKER::ProcessorSleep::CLEAR);

        while self.WAKER.is_set(WAKER::ChildrenAsleep) {
            spin_loop();
        }

        while self.CTLR.is_set(RCtrl::RWP) {
            spin_loop();
        }
    }
}
392
// Redistributor SGI_base page layout: per-CPU registers for SGIs (0-15)
// and PPIs (16-31). The `_E` variants are the extended-PPI ranges.
register_structs! {
    #[allow(non_snake_case)]
    pub SGI {
        (0x0000 => _rsv0),
        (0x0080 => IGROUPR0: ReadWrite<u32>),                 // group assignment
        (0x0084 => IGROUPR_E: [ReadWrite<u32>; 2]),
        (0x008C => _rsv1),
        (0x0100 => ISENABLER0: ReadWrite<u32>),               // set-enable (write 1 to unmask)
        (0x0104 => ISENABLER_E: [ReadWrite<u32>;2]),
        (0x010C => _rsv2),
        (0x0180 => ICENABLER0 : ReadWrite<u32>),              // clear-enable (write 1 to mask)
        (0x0184 => ICENABLER_E: [ReadWrite<u32>;2]),
        (0x018C => _rsv3),
        (0x0200 => ISPENDR0: ReadWrite<u32>),                 // set-pending
        (0x0204 => ISPENDR_E: [ReadWrite<u32>; 2]),
        (0x020C => _rsv4),
        (0x0280 => ICPENDR0: ReadWrite<u32>),                 // clear-pending
        (0x0284 => ICPENDR_E: [ReadWrite<u32>; 2]),
        (0x028C => _rsv5),
        (0x0400 => IPRIORITYR: [ReadWrite<u8>; 32]),          // one priority byte per INTID 0-31
        (0x0420 => IPRIORITYR_E: [ReadWrite<u8>; 64]),
        (0x0460 => _rsv6),
        (0x0C00 => ICFGR : [ReadWrite<u32>; 6]),              // trigger config, 2 bits per INTID
        (0x0C18 => _rsv7),
        (0x0D00 => IGRPMODR0 : ReadWrite<u32>),               // group modifier
        (0x0D04 => IGRPMODR_E: [ReadWrite<u32>;2]),
        (0x0D0C => _rsv8),
        (0x10000 => @END),
    }
}
423impl SGI {
424 pub fn set_enable_interrupt(&self, irq: IntId, enable: bool) {
425 let int_id: u32 = irq.into();
426 let bit = 1 << (int_id % 32);
427 if enable {
428 self.ISENABLER0.set(bit);
429 } else {
430 self.ICENABLER0.set(bit);
431 }
432 }
433 pub fn set_priority(&self, intid: IntId, priority: u8) {
434 self.IPRIORITYR[u32::from(intid) as usize].set(priority)
435 }
436
437 fn set_cfgr(&self, intid: IntId, trigger: Trigger) {
438 let clean = !((intid.to_u32() % 16) << 1);
439 let bit: u32 = match trigger {
440 Trigger::EdgeBoth => 1,
441 Trigger::EdgeRising => 1,
442 Trigger::EdgeFailling => 1,
443 Trigger::LevelHigh => 0,
444 Trigger::LevelLow => 0,
445 } << ((intid.to_u32() % 16) << 1);
446
447 if intid.is_sgi() {
448 let mut mask = self.ICFGR[0].get();
449 mask &= clean;
450 mask |= bit;
451
452 self.ICFGR[0].set(mask);
453 } else {
454 let mut mask = self.ICFGR[1].get();
455 mask &= clean;
456 mask |= bit;
457 self.ICFGR[1].set(mask);
458 }
459 }
460}
461
// 64-bit register bitfields: GICR_TYPER and the distributor's GICD_IROUTER.
register_bitfields! [
    u64,
    TYPER [
        PLPIS OFFSET(0) NUMBITS(1) [],
        VLPIS OFFSET(1) NUMBITS(1) [],
        Dirty OFFSET(2) NUMBITS(1) [],
        Last OFFSET(4) NUMBITS(1) [],          // set on the final frame in the region
        Affinity OFFSET(32) NUMBITS(32) [],    // packed Aff3.Aff2.Aff1.Aff0 of the owning CPU
    ],

    pub IROUTER [
        AFF0 OFFSET(0) NUMBITS(8) [],
        AFF1 OFFSET(8) NUMBITS(8) [],
        AFF2 OFFSET(16) NUMBITS(8) [],
        InterruptRoutingMode OFFSET(31) NUMBITS(1) [
            Aff=0,     // route to the specific affinity
            Any=1,     // route to any participating CPU (1-of-N)
        ],
        AFF3 OFFSET(32) NUMBITS(8) [],
    ]
];
// GICR_WAKER: redistributor sleep/wake handshake bits.
register_bitfields! [
    u32,
    WAKER [
        ProcessorSleep OFFSET(1) NUMBITS(1) [],
        ChildrenAsleep OFFSET(2) NUMBITS(1) [],
    ],
];
491
/// Enables Group 1 interrupts for the current security state by writing 1
/// to ICC_IGRPEN1_EL1, followed by an ISB so the change is visible to
/// subsequent instructions.
fn enable_group1() {
    // SAFETY: writes a GIC CPU-interface system register; no memory is
    // accessed. The value goes through an `in(reg)` operand so the
    // compiler allocates the register — the previous version hard-coded
    // `MOV w0, #1` without declaring x0 as an operand or clobber, which is
    // undefined behavior for Rust inline assembly.
    unsafe {
        asm!(
            "msr ICC_IGRPEN1_EL1, {0:x}",
            "isb",
            in(reg) 1u64,
            options(nostack),
        )
    }
}