//! CPUID

use volatile_register::RO;
#[cfg(not(armv6m))]
use volatile_register::RW;

#[cfg(not(armv6m))]
use crate::peripheral::CPUID;
/// Register block
#[repr(C)]
pub struct RegisterBlock {
    /// CPUID base
    pub base: RO<u32>,

    _reserved0: [u32; 15],

    /// Processor Feature (not present on Cortex-M0 variants)
    #[cfg(not(armv6m))]
    pub pfr: [RO<u32>; 2],
    #[cfg(armv6m)]
    _reserved1: [u32; 2],

    /// Debug Feature (not present on Cortex-M0 variants)
    #[cfg(not(armv6m))]
    pub dfr: RO<u32>,
    #[cfg(armv6m)]
    _reserved2: u32,

    /// Auxiliary Feature (not present on Cortex-M0 variants)
    #[cfg(not(armv6m))]
    pub afr: RO<u32>,
    #[cfg(armv6m)]
    _reserved3: u32,

    /// Memory Model Feature (not present on Cortex-M0 variants)
    #[cfg(not(armv6m))]
    pub mmfr: [RO<u32>; 4],
    #[cfg(armv6m)]
    _reserved4: [u32; 4],

    /// Instruction Set Attribute (not present on Cortex-M0 variants)
    #[cfg(not(armv6m))]
    pub isar: [RO<u32>; 5],
    #[cfg(armv6m)]
    _reserved5: [u32; 5],

    _reserved6: u32,

    /// Cache Level ID (only present on parts with cache support, e.g. Cortex-M7)
    #[cfg(not(armv6m))]
    pub clidr: RO<u32>,

    /// Cache Type (only present on parts with cache support, e.g. Cortex-M7)
    #[cfg(not(armv6m))]
    pub ctr: RO<u32>,

    /// Cache Size ID (only present on parts with cache support, e.g. Cortex-M7)
    #[cfg(not(armv6m))]
    pub ccsidr: RO<u32>,

    /// Cache Size Selection (only present on parts with cache support, e.g. Cortex-M7)
    #[cfg(not(armv6m))]
    pub csselr: RW<u32>,
}
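
// Illustrative only (not part of the register definitions above): a minimal sketch of how a
// value read from `base` breaks down into the architecturally defined CPUID fields
// (Implementer [31:24], Variant [23:20], Architecture [19:16], PartNo [15:4], Revision [3:0]).
// The helper name `decode_cpuid` is an assumption made up for this example; it simply splits
// the word into those bit fields and skips the Architecture field.
#[allow(dead_code)]
fn decode_cpuid(cpuid: u32) -> (u8, u8, u16, u8) {
    let implementer = (cpuid >> 24) as u8;      // e.g. 0x41 ('A') for ARM
    let variant = ((cpuid >> 20) & 0xF) as u8;  // major revision (the `r` in rNpM)
    let partno = ((cpuid >> 4) & 0xFFF) as u16; // e.g. 0xC27 for Cortex-M7
    let revision = (cpuid & 0xF) as u8;         // minor revision (the `p` in rNpM)
    (implementer, variant, partno, revision)
}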
/// Type of cache to select on CSSELR writes.
#[cfg(not(armv6m))]
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum CsselrCacheType {
    /// Select DCache or unified cache
    DataOrUnified = 0,
    /// Select ICache
    Instruction = 1,
}
#[cfg(not(armv6m))]
impl CPUID {
    /// Selects the current CCSIDR
    ///
    /// * `level`: the required cache level minus 1, e.g. 0 for L1, 1 for L2
    /// * `ind`: select instruction cache or data/unified cache
    ///
    /// `level` is masked to be between 0 and 7.
    #[inline]
    pub fn select_cache(&mut self, level: u8, ind: CsselrCacheType) {
        const CSSELR_IND_POS: u32 = 0;
        const CSSELR_IND_MASK: u32 = 1 << CSSELR_IND_POS;

        const CSSELR_LEVEL_POS: u32 = 1;
        const CSSELR_LEVEL_MASK: u32 = 0x7 << CSSELR_LEVEL_POS;

        unsafe {
            self.csselr.write(
                ((u32::from(level) << CSSELR_LEVEL_POS) & CSSELR_LEVEL_MASK)
                    | (((ind as u32) << CSSELR_IND_POS) & CSSELR_IND_MASK),
            )
        }
    }
    /// Returns the number of sets and ways in the selected cache
    #[inline]
    pub fn cache_num_sets_ways(&mut self, level: u8, ind: CsselrCacheType) -> (u16, u16) {
        const CCSIDR_NUMSETS_POS: u32 = 13;
        const CCSIDR_NUMSETS_MASK: u32 = 0x7FFF << CCSIDR_NUMSETS_POS;

        const CCSIDR_ASSOCIATIVITY_POS: u32 = 3;
        const CCSIDR_ASSOCIATIVITY_MASK: u32 = 0x3FF << CCSIDR_ASSOCIATIVITY_POS;

        // Select the cache, then ensure the CSSELR write completes before CCSIDR is read.
        self.select_cache(level, ind);
        crate::asm::dsb();

        // CCSIDR encodes (sets - 1) and (associativity - 1), hence the +1 on both fields.
        let ccsidr = self.ccsidr.read();
        (
            (1 + ((ccsidr & CCSIDR_NUMSETS_MASK) >> CCSIDR_NUMSETS_POS)) as u16,
            (1 + ((ccsidr & CCSIDR_ASSOCIATIVITY_MASK) >> CCSIDR_ASSOCIATIVITY_POS)) as u16,
        )
    }
    /// Returns log2 of the number of words in the smallest cache line of all the data caches
    /// and unified caches that are controlled by the processor.
    ///
    /// This is the `DminLine` field of the CTR register.
    #[inline(always)]
    pub fn cache_dminline() -> u32 {
        const CTR_DMINLINE_POS: u32 = 16;
        const CTR_DMINLINE_MASK: u32 = 0xF << CTR_DMINLINE_POS;

        let ctr = unsafe { (*Self::PTR).ctr.read() };
        (ctr & CTR_DMINLINE_MASK) >> CTR_DMINLINE_POS
    }

    /// Returns log2 of the number of words in the smallest cache line of all the instruction
    /// caches that are controlled by the processor.
    ///
    /// This is the `IminLine` field of the CTR register.
    #[inline(always)]
    pub fn cache_iminline() -> u32 {
        const CTR_IMINLINE_POS: u32 = 0;
        const CTR_IMINLINE_MASK: u32 = 0xF << CTR_IMINLINE_POS;

        let ctr = unsafe { (*Self::PTR).ctr.read() };
        (ctr & CTR_IMINLINE_MASK) >> CTR_IMINLINE_POS
    }
}
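
// Illustrative only: a minimal sketch of combining the helpers above to estimate the total
// size in bytes of the selected cache. The helper name `cache_size_bytes` is an assumption
// for this example and is not part of the API above. It takes the line length from CTR
// (DminLine/IminLine encode log2 of words per line, with 4-byte words), which matches the
// selected cache on single-level-cache parts such as the Cortex-M7.
#[cfg(not(armv6m))]
#[allow(dead_code)]
fn cache_size_bytes(cpuid: &mut CPUID, level: u8, ind: CsselrCacheType) -> u32 {
    // Sets and ways of the selected cache, read from CCSIDR.
    let (sets, ways) = cpuid.cache_num_sets_ways(level, ind);

    // Convert log2(words per line) into bytes per line: 4 bytes per word.
    let line_bytes = match ind {
        CsselrCacheType::DataOrUnified => 4u32 << CPUID::cache_dminline(),
        CsselrCacheType::Instruction => 4u32 << CPUID::cache_iminline(),
    };

    u32::from(sets) * u32::from(ways) * line_bytes
}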