/src/regalloc2/src/ion/merge.rs
Line | Count | Source |
1 | | /* |
2 | | * This file was initially derived from the files |
3 | | * `js/src/jit/BacktrackingAllocator.h` and |
4 | | * `js/src/jit/BacktrackingAllocator.cpp` in Mozilla Firefox, and was |
5 | | * originally licensed under the Mozilla Public License 2.0. We |
6 | | * subsequently relicensed it to Apache-2.0 WITH LLVM-exception (see |
7 | | * https://github.com/bytecodealliance/regalloc2/issues/7). |
8 | | * |
9 | | * Since the initial port, the design has been substantially evolved |
10 | | * and optimized. |
11 | | */ |
12 | | |
13 | | //! Bundle merging. |
14 | | |
15 | | use super::{ |
16 | | Env, LiveBundleIndex, LiveRangeIndex, SpillSet, SpillSetIndex, SpillSlotIndex, VRegIndex, |
17 | | }; |
18 | | use crate::{ |
19 | | ion::data_structures::BlockparamOut, Function, Inst, OperandConstraint, OperandKind, PReg, |
20 | | }; |
21 | | use alloc::format; |
22 | | use smallvec::smallvec; |
23 | | |
24 | | impl<'a, F: Function> Env<'a, F> { |
25 | 989k | pub fn merge_bundles(&mut self, from: LiveBundleIndex, to: LiveBundleIndex) -> bool { |
26 | 989k | if from == to { |
27 | | // Merge bundle into self -- trivial merge. |
28 | 13.8k | return true; |
29 | 976k | } |
30 | | trace!( |
31 | 0 | "merging from bundle{} to bundle{}", |
32 | 0 | from.index(), |
33 | 0 | to.index() |
34 | | ); |
35 | | |
36 | | // Both bundles must deal with the same RegClass. |
37 | 976k | let from_rc = self.spillsets[self.bundles[from.index()].spillset.index()].class; |
38 | 976k | let to_rc = self.spillsets[self.bundles[to.index()].spillset.index()].class; |
39 | 976k | if from_rc != to_rc { |
40 | 0 | trace!(" -> mismatching reg classes"); |
41 | 0 | return false; |
42 | 976k | } |
43 | 976k | |
44 | 976k | // If either bundle is already assigned (due to a pinned vreg), don't merge. |
45 | 976k | if self.bundles[from.index()].allocation.is_some() |
46 | 976k | || self.bundles[to.index()].allocation.is_some() |
47 | | { |
48 | 0 | trace!("one of the bundles is already assigned (pinned)"); |
49 | 0 | return false; |
50 | 976k | } |
51 | 976k | |
52 | 976k | #[cfg(debug_assertions)] |
53 | 976k | { |
54 | 976k | // Sanity check: both bundles should contain only ranges with appropriate VReg classes. |
55 | 976k | for entry in &self.bundles[from.index()].ranges { |
56 | 976k | let vreg = self.ranges[entry.index.index()].vreg; |
57 | 976k | debug_assert_eq!(from_rc, self.vreg(vreg).class()); |
58 | 976k | } |
59 | 976k | for entry in &self.bundles[to.index()].ranges { |
60 | 976k | let vreg = self.ranges[entry.index.index()].vreg; |
61 | 976k | debug_assert_eq!(to_rc, self.vreg(vreg).class()); |
62 | 976k | } |
63 | 976k | } |
64 | 976k | |
65 | 976k | // Check for overlap in LiveRanges and for conflicting |
66 | 976k | // requirements. |
67 | 976k | let ranges_from = &self.bundles[from.index()].ranges[..]; |
68 | 976k | let ranges_to = &self.bundles[to.index()].ranges[..]; |
69 | 976k | let mut idx_from = 0; |
70 | 976k | let mut idx_to = 0; |
71 | 976k | let mut range_count = 0; |
72 | 7.49M | while idx_from < ranges_from.len() && idx_to < ranges_to.len() { |
73 | 7.02M | range_count += 1; |
74 | 7.02M | if range_count > 200 { |
75 | | trace!( |
76 | 0 | "reached merge complexity (range_count = {}); exiting", |
77 | | range_count |
78 | | ); |
79 | | // Limit merge complexity. |
80 | 44 | return false; |
81 | 7.02M | } |
82 | 7.02M | |
83 | 7.02M | if ranges_from[idx_from].range.from >= ranges_to[idx_to].range.to { |
84 | 632k | idx_to += 1; |
85 | 6.39M | } else if ranges_to[idx_to].range.from >= ranges_from[idx_from].range.to { |
86 | 5.89M | idx_from += 1; |
87 | 5.89M | } else { |
88 | | // Overlap -- cannot merge. |
89 | | trace!( |
90 | 0 | " -> overlap between {:?} and {:?}, exiting", |
91 | 0 | ranges_from[idx_from].index, |
92 | 0 | ranges_to[idx_to].index |
93 | | ); |
94 | 501k | return false; |
95 | | } |
96 | | } |
97 | | |
98 | | // Avoid merging if either side has a fixed-reg def: this can |
99 | | // result in an impossible-to-solve allocation problem if |
100 | | // there is a fixed-reg use in the same reg on the same |
101 | | // instruction. |
102 | 474k | if self.bundles[from.index()].cached_fixed_def() |
103 | 471k | || self.bundles[to.index()].cached_fixed_def() |
104 | | { |
105 | 0 | trace!(" -> one bundle has a fixed def; aborting merge"); |
106 | 3.20k | return false; |
107 | 471k | } |
108 | 471k | |
109 | 471k | // Check for a requirements conflict. |
110 | 471k | if self.bundles[from.index()].cached_stack() |
111 | 458k | || self.bundles[from.index()].cached_fixed() |
112 | 414k | || self.bundles[to.index()].cached_stack() |
113 | 405k | || self.bundles[to.index()].cached_fixed() |
114 | | { |
115 | 84.8k | if self.merge_bundle_requirements(from, to).is_err() { |
116 | 0 | trace!(" -> conflicting requirements; aborting merge"); |
117 | 36.5k | return false; |
118 | 48.2k | } |
119 | 386k | } |
120 | | |
121 | 0 | trace!(" -> committing to merge"); |
122 | | |
123 | | // If we reach here, then the bundles do not overlap -- merge |
124 | | // them! Unless one side is empty (trivial cases handled below), |
125 | | // we concatenate both range lists and re-sort, replacing the |
126 | | // list on `to` when we're done. |
127 | 435k | if ranges_from.is_empty() { |
128 | | // `from` bundle is empty -- trivial merge. |
129 | 0 | trace!(" -> from bundle{} is empty; trivial merge", from.index()); |
130 | 0 | return true; |
131 | 435k | } |
132 | 435k | if ranges_to.is_empty() { |
133 | | // `to` bundle is empty -- just move the list over from |
134 | | // `from` and set `bundle` up-link on all ranges. |
135 | 0 | trace!(" -> to bundle{} is empty; trivial merge", to.index()); |
136 | 0 | let list = core::mem::replace(&mut self.bundles[from.index()].ranges, smallvec![]); |
137 | 0 | for entry in &list { |
138 | 0 | self.ranges[entry.index.index()].bundle = to; |
139 | 0 |
140 | 0 | if self.annotations_enabled { |
141 | 0 | self.annotate( |
142 | 0 | entry.range.from, |
143 | 0 | format!( |
144 | 0 | " MERGE range{} v{} from bundle{} to bundle{}", |
145 | 0 | entry.index.index(), |
146 | 0 | self.ranges[entry.index.index()].vreg.index(), |
147 | 0 | from.index(), |
148 | 0 | to.index(), |
149 | 0 | ), |
150 | 0 | ); |
151 | 0 | } |
152 | | } |
153 | 0 | self.bundles[to.index()].ranges = list; |
154 | 0 |
155 | 0 | if self.bundles[from.index()].cached_stack() { |
156 | 0 | self.bundles[to.index()].set_cached_stack(); |
157 | 0 | } |
158 | 0 | if self.bundles[from.index()].cached_fixed() { |
159 | 0 | self.bundles[to.index()].set_cached_fixed(); |
160 | 0 | } |
161 | | |
162 | 0 | return true; |
163 | 435k | } |
164 | | |
165 | | trace!( |
166 | 0 | "merging: ranges_from = {:?} ranges_to = {:?}", |
167 | | ranges_from, |
168 | | ranges_to |
169 | | ); |
170 | | |
171 | | // Two non-empty lists of LiveRanges: concatenate and |
172 | | // sort. This is faster than a mergesort-like merge into a new |
173 | | // list, empirically. |
174 | 435k | let from_list = core::mem::replace(&mut self.bundles[from.index()].ranges, smallvec![]); |
175 | 3.62M | for entry in &from_list { |
176 | 3.18M | self.ranges[entry.index.index()].bundle = to; |
177 | 3.18M | } |
178 | 435k | self.bundles[to.index()] |
179 | 435k | .ranges |
180 | 435k | .extend_from_slice(&from_list[..]); |
181 | 435k | self.bundles[to.index()] |
182 | 435k | .ranges |
183 | 22.9M | .sort_unstable_by_key(|entry| entry.range.from); |
Unexecuted instantiation: <regalloc2::ion::data_structures::Env<_>>::merge_bundles::{closure#0} |
184 | 435k | |
185 | 435k | if self.annotations_enabled { |
186 | 0 | trace!("merging: merged = {:?}", self.bundles[to.index()].ranges); |
187 | 435k | let mut last_range = None; |
188 | 4.20M | for i in 0..self.bundles[to.index()].ranges.len() { |
189 | 4.20M | let entry = self.bundles[to.index()].ranges[i]; |
190 | 4.20M | if last_range.is_some() { |
191 | 0 | debug_assert!(last_range.unwrap() < entry.range); |
192 | 435k | } |
193 | 4.20M | last_range = Some(entry.range); |
194 | 4.20M | |
195 | 4.20M | if self.ranges[entry.index.index()].bundle == from { |
196 | 0 | self.annotate( |
197 | 0 | entry.range.from, |
198 | 0 | format!( |
199 | 0 | " MERGE range{} v{} from bundle{} to bundle{}", |
200 | 0 | entry.index.index(), |
201 | 0 | self.ranges[entry.index.index()].vreg.index(), |
202 | 0 | from.index(), |
203 | 0 | to.index(), |
204 | 0 | ), |
205 | 0 | ); |
206 | 4.20M | } |
207 | | |
208 | | trace!( |
209 | 0 | " -> merged result for bundle{}: range{}", |
210 | 0 | to.index(), |
211 | 0 | entry.index.index(), |
212 | | ); |
213 | | } |
214 | 0 | } |
215 | | |
216 | 435k | if self.bundles[from.index()].spillset != self.bundles[to.index()].spillset { |
217 | 435k | let from_vregs = core::mem::replace( |
218 | 435k | &mut self.spillsets[self.bundles[from.index()].spillset.index()].vregs, |
219 | 435k | smallvec![], |
220 | | ); |
221 | 435k | let to_vregs = &mut self.spillsets[self.bundles[to.index()].spillset.index()].vregs; |
222 | 3.50M | for vreg in from_vregs { |
223 | 3.06M | if !to_vregs.contains(&vreg) { |
224 | 3.06M | to_vregs.push(vreg); |
225 | 3.06M | } |
226 | | } |
227 | 0 | } |
228 | | |
229 | 435k | if self.bundles[from.index()].cached_stack() { |
230 | 1.72k | self.bundles[to.index()].set_cached_stack(); |
231 | 433k | } |
232 | 435k | if self.bundles[from.index()].cached_fixed() { |
233 | 32.3k | self.bundles[to.index()].set_cached_fixed(); |
234 | 402k | } |
235 | | |
236 | 435k | true |
237 | 989k | } |
Unexecuted instantiation: <regalloc2::ion::data_structures::Env<_>>::merge_bundles |
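
The overlap test above (source lines 67-96) is a linear two-pointer scan over two range lists that are each sorted by start point and internally non-overlapping; the real code additionally caps the scan at 200 steps to bound merge cost. Below is a minimal, self-contained sketch of that test. The `Range` struct, `lists_overlap` function, and `main` driver are simplified stand-ins for illustration only, not regalloc2 API; ranges are treated as half-open to match the `>=` comparisons above.

    // Half-open range, standing in for regalloc2's internal CodeRange.
    #[derive(Clone, Copy, Debug)]
    struct Range {
        from: u32, // inclusive
        to: u32,   // exclusive
    }

    /// True if any range in `a` overlaps any range in `b`. Both slices are
    /// assumed sorted by `from` and internally non-overlapping, as bundle
    /// range lists are. Two-pointer scan mirroring the loop above: whichever
    /// current range ends before the other begins is advanced; if neither
    /// does, the two ranges overlap.
    fn lists_overlap(a: &[Range], b: &[Range]) -> bool {
        let (mut i, mut j) = (0, 0);
        while i < a.len() && j < b.len() {
            if a[i].from >= b[j].to {
                j += 1;
            } else if b[j].from >= a[i].to {
                i += 1;
            } else {
                return true;
            }
        }
        false
    }

    fn main() {
        let a = [Range { from: 0, to: 4 }, Range { from: 10, to: 12 }];
        let b = [Range { from: 4, to: 10 }];
        assert!(!lists_overlap(&a, &b)); // touching half-open ranges do not overlap
        let c = [Range { from: 3, to: 5 }];
        assert!(lists_overlap(&a, &c));
    }
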
238 | | |
239 | 9.54k | pub fn merge_vreg_bundles(&mut self) { |
240 | | // Create a bundle for every vreg, initially. |
241 | 0 | trace!("merge_vreg_bundles: creating vreg bundles"); |
242 | 3.30M | for vreg in 0..self.vregs.len() { |
243 | 3.30M | let vreg = VRegIndex::new(vreg); |
244 | 3.30M | if self.vregs[vreg.index()].ranges.is_empty() { |
245 | 0 | continue; |
246 | 3.30M | } |
247 | 3.30M | |
248 | 3.30M | let bundle = self.create_bundle(); |
249 | 3.30M | self.bundles[bundle.index()].ranges = self.vregs[vreg.index()].ranges.clone(); |
250 | 0 | trace!("vreg v{} gets bundle{}", vreg.index(), bundle.index()); |
251 | 3.78M | for entry in &self.bundles[bundle.index()].ranges { |
252 | | trace!( |
253 | 0 | " -> with LR range{}: {:?}", |
254 | 0 | entry.index.index(), |
255 | | entry.range |
256 | | ); |
257 | 3.78M | self.ranges[entry.index.index()].bundle = bundle; |
258 | | } |
259 | | |
260 | 3.30M | let mut fixed = false; |
261 | 3.30M | let mut fixed_def = false; |
262 | 3.30M | let mut stack = false; |
263 | 3.78M | for entry in &self.bundles[bundle.index()].ranges { |
264 | 9.01M | for u in &self.ranges[entry.index.index()].uses { |
265 | 9.01M | if let OperandConstraint::FixedReg(_) = u.operand.constraint() { |
266 | 159k | fixed = true; |
267 | 159k | if u.operand.kind() == OperandKind::Def { |
268 | 15.0k | fixed_def = true; |
269 | 144k | } |
270 | 8.85M | } |
271 | 9.01M | if let OperandConstraint::Stack = u.operand.constraint() { |
272 | 3.97M | stack = true; |
273 | 5.03M | } |
274 | 9.01M | if fixed && stack && fixed_def { |
275 | 870 | break; |
276 | 9.01M | } |
277 | | } |
278 | | } |
279 | 3.30M | if fixed { |
280 | 119k | self.bundles[bundle.index()].set_cached_fixed(); |
281 | 3.18M | } |
282 | 3.30M | if fixed_def { |
283 | 15.0k | self.bundles[bundle.index()].set_cached_fixed_def(); |
284 | 3.28M | } |
285 | 3.30M | if stack { |
286 | 49.9k | self.bundles[bundle.index()].set_cached_stack(); |
287 | 3.25M | } |
288 | | |
289 | | // Create a spill set for this bundle (its slot is assigned later). |
290 | 3.30M | let ssidx = SpillSetIndex::new(self.spillsets.len()); |
291 | 3.30M | let reg = self.vreg(vreg); |
292 | 3.30M | let size = self.func.spillslot_size(reg.class()) as u8; |
293 | 3.30M | self.spillsets.push(SpillSet { |
294 | 3.30M | vregs: smallvec![vreg], |
295 | 3.30M | slot: SpillSlotIndex::invalid(), |
296 | 3.30M | size, |
297 | 3.30M | required: false, |
298 | 3.30M | class: reg.class(), |
299 | 3.30M | reg_hint: PReg::invalid(), |
300 | 3.30M | spill_bundle: LiveBundleIndex::invalid(), |
301 | 3.30M | splits: 0, |
302 | 3.30M | }); |
303 | 3.30M | self.bundles[bundle.index()].spillset = ssidx; |
304 | | } |
305 | | |
306 | 3.48M | for inst in 0..self.func.num_insts() { |
307 | 3.48M | let inst = Inst::new(inst); |
308 | | |
309 | | // Attempt to merge Reuse-constraint operand outputs with the |
310 | | // corresponding inputs. |
311 | 5.10M | for op in self.func.inst_operands(inst) { |
312 | 5.10M | if let OperandConstraint::Reuse(reuse_idx) = op.constraint() { |
313 | 627k | let src_vreg = op.vreg(); |
314 | 627k | let dst_vreg = self.func.inst_operands(inst)[reuse_idx].vreg(); |
315 | | |
316 | | trace!( |
317 | 0 | "trying to merge reused-input def: src {} to dst {}", |
318 | | src_vreg, |
319 | | dst_vreg |
320 | | ); |
321 | 627k | let src_bundle = |
322 | 627k | self.ranges[self.vregs[src_vreg.vreg()].ranges[0].index.index()].bundle; |
323 | 0 | debug_assert!(src_bundle.is_valid()); |
324 | 627k | let dest_bundle = |
325 | 627k | self.ranges[self.vregs[dst_vreg.vreg()].ranges[0].index.index()].bundle; |
326 | 0 | debug_assert!(dest_bundle.is_valid()); |
327 | 627k | self.merge_bundles(/* from */ dest_bundle, /* to */ src_bundle); |
328 | 4.47M | } |
329 | | } |
330 | | } |
331 | | |
332 | | // Attempt to merge blockparams with their inputs. |
333 | 362k | for i in 0..self.blockparam_outs.len() { |
334 | | let BlockparamOut { |
335 | 362k | from_vreg, to_vreg, .. |
336 | 362k | } = self.blockparam_outs[i]; |
337 | | trace!( |
338 | 0 | "trying to merge blockparam v{} with input v{}", |
339 | 0 | to_vreg.index(), |
340 | 0 | from_vreg.index() |
341 | | ); |
342 | 362k | let to_bundle = self.ranges[self.vregs[to_vreg.index()].ranges[0].index.index()].bundle; |
343 | 0 | debug_assert!(to_bundle.is_valid()); |
344 | 362k | let from_bundle = |
345 | 362k | self.ranges[self.vregs[from_vreg.index()].ranges[0].index.index()].bundle; |
346 | 0 | debug_assert!(from_bundle.is_valid()); |
347 | | trace!( |
348 | 0 | " -> from bundle{} to bundle{}", |
349 | 0 | from_bundle.index(), |
350 | 0 | to_bundle.index() |
351 | | ); |
352 | 362k | self.merge_bundles(from_bundle, to_bundle); |
353 | | } |
354 | | |
355 | 0 | trace!("done merging bundles"); |
356 | 9.54k | } |
Unexecuted instantiation: <regalloc2::ion::data_structures::Env<_>>::merge_vreg_bundles |
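
In `merge_vreg_bundles`, each fresh per-vreg bundle gets cached flags recording whether any use has a fixed-register constraint, whether any such use is a def, and whether any use is stack-constrained; the scan short-circuits once all three are set (source lines 260-287). A sketch of that scan, using hypothetical simplified `UseRecord`, `Constraint`, and `Kind` types in place of regalloc2's `Operand` machinery:

    // Hypothetical simplified stand-ins for operand constraint and kind.
    #[derive(Clone, Copy, PartialEq, Eq)]
    enum Constraint {
        Any,
        FixedReg(u8),
        Stack,
    }

    #[derive(Clone, Copy, PartialEq, Eq)]
    enum Kind {
        Def,
        Use,
    }

    struct UseRecord {
        constraint: Constraint,
        kind: Kind,
    }

    /// Returns (fixed, fixed_def, stack): whether any use is fixed-reg,
    /// whether any fixed-reg use is a def, and whether any use is
    /// stack-constrained. Stops early once all three flags are known.
    fn bundle_flags(uses: &[UseRecord]) -> (bool, bool, bool) {
        let (mut fixed, mut fixed_def, mut stack) = (false, false, false);
        for u in uses {
            if let Constraint::FixedReg(_) = u.constraint {
                fixed = true;
                if u.kind == Kind::Def {
                    fixed_def = true;
                }
            }
            if u.constraint == Constraint::Stack {
                stack = true;
            }
            if fixed && stack && fixed_def {
                break;
            }
        }
        (fixed, fixed_def, stack)
    }

    // e.g. a fixed-reg def plus a stack use sets all three flags:
    // bundle_flags(&[
    //     UseRecord { constraint: Constraint::FixedReg(0), kind: Kind::Def },
    //     UseRecord { constraint: Constraint::Stack, kind: Kind::Use },
    // ]) == (true, true, true)
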
357 | | |
358 | 0 | pub fn resolve_merged_lr(&self, mut lr: LiveRangeIndex) -> LiveRangeIndex { |
359 | 0 | let mut iter = 0; |
360 | 0 | while iter < 100 && self.ranges[lr.index()].merged_into.is_valid() { |
361 | 0 | lr = self.ranges[lr.index()].merged_into; |
362 | 0 | iter += 1; |
363 | 0 | } |
364 | 0 | lr |
365 | 0 | } |
366 | | |
367 | 6.85M | pub fn compute_bundle_prio(&self, bundle: LiveBundleIndex) -> u32 { |
368 | 6.85M | // The priority is simply the total "length" -- the number of |
369 | 6.85M | // instructions covered by all LiveRanges. |
370 | 6.85M | let mut total = 0; |
371 | 9.20M | for entry in &self.bundles[bundle.index()].ranges { |
372 | 9.20M | total += entry.range.len() as u32; |
373 | 9.20M | } |
374 | 6.85M | total |
375 | 6.85M | } |
Unexecuted instantiation: <regalloc2::ion::data_structures::Env<_>>::compute_bundle_prio |
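
`compute_bundle_prio` defines a bundle's priority as the total code length covered by its ranges. A tiny sketch under that definition, with half-open `(from, to)` pairs standing in for the real range entries (`bundle_prio` is a hypothetical helper, not regalloc2 API):

    // Priority = total number of program points covered by all ranges.
    fn bundle_prio(ranges: &[(u32, u32)]) -> u32 {
        ranges.iter().map(|&(from, to)| to - from).sum()
    }

    // e.g. bundle_prio(&[(0, 8), (12, 16)]) == 12
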
376 | | |
377 | 9.54k | pub fn queue_bundles(&mut self) { |
378 | 3.30M | for bundle in 0..self.bundles.len() { |
379 | 0 | trace!("enqueueing bundle{}", bundle); |
380 | 3.30M | if self.bundles[bundle].ranges.is_empty() { |
381 | 0 | trace!(" -> no ranges; skipping"); |
382 | 435k | continue; |
383 | 2.86M | } |
384 | 2.86M | let bundle = LiveBundleIndex::new(bundle); |
385 | 2.86M | let prio = self.compute_bundle_prio(bundle); |
386 | 0 | trace!(" -> prio {}", prio); |
387 | 2.86M | self.bundles[bundle.index()].prio = prio; |
388 | 2.86M | self.recompute_bundle_properties(bundle); |
389 | 2.86M | self.allocation_queue |
390 | 2.86M | .insert(bundle, prio as usize, PReg::invalid()); |
391 | | } |
392 | 9.54k | self.stats.merged_bundle_count = self.allocation_queue.heap.len(); |
393 | 9.54k | } |
Unexecuted instantiation: <regalloc2::ion::data_structures::Env<_>>::queue_bundles |
394 | | } |
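
`queue_bundles` inserts every bundle that still owns ranges into the allocation queue, keyed by this priority (plus a `PReg::invalid()` hint). As a rough sketch of the ordering only, assuming a plain max-heap in place of the allocator's hinted priority queue (`enqueue` is a hypothetical helper; the real queue also carries register hints and lives in a no_std/alloc crate):

    use std::collections::BinaryHeap;

    // Enqueue bundles keyed by priority; in this sketch, priority 0 stands in
    // for "no ranges left" (bundles fully merged into another bundle), which
    // the real code detects via an empty range list.
    fn enqueue(prios: &[u32]) -> BinaryHeap<(u32, usize)> {
        let mut queue = BinaryHeap::new();
        for (bundle, &prio) in prios.iter().enumerate() {
            if prio == 0 {
                continue; // Skip emptied bundles.
            }
            // Max-heap on (prio, index): bundles covering the most code pop first.
            queue.push((prio, bundle));
        }
        queue
    }
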