use super::{
    spill_weight_from_constraint, Env, LiveBundleIndex, LiveBundleVec, LiveRangeFlag,
    LiveRangeIndex, LiveRangeKey, LiveRangeList, LiveRangeListEntry, PRegIndex, RegTraversalIter,
    Requirement, SpillWeight, UseList, VRegIndex,
};
use crate::{
    ion::data_structures::{
        CodeRange, Use, BUNDLE_MAX_NORMAL_SPILL_WEIGHT, MAX_SPLITS_PER_SPILLSET,
        MINIMAL_BUNDLE_SPILL_WEIGHT, MINIMAL_FIXED_BUNDLE_SPILL_WEIGHT,
    },
    Allocation, Function, Inst, InstPosition, OperandConstraint, OperandKind, PReg, ProgPoint,
    RegAllocError,
};
use core::fmt::Debug;
use smallvec::{smallvec, SmallVec};

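/// Result of attempting to allocate a bundle to a physical register.
///
/// `Conflict` carries the set of conflicting bundles and the first point of
/// conflict; `ConflictWithFixed` carries the maximum conflict weight seen so
/// far and the point of the fixed-reservation conflict; `ConflictHighCost`
/// indicates that the probe was abandoned early because the accumulated
/// conflict weight exceeded the caller-provided bound.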
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum AllocRegResult<'a> {
    Allocated(Allocation),
    Conflict(&'a [LiveBundleIndex], ProgPoint),
    ConflictWithFixed(u32, ProgPoint),
    ConflictHighCost,
}

impl<'a, F: Function> Env<'a, F> {
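    /// Drain the allocation queue, processing each bundle (with its register
    /// hint), then record final statistics.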
    pub fn process_bundles(&mut self) -> Result<(), RegAllocError> {
        while let Some((bundle, hint)) = self.ctx.allocation_queue.pop() {
            self.ctx.output.stats.process_bundle_count += 1;
            self.process_bundle(bundle, hint)?;
        }
        self.ctx.output.stats.final_liverange_count = self.ranges.len();
        self.ctx.output.stats.final_bundle_count = self.bundles.len();
        self.ctx.output.stats.spill_bundle_count = self.spilled_bundles.len();

        Ok(())
    }

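    /// Try to allocate `bundle` to physical register `reg`.
    ///
    /// Walks the bundle's live ranges against the register's allocation map
    /// (a BTree keyed by range), looking for overlaps. If there are none, the
    /// bundle is assigned to the register; otherwise the conflicting bundles
    /// (or fixed reservation) are reported. If `max_allowable_cost` is given,
    /// the scan bails out early with `ConflictHighCost` once the accumulated
    /// conflict weight exceeds it.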
    pub fn try_to_allocate_bundle_to_reg<'b>(
        &mut self,
        bundle: LiveBundleIndex,
        reg: PRegIndex,
        max_allowable_cost: Option<u32>,
        conflicts: &'b mut LiveBundleVec,
    ) -> AllocRegResult<'b> {
        trace!("try_to_allocate_bundle_to_reg: {:?} -> {:?}", bundle, reg);
        conflicts.clear();
        self.ctx.conflict_set.clear();
        let mut max_conflict_weight = 0;
        let bundle_ranges = &self.ctx.bundles[bundle].ranges;
        let from_key = LiveRangeKey::from_range(&CodeRange {
            from: bundle_ranges.first().unwrap().range.from,
            to: bundle_ranges.first().unwrap().range.from,
        });
        let mut preg_range_iter = self.ctx.pregs[reg.index()]
            .allocations
            .btree
            .range(from_key..)
            .peekable();
        trace!(
            "alloc map for {:?} in range {:?}..: {:?}",
            reg,
            from_key,
            self.ctx.pregs[reg.index()].allocations.btree
        );
        let mut first_conflict: Option<ProgPoint> = None;

        'ranges: for entry in bundle_ranges {
            trace!(" -> range LR {:?}: {:?}", entry.index, entry.range);
            let key = LiveRangeKey::from_range(&entry.range);

            let mut skips = 0;
            'alloc: loop {
                trace!(" -> PReg range {:?}", preg_range_iter.peek());

                if preg_range_iter.peek().is_some() && *preg_range_iter.peek().unwrap().0 < key {
                    trace!(
                        "Skipping PReg range {:?}",
                        preg_range_iter.peek().unwrap().0
                    );
                    preg_range_iter.next();
                    skips += 1;
                    if skips >= 16 {
                        let from_pos = entry.range.from;
                        let from_key = LiveRangeKey::from_range(&CodeRange {
                            from: from_pos,
                            to: from_pos,
                        });
                        preg_range_iter = self.ctx.pregs[reg.index()]
                            .allocations
                            .btree
                            .range(from_key..)
                            .peekable();
                        skips = 0;
                    }
                    continue 'alloc;
                }
                skips = 0;

                if preg_range_iter.peek().is_none() {
                    trace!(" -> no more PReg allocations; so no conflict possible!");
                    break 'ranges;
                }

                if *preg_range_iter.peek().unwrap().0 > key {
                    trace!(
                        " -> next PReg allocation is at {:?}; moving to next VReg range",
                        preg_range_iter.peek().unwrap().0
                    );
                    break 'alloc;
                }

                let preg_key = *preg_range_iter.peek().unwrap().0;
                debug_assert_eq!(preg_key, key);
                let preg_range = preg_range_iter.next().unwrap().1;

                trace!(" -> btree contains range {:?} that overlaps", preg_range);
                if preg_range.is_valid() {
                    trace!(" -> from vreg {:?}", self.ctx.ranges[*preg_range].vreg);
                    let conflict_bundle = self.ctx.ranges[*preg_range].bundle;
                    trace!(" -> conflict bundle {:?}", conflict_bundle);
                    if self.ctx.conflict_set.insert(conflict_bundle) {
                        conflicts.push(conflict_bundle);
                        max_conflict_weight = core::cmp::max(
                            max_conflict_weight,
                            self.ctx.bundles[conflict_bundle].cached_spill_weight(),
                        );
                        if max_allowable_cost.is_some()
                            && max_conflict_weight > max_allowable_cost.unwrap()
                        {
                            trace!(" -> reached high cost, retrying early");
                            return AllocRegResult::ConflictHighCost;
                        }
                    }

                    if first_conflict.is_none() {
                        first_conflict = Some(ProgPoint::from_index(core::cmp::max(
                            preg_key.from,
                            key.from,
                        )));
                    }
                } else {
                    trace!(" -> conflict with fixed reservation");
                    return AllocRegResult::ConflictWithFixed(
                        max_conflict_weight,
                        ProgPoint::from_index(preg_key.from),
                    );
                }
            }
        }

        if conflicts.len() > 0 {
            return AllocRegResult::Conflict(conflicts, first_conflict.unwrap());
        }

        let preg = PReg::from_index(reg.index());
        trace!(" -> bundle {:?} assigned to preg {:?}", bundle, preg);
        self.ctx.bundles[bundle].allocation = Allocation::reg(preg);
        for entry in &self.ctx.bundles[bundle].ranges {
            let key = LiveRangeKey::from_range(&entry.range);
            let res = self.ctx.pregs[reg.index()]
                .allocations
                .btree
                .insert(key, entry.index);

            debug_assert!(res.is_none());
        }

        AllocRegResult::Allocated(Allocation::reg(preg))
    }

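    /// Evict `bundle` from its current register allocation: remove its live
    /// ranges from the register's allocation map and put the bundle back on
    /// the allocation queue at its recorded priority, with no register hint.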
    pub fn evict_bundle(&mut self, bundle: LiveBundleIndex) {
        trace!(
            "evicting bundle {:?}: alloc {:?}",
            bundle,
            self.ctx.bundles[bundle].allocation
        );
        let preg = match self.ctx.bundles[bundle].allocation.as_reg() {
            Some(preg) => preg,
            None => {
                trace!(
                    " -> has no allocation! {:?}",
                    self.ctx.bundles[bundle].allocation
                );
                return;
            }
        };
        let preg_idx = PRegIndex::new(preg.index());
        self.ctx.bundles[bundle].allocation = Allocation::none();
        for entry in &self.ctx.bundles[bundle].ranges {
            trace!(" -> removing LR {:?} from reg {:?}", entry.index, preg_idx);
            self.ctx.pregs[preg_idx.index()]
                .allocations
                .btree
                .remove(&LiveRangeKey::from_range(&entry.range));
        }
        let prio = self.ctx.bundles[bundle].prio;
        trace!(" -> prio {}; back into queue", prio);
        self.ctx
            .allocation_queue
            .insert(bundle, prio as usize, PReg::invalid());
    }

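    /// Return the cached spill weight of `bundle`.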
    pub fn bundle_spill_weight(&self, bundle: LiveBundleIndex) -> u32 {
        self.ctx.bundles[bundle].cached_spill_weight()
    }

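    /// Return the maximum cached spill weight among `bundles`, or 0 if the
    /// set is empty.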
    pub fn maximum_spill_weight_in_bundle_set(&self, bundles: &[LiveBundleIndex]) -> u32 {
        trace!("maximum_spill_weight_in_bundle_set: {:?}", bundles);
        let m = bundles
            .iter()
            .map(|&b| {
                let w = self.ctx.bundles[b].cached_spill_weight();
                trace!("bundle{}: {}", b.index(), w);
                w
            })
            .max()
            .unwrap_or(0);
        trace!(" -> max: {}", m);
        m
    }

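    /// Recompute the cached properties of `bundle`: its priority, probe
    /// limit, spill weight, and whether it is minimal, fixed, a fixed def,
    /// or stack-constrained. A bundle is minimal when it has no vreg, or a
    /// single range whose extent fits within the minimal range of its only
    /// use (or has no uses at all); minimal bundles get the dedicated
    /// `MINIMAL_BUNDLE_SPILL_WEIGHT` / `MINIMAL_FIXED_BUNDLE_SPILL_WEIGHT`
    /// constants rather than a weight computed from uses and priority.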
    pub fn recompute_bundle_properties(&mut self, bundle: LiveBundleIndex) {
        trace!("recompute bundle properties: bundle {:?}", bundle);

        let minimal;
        let mut fixed = false;
        let mut fixed_def = false;
        let mut stack = false;
        let bundledata = &self.ctx.bundles[bundle];
        let num_ranges = bundledata.ranges.len();
        let first_range = bundledata.ranges[0].index;
        let first_range_data = &self.ctx.ranges[first_range];

        self.ctx.bundles[bundle].prio = self.compute_bundle_prio(bundle);
        self.ctx.bundles[bundle].limit = self.compute_bundle_limit(bundle);

        if first_range_data.vreg.is_invalid() {
            trace!(" -> no vreg; minimal and fixed");
            minimal = true;
            fixed = true;
        } else if num_ranges == 1 {
            for u in &first_range_data.uses {
                trace!(" -> use: {:?}", u);
                if let OperandConstraint::FixedReg(_) = u.operand.constraint() {
                    trace!(" -> fixed operand at {:?}: {:?}", u.pos, u.operand);
                    fixed = true;
                    if u.operand.kind() == OperandKind::Def {
                        trace!(" -> is fixed def");
                        fixed_def = true;
                    }
                }
                if let OperandConstraint::Stack = u.operand.constraint() {
                    trace!(" -> stack operand at {:?}: {:?}", u.pos, u.operand);
                    stack = true;
                }
                if stack && fixed {
                    break;
                }
            }
            minimal = match &first_range_data.uses[..] {
                [] => true,
                [only_use] => {
                    let min_range = minimal_range_for_use(only_use);
                    min_range.contains(&first_range_data.range)
                }
                _ => false,
            };
            trace!(" -> minimal: {}", minimal);
        } else {
            minimal = false;
        }

        let spill_weight = if minimal {
            if fixed {
                trace!(" -> fixed and minimal");
                MINIMAL_FIXED_BUNDLE_SPILL_WEIGHT
            } else {
                trace!(" -> non-fixed and minimal");
                MINIMAL_BUNDLE_SPILL_WEIGHT
            }
        } else {
            let mut total = SpillWeight::zero();
            for entry in &self.ctx.bundles[bundle].ranges {
                let range_data = &self.ctx.ranges[entry.index];
                trace!(
                    " -> uses spill weight: +{:?}",
                    range_data.uses_spill_weight()
                );
                total = total + range_data.uses_spill_weight();
            }

            if self.ctx.bundles[bundle].prio > 0 {
                let final_weight = (total.to_f32() as u32) / self.ctx.bundles[bundle].prio;
                trace!(
                    " -> dividing by prio {}; final weight {}",
                    self.ctx.bundles[bundle].prio,
                    final_weight
                );
                core::cmp::min(BUNDLE_MAX_NORMAL_SPILL_WEIGHT, final_weight)
            } else {
                0
            }
        };

        self.ctx.bundles[bundle].set_cached_spill_weight_and_props(
            spill_weight,
            minimal,
            fixed,
            fixed_def,
            stack,
        );
    }

    pub fn minimal_bundle(&self, bundle: LiveBundleIndex) -> bool {
        self.ctx.bundles[bundle].cached_minimal()
    }

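    /// Recompute the cached spill weight of a single live range by summing
    /// the weights of its uses, and set the `StartsAtDef` flag if its first
    /// use is a def.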
    pub fn recompute_range_properties(&mut self, range: LiveRangeIndex) {
        let rangedata = &mut self.ctx.ranges[range];
        let mut w = SpillWeight::zero();
        for u in &rangedata.uses {
            w = w + SpillWeight::from_bits(u.weight);
            trace!("range{}: use {:?}", range.index(), u);
        }
        rangedata.set_uses_spill_weight(w);
        if rangedata.uses.len() > 0 && rangedata.uses[0].operand.kind() == OperandKind::Def {
            rangedata.set_flag(LiveRangeFlag::StartsAtDef);
        }
    }

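    /// Return the spill bundle associated with `bundle`'s spillset, creating
    /// it (and registering it in `spilled_bundles`) if `create_if_absent` is
    /// true and it does not yet exist.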
    pub fn get_or_create_spill_bundle(
        &mut self,
        bundle: LiveBundleIndex,
        create_if_absent: bool,
    ) -> Option<LiveBundleIndex> {
        let ssidx = self.ctx.bundles[bundle].spillset;
        let idx = self.ctx.spillsets[ssidx].spill_bundle;
        if idx.is_valid() {
            Some(idx)
        } else if create_if_absent {
            let idx = self.ctx.bundles.add(self.ctx.bump());
            self.ctx.spillsets[ssidx].spill_bundle = idx;
            self.ctx.bundles[idx].spillset = ssidx;
            self.ctx.spilled_bundles.push(idx);
            Some(idx)
        } else {
            None
        }
    }

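    /// Split `bundle` into two bundles at (approximately) `split_at` and
    /// requeue both, with `hint` as the register hint for the first half.
    ///
    /// The split point is nudged so that it lies strictly inside the bundle;
    /// if the bundle spans only one instruction, or the spillset has already
    /// been split `MAX_SPLITS_PER_SPILLSET` times, we fall back to
    /// `split_into_minimal_bundles` instead. When
    /// `trim_ends_into_spill_bundle` is set, use-free regions at the tail of
    /// the first half and the head of the second half are moved into the
    /// spillset's spill bundle.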
    pub fn split_and_requeue_bundle(
        &mut self,
        bundle: LiveBundleIndex,
        mut split_at: ProgPoint,
        hint: PReg,
        mut trim_ends_into_spill_bundle: bool,
    ) {
        self.ctx.output.stats.splits += 1;
        trace!(
            "split bundle {bundle:?} at {split_at:?} and requeue with reg hint (for first part) {hint:?}"
        );

        let spillset = self.ctx.bundles[bundle].spillset;

        if self.ctx.spillsets[spillset].splits >= MAX_SPLITS_PER_SPILLSET {
            self.split_into_minimal_bundles(bundle, hint);
            return;
        }
        self.ctx.spillsets[spillset].splits += 1;

        debug_assert!(!self.ctx.bundles[bundle].ranges.is_empty());
        let bundle_start = self.ctx.bundles[bundle].ranges.first().unwrap().range.from;
        debug_assert!(split_at >= bundle_start);
        let bundle_end = self.ctx.bundles[bundle].ranges.last().unwrap().range.to;
        debug_assert!(split_at < bundle_end);

        trace!(
            "bundle_start = {:?} bundle_end = {:?}",
            bundle_start,
            bundle_end
        );

        if bundle_end.prev().inst() == bundle_start.inst() {
            trace!(" -> spans only one inst; splitting into minimal bundles");
            self.split_into_minimal_bundles(bundle, hint);
            return;
        }

        if split_at == bundle_start {
            let mut first_use = None;
            'outer: for entry in &self.ctx.bundles[bundle].ranges {
                for u in &self.ctx.ranges[entry.index].uses {
                    first_use = Some(u.pos);
                    break 'outer;
                }
            }
            trace!(" -> first use loc is {:?}", first_use);
            split_at = match first_use {
                Some(pos) => {
                    if pos.inst() == bundle_start.inst() {
                        ProgPoint::before(pos.inst().next())
                    } else {
                        ProgPoint::before(pos.inst())
                    }
                }
                None => ProgPoint::before(
                    self.ctx.bundles[bundle]
                        .ranges
                        .first()
                        .unwrap()
                        .range
                        .from
                        .inst()
                        .next(),
                ),
            };
            trace!(
                "split point is at bundle start; advancing to {:?}",
                split_at
            );
        } else {
            if split_at.pos() == InstPosition::After {
                split_at = split_at.next();
            }
            if split_at >= bundle_end {
                split_at = split_at.prev().prev();
            }
        }

        debug_assert!(split_at > bundle_start && split_at < bundle_end);

        trace!(" -> LRs: {:?}", self.ctx.bundles[bundle].ranges);

        let mut last_lr_in_old_bundle_idx = 0;
        let mut first_lr_in_new_bundle_idx = 0;
        for (i, entry) in self.ctx.bundles[bundle].ranges.iter().enumerate() {
            if split_at > entry.range.from {
                last_lr_in_old_bundle_idx = i;
                first_lr_in_new_bundle_idx = i;
            }
            if split_at < entry.range.to {
                first_lr_in_new_bundle_idx = i;

                if self.ctx.bundles[bundle].cached_fixed() {
                    for u in &self.ctx.ranges[entry.index].uses {
                        if u.pos < split_at {
                            continue;
                        }

                        if matches!(u.operand.constraint(), OperandConstraint::FixedReg { .. }) {
                            split_at = ProgPoint::before(u.pos.inst());

                            if split_at > entry.range.from {
                                last_lr_in_old_bundle_idx = i;
                            }

                            trace!(" -> advancing split point to {split_at:?}");

                            trim_ends_into_spill_bundle = false;

                            break;
                        }
                    }
                }

                break;
            }
        }

        trace!(
            " -> last LR in old bundle: LR {:?}",
            self.ctx.bundles[bundle].ranges[last_lr_in_old_bundle_idx]
        );
        trace!(
            " -> first LR in new bundle: LR {:?}",
            self.ctx.bundles[bundle].ranges[first_lr_in_new_bundle_idx]
        );

        let mut new_lr_list: LiveRangeList = LiveRangeList::new_in(self.ctx.bump());
        new_lr_list.extend(
            self.ctx.bundles[bundle]
                .ranges
                .iter()
                .cloned()
                .skip(first_lr_in_new_bundle_idx),
        );
        self.ctx.bundles[bundle]
            .ranges
            .truncate(last_lr_in_old_bundle_idx + 1);
        self.ctx.bundles[bundle].ranges.shrink_to_fit();

        if split_at > new_lr_list[0].range.from {
            debug_assert_eq!(last_lr_in_old_bundle_idx, first_lr_in_new_bundle_idx);
            let orig_lr = new_lr_list[0].index;
            let new_lr = self.ctx.ranges.add(
                CodeRange {
                    from: split_at,
                    to: new_lr_list[0].range.to,
                },
                self.ctx.bump(),
            );
            self.ctx.ranges[new_lr].vreg = self.ranges[orig_lr].vreg;
            trace!(" -> splitting LR {:?} into {:?}", orig_lr, new_lr);
            let first_use = self.ctx.ranges[orig_lr]
                .uses
                .iter()
                .position(|u| u.pos >= split_at)
                .unwrap_or(self.ctx.ranges[orig_lr].uses.len());
            let mut rest_uses = UseList::new_in(self.ctx.bump());
            rest_uses.extend(
                self.ctx.ranges[orig_lr]
                    .uses
                    .iter()
                    .cloned()
                    .skip(first_use),
            );
            self.ctx.ranges[new_lr].uses = rest_uses;
            self.ctx.ranges[orig_lr].uses.truncate(first_use);
            self.ctx.ranges[orig_lr].uses.shrink_to_fit();
            self.recompute_range_properties(orig_lr);
            self.recompute_range_properties(new_lr);
            new_lr_list[0].index = new_lr;
            new_lr_list[0].range = self.ctx.ranges[new_lr].range;
            self.ctx.ranges[orig_lr].range.to = split_at;
            self.ctx.bundles[bundle].ranges[last_lr_in_old_bundle_idx].range =
                self.ctx.ranges[orig_lr].range;

            self.ctx.vregs[self.ctx.ranges[new_lr].vreg]
                .ranges
                .push(LiveRangeListEntry {
                    range: self.ctx.ranges[new_lr].range,
                    index: new_lr,
                });
        }

        let new_bundle = self.ctx.bundles.add(self.ctx.bump());
        trace!(" -> creating new bundle {:?}", new_bundle);
        self.ctx.bundles[new_bundle].spillset = spillset;
        for entry in &new_lr_list {
            self.ctx.ranges[entry.index].bundle = new_bundle;
        }
        self.ctx.bundles[new_bundle].ranges = new_lr_list;

        if trim_ends_into_spill_bundle {
            while let Some(entry) = self.ctx.bundles[bundle].ranges.last().cloned() {
                let end = entry.range.to;
                let vreg = self.ctx.ranges[entry.index].vreg;
                let last_use = self.ctx.ranges[entry.index].uses.last().map(|u| u.pos);
                if last_use.is_none() {
                    let spill = self
                        .get_or_create_spill_bundle(bundle, true)
                        .unwrap();
                    trace!(
                        " -> bundle {:?} range {:?}: no uses; moving to spill bundle {:?}",
                        bundle,
                        entry.index,
                        spill
                    );
                    self.ctx.bundles[spill].ranges.push(entry);
                    self.ctx.bundles[bundle].ranges.pop();
                    self.ctx.ranges[entry.index].bundle = spill;
                    continue;
                }
                let last_use = last_use.unwrap();
                let split = ProgPoint::before(last_use.inst().next());
                if split < end {
                    let spill = self
                        .get_or_create_spill_bundle(bundle, true)
                        .unwrap();
                    self.ctx.bundles[bundle].ranges.last_mut().unwrap().range.to = split;
                    self.ctx.ranges[self.ctx.bundles[bundle].ranges.last().unwrap().index]
                        .range
                        .to = split;
                    let range = CodeRange {
                        from: split,
                        to: end,
                    };
                    let empty_lr = self.ctx.ranges.add(range, self.ctx.bump());
                    self.ctx.bundles[spill].ranges.push(LiveRangeListEntry {
                        range,
                        index: empty_lr,
                    });
                    self.ctx.ranges[empty_lr].bundle = spill;
                    self.ctx.vregs[vreg].ranges.push(LiveRangeListEntry {
                        range,
                        index: empty_lr,
                    });
                    trace!(
                        " -> bundle {:?} range {:?}: last use implies split point {:?}",
                        bundle,
                        entry.index,
                        split
                    );
                    trace!(
                        " -> moving trailing empty region to new spill bundle {:?} with new LR {:?}",
                        spill,
                        empty_lr
                    );
                }
                break;
            }
            while let Some(entry) = self.ctx.bundles[new_bundle].ranges.first().cloned() {
                if self.ctx.ranges[entry.index].has_flag(LiveRangeFlag::StartsAtDef) {
                    break;
                }
                let start = entry.range.from;
                let vreg = self.ctx.ranges[entry.index].vreg;
                let first_use = self.ctx.ranges[entry.index].uses.first().map(|u| u.pos);
                if first_use.is_none() {
                    let spill = self
                        .get_or_create_spill_bundle(new_bundle, true)
                        .unwrap();
                    trace!(
                        " -> bundle {:?} range {:?}: no uses; moving to spill bundle {:?}",
                        new_bundle,
                        entry.index,
                        spill
                    );
                    self.ctx.bundles[spill].ranges.push(entry);
                    self.ctx.bundles[new_bundle].ranges.drain(..1);
                    self.ctx.ranges[entry.index].bundle = spill;
                    continue;
                }
                let first_use = first_use.unwrap();
                let split = ProgPoint::before(first_use.inst());
                if split > start {
                    let spill = self
                        .get_or_create_spill_bundle(new_bundle, true)
                        .unwrap();
                    self.ctx.bundles[new_bundle]
                        .ranges
                        .first_mut()
                        .unwrap()
                        .range
                        .from = split;
                    self.ctx.ranges[self.ctx.bundles[new_bundle].ranges.first().unwrap().index]
                        .range
                        .from = split;
                    let range = CodeRange {
                        from: start,
                        to: split,
                    };
                    let empty_lr = self.ctx.ranges.add(range, self.ctx.bump());
                    self.ctx.bundles[spill].ranges.push(LiveRangeListEntry {
                        range,
                        index: empty_lr,
                    });
                    self.ctx.ranges[empty_lr].bundle = spill;
                    self.ctx.vregs[vreg].ranges.push(LiveRangeListEntry {
                        range,
                        index: empty_lr,
                    });
                    trace!(
                        " -> bundle {:?} range {:?}: first use implies split point {:?}",
                        bundle,
                        entry.index,
                        first_use,
                    );
                    trace!(
                        " -> moving leading empty region to new spill bundle {:?} with new LR {:?}",
                        spill,
                        empty_lr
                    );
                }
                break;
            }
        }

        if self.ctx.bundles[bundle].ranges.len() > 0 {
            self.recompute_bundle_properties(bundle);
            let prio = self.ctx.bundles[bundle].prio;
            self.ctx
                .allocation_queue
                .insert(bundle, prio as usize, hint);
        }
        if self.ctx.bundles[new_bundle].ranges.len() > 0 {
            self.recompute_bundle_properties(new_bundle);
            let prio = self.ctx.bundles[new_bundle].prio;
            self.ctx
                .allocation_queue
                .insert(new_bundle, prio as usize, hint);
        }
    }

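    /// Split `bundle` into minimal bundles: one new single-use bundle per
    /// constrained use, with `Any`-constrained uses and the remaining extent
    /// of each range moved into the spillset's spill bundle. Used when a
    /// bundle spans only one instruction or its spillset has exhausted its
    /// split budget (`MAX_SPLITS_PER_SPILLSET`).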
    pub fn split_into_minimal_bundles(&mut self, bundle: LiveBundleIndex, hint: PReg) {
        assert_eq!(self.ctx.scratch_removed_lrs_vregs.len(), 0);
        self.ctx.scratch_removed_lrs.clear();

        let mut new_lrs: SmallVec<[(VRegIndex, LiveRangeIndex); 16]> = smallvec![];
        let mut new_bundles: SmallVec<[LiveBundleIndex; 16]> = smallvec![];

        let spillset = self.ctx.bundles[bundle].spillset;
        let spill = self
            .get_or_create_spill_bundle(bundle, true)
            .unwrap();

        trace!("Splitting bundle {bundle:?} into minimal bundles with reg hint {hint:?}");

        let mut spill_uses = UseList::new_in(self.ctx.bump());

        let empty_vec = LiveRangeList::new_in(self.ctx.bump());
        for entry in core::mem::replace(&mut self.ctx.bundles[bundle].ranges, empty_vec) {
            let vreg = self.ctx.ranges[entry.index].vreg;

            self.ctx.scratch_removed_lrs.insert(entry.index);
            self.ctx.scratch_removed_lrs_vregs.insert(vreg);

            trace!(" -> removing old LR {:?} for vreg {:?}", entry.index, vreg);

            let mut spill_range = entry.range;
            let mut spill_starts_def = false;

            let empty_vec = UseList::new_in(self.ctx.bump());
            for u in core::mem::replace(&mut self.ctx.ranges[entry.index].uses, empty_vec) {
                trace!(" -> use {:?}", u);

                let is_def = u.operand.kind() == OperandKind::Def;

                if u.operand.constraint() == OperandConstraint::Any {
                    trace!(" -> migrating this any-constrained use to the spill range");
                    spill_uses.push(u);

                    spill_starts_def = spill_starts_def || is_def;

                    continue;
                }

                if is_def {
                    trace!(" -> moving the spill range forward by one");
                    spill_range.from = ProgPoint::before(u.pos.inst().next());
                }

                let cr = minimal_range_for_use(&u);
                let lr = self.ctx.ranges.add(cr, self.ctx.bump());
                new_lrs.push((vreg, lr));
                self.ctx.ranges[lr].uses.push(u);
                self.ctx.ranges[lr].vreg = vreg;

                let new_bundle = self.ctx.bundles.add(self.ctx.bump());
                self.ctx.ranges[lr].bundle = new_bundle;
                self.ctx.bundles[new_bundle].spillset = spillset;
                self.ctx.bundles[new_bundle]
                    .ranges
                    .push(LiveRangeListEntry {
                        range: cr,
                        index: lr,
                    });
                new_bundles.push(new_bundle);

                if is_def {
                    self.ctx.ranges[lr].set_flag(LiveRangeFlag::StartsAtDef);
                }

                trace!(
                    " -> created new LR {:?} range {:?} with new bundle {:?} for this use",
                    lr,
                    cr,
                    new_bundle
                );
            }

            if !spill_range.is_empty() {
                let spill_lr = self.ctx.ranges.add(spill_range, self.ctx.bump());
                self.ctx.ranges[spill_lr].vreg = vreg;
                self.ctx.ranges[spill_lr].bundle = spill;
                self.ctx.ranges[spill_lr].uses.extend(spill_uses.drain(..));
                new_lrs.push((vreg, spill_lr));

                if spill_starts_def {
                    self.ctx.ranges[spill_lr].set_flag(LiveRangeFlag::StartsAtDef);
                }

                self.ctx.bundles[spill].ranges.push(LiveRangeListEntry {
                    range: spill_range,
                    index: spill_lr,
                });
                self.ctx.ranges[spill_lr].bundle = spill;
                trace!(
                    " -> added spill range {:?} in new LR {:?} in spill bundle {:?}",
                    spill_range,
                    spill_lr,
                    spill
                );
            } else {
                assert!(spill_uses.is_empty());
            }
        }

        for vreg in self.ctx.scratch_removed_lrs_vregs.drain() {
            let lrs = &mut self.ctx.scratch_removed_lrs;
            self.ctx.vregs[vreg]
                .ranges
                .retain(|entry| !lrs.contains(&entry.index));
        }

        for (vreg, lr) in new_lrs {
            let range = self.ctx.ranges[lr].range;
            let entry = LiveRangeListEntry { range, index: lr };
            self.ctx.vregs[vreg].ranges.push(entry);
        }

        for bundle in new_bundles {
            if self.ctx.bundles[bundle].ranges.len() > 0 {
                self.recompute_bundle_properties(bundle);
                let prio = self.ctx.bundles[bundle].prio;
                self.ctx
                    .allocation_queue
                    .insert(bundle, prio as usize, hint);
            }
        }
    }

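    /// Allocate (or spill/split) a single bundle, using `hint` (falling back
    /// to the spillset's hint) as the preferred register.
    ///
    /// Computes the bundle's requirement, then probes candidate registers.
    /// If every probe conflicts, chooses between evicting the cheapest
    /// conflicting bundle set and splitting this bundle at the lowest-cost
    /// conflict point, and retries. A requirement conflict triggers a split;
    /// a `Stack` requirement is satisfied via the spillset; an `Any`
    /// requirement sends the bundle to its spill bundle or the
    /// spilled-bundles list.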
    pub fn process_bundle(
        &mut self,
        bundle: LiveBundleIndex,
        hint: PReg,
    ) -> Result<(), RegAllocError> {
        let class = self.ctx.spillsets[self.bundles[bundle].spillset].class;

        let mut hint = if hint != PReg::invalid() {
            hint
        } else {
            self.ctx.spillsets[self.bundles[bundle].spillset].hint
        };
        if self.ctx.pregs[hint.index()].is_stack {
            hint = PReg::invalid();
        }
        trace!("process_bundle: bundle {bundle:?} hint {hint:?}");

        let req = match self.compute_requirement(bundle) {
            Ok(req) => req,
            Err(conflict) => {
                trace!("conflict!: {:?}", conflict);
                debug_assert!(
                    !self.minimal_bundle(bundle),
                    "Minimal bundle with conflict!"
                );
                self.split_and_requeue_bundle(
                    bundle,
                    conflict.suggested_split_point(),
                    hint,
                    conflict.should_trim_edges_around_split(),
                );
                return Ok(());
            }
        };

        match req {
            Requirement::Any => {
                if let Some(spill) =
                    self.get_or_create_spill_bundle(bundle, false)
                {
                    let empty_vec = LiveRangeList::new_in(self.ctx.bump());
                    let mut list =
                        core::mem::replace(&mut self.ctx.bundles[bundle].ranges, empty_vec);
                    for entry in &list {
                        self.ctx.ranges[entry.index].bundle = spill;
                    }
                    self.ctx.bundles[spill].ranges.extend(list.drain(..));
                    return Ok(());
                }
            }
            _ => {}
        }

        let mut attempts = 0;
        let mut scratch = core::mem::take(&mut self.ctx.scratch_conflicts);
        let mut lowest_cost_evict_conflict_set = core::mem::take(&mut self.ctx.scratch_bundle);
        'outer: loop {
            attempts += 1;
            trace!("attempt {}, req {:?}", attempts, req);
            debug_assert!(attempts < 100 * self.func.num_insts());

            let fixed_preg = match req {
                Requirement::FixedReg(preg) | Requirement::FixedStack(preg) => Some(preg),
                Requirement::Register | Requirement::Limit(..) => None,
                Requirement::Stack => {
                    let spillset = self.bundles[bundle].spillset;
                    self.spillsets[spillset].required = true;
                    return Ok(());
                }

                Requirement::Any => {
                    self.ctx.spilled_bundles.push(bundle);
                    break;
                }
            };
            let mut lowest_cost_evict_conflict_cost: Option<u32> = None;
            lowest_cost_evict_conflict_set.clear();

            let mut lowest_cost_split_conflict_cost: Option<u32> = None;
            let mut lowest_cost_split_conflict_point = ProgPoint::before(Inst::new(0));
            let mut lowest_cost_split_conflict_reg = PReg::invalid();

            let scan_offset = self.ctx.ranges[self.bundles[bundle].ranges[0].index]
                .range
                .from
                .inst()
                .index()
                + bundle.index();

            self.ctx.output.stats.process_bundle_reg_probe_start_any += 1;
            let limit = self.bundles[bundle].limit.map(|l| l as usize);
            for preg in RegTraversalIter::new(
                self.env,
                class,
                fixed_preg,
                hint.as_valid(),
                scan_offset,
                limit,
            ) {
                self.ctx.output.stats.process_bundle_reg_probes_any += 1;
                let preg_idx = PRegIndex::new(preg.index());
                trace!("trying preg {:?}", preg_idx);

                let scan_limit_cost = match (
                    lowest_cost_evict_conflict_cost,
                    lowest_cost_split_conflict_cost,
                ) {
                    (Some(a), Some(b)) => Some(core::cmp::max(a, b)),
                    _ => None,
                };
                match self.try_to_allocate_bundle_to_reg(
                    bundle,
                    preg_idx,
                    scan_limit_cost,
                    &mut scratch,
                ) {
                    AllocRegResult::Allocated(alloc) => {
                        self.ctx.output.stats.process_bundle_reg_success_any += 1;
                        trace!(" -> allocated to any {:?}", preg_idx);
                        self.ctx.spillsets[self.ctx.bundles[bundle].spillset].hint =
                            alloc.as_reg().unwrap();
                        break 'outer;
                    }
                    AllocRegResult::Conflict(bundles, first_conflict_point) => {
                        trace!(
                            " -> conflict with bundles {:?}, first conflict at {:?}",
                            bundles,
                            first_conflict_point
                        );

                        let conflict_cost = self.maximum_spill_weight_in_bundle_set(bundles);

                        if lowest_cost_evict_conflict_cost.is_none()
                            || conflict_cost < lowest_cost_evict_conflict_cost.unwrap()
                        {
                            lowest_cost_evict_conflict_cost = Some(conflict_cost);
                            lowest_cost_evict_conflict_set.clear();
                            lowest_cost_evict_conflict_set.extend(bundles);
                        }

                        let loop_depth =
                            self.ctx.cfginfo.approx_loop_depth[self.ctx.cfginfo.insn_block
                                [first_conflict_point.inst().index()]
                            .index()];
                        let move_cost = spill_weight_from_constraint(
                            OperandConstraint::Reg,
                            loop_depth as usize,
                            true,
                        )
                        .to_int();
                        if lowest_cost_split_conflict_cost.is_none()
                            || (conflict_cost + move_cost)
                                < lowest_cost_split_conflict_cost.unwrap()
                        {
                            lowest_cost_split_conflict_cost = Some(conflict_cost + move_cost);
                            lowest_cost_split_conflict_point = first_conflict_point;
                            lowest_cost_split_conflict_reg = preg;
                        }
                    }
                    AllocRegResult::ConflictWithFixed(max_cost, point) => {
                        trace!(" -> conflict with fixed alloc; cost of other bundles up to point is {}, conflict at {:?}", max_cost, point);

                        let loop_depth = self.ctx.cfginfo.approx_loop_depth
                            [self.ctx.cfginfo.insn_block[point.inst().index()].index()];
                        let move_cost = spill_weight_from_constraint(
                            OperandConstraint::Reg,
                            loop_depth as usize,
                            true,
                        )
                        .to_int();

                        if lowest_cost_split_conflict_cost.is_none()
                            || (max_cost + move_cost) < lowest_cost_split_conflict_cost.unwrap()
                        {
                            lowest_cost_split_conflict_cost = Some(max_cost + move_cost);
                            lowest_cost_split_conflict_point = point;
                            lowest_cost_split_conflict_reg = preg;
                        }
                    }
                    AllocRegResult::ConflictHighCost => {
                        continue;
                    }
                }
            }

            trace!(
                " -> lowest cost evict: set {:?}, cost {:?}",
                lowest_cost_evict_conflict_set,
                lowest_cost_evict_conflict_cost,
            );
            trace!(
                " -> lowest cost split: cost {:?}, point {:?}, reg {:?}",
                lowest_cost_split_conflict_cost,
                lowest_cost_split_conflict_point,
                lowest_cost_split_conflict_reg
            );

            debug_assert!(
                lowest_cost_split_conflict_cost.is_some()
                    || lowest_cost_evict_conflict_cost.is_some()
            );

            let our_spill_weight = self.bundle_spill_weight(bundle);
            trace!(" -> our spill weight: {}", our_spill_weight);

            if self.minimal_bundle(bundle)
                && (attempts >= 2
                    || lowest_cost_evict_conflict_cost.is_none()
                    || lowest_cost_evict_conflict_cost.unwrap() >= our_spill_weight)
            {
                if matches!(req, Requirement::Register | Requirement::Limit(_)) {
                    let range = self.ctx.bundles[bundle].ranges[0].range;
                    trace!("checking for too many live regs");
                    let mut min_bundles_assigned = 0;
                    let mut fixed_assigned = 0;
                    let mut total_regs = 0;
                    for preg in self.env.preferred_regs_by_class[class as u8 as usize]
                        .iter()
                        .chain(self.env.non_preferred_regs_by_class[class as u8 as usize].iter())
                    {
                        trace!(" -> PR {:?}", preg);
                        let start = LiveRangeKey::from_range(&CodeRange {
                            from: range.from.prev(),
                            to: range.from.prev(),
                        });
                        for (key, lr) in self.ctx.pregs[preg.index()]
                            .allocations
                            .btree
                            .range(start..)
                        {
                            let preg_range = key.to_range();
                            if preg_range.to <= range.from {
                                continue;
                            }
                            if preg_range.from >= range.to {
                                break;
                            }
                            if lr.is_valid() {
                                if self.minimal_bundle(self.ranges[*lr].bundle) {
                                    trace!(" -> min bundle {:?}", lr);
                                    min_bundles_assigned += 1;
                                } else {
                                    trace!(" -> non-min bundle {:?}", lr);
                                }
                            } else {
                                trace!(" -> fixed bundle");
                                fixed_assigned += 1;
                            }
                        }

                        if let Requirement::Limit(limit) = req {
                            if preg.hw_enc() >= limit as usize {
                                continue;
                            }
                        }

                        total_regs += 1;
                    }
                    trace!(
                        " -> total {}, fixed {}, min {}",
                        total_regs,
                        fixed_assigned,
                        min_bundles_assigned
                    );
                    if min_bundles_assigned + fixed_assigned >= total_regs {
                        return Err(RegAllocError::TooManyLiveRegs);
                    }
                }

                panic!("Could not allocate minimal bundle, but the allocation problem should be possible to solve");
            }

            if !self.minimal_bundle(bundle)
                && (attempts >= 2
                    || lowest_cost_evict_conflict_cost.is_none()
                    || our_spill_weight <= lowest_cost_evict_conflict_cost.unwrap())
            {
                trace!(
                    " -> deciding to split: our spill weight is {}",
                    self.bundle_spill_weight(bundle)
                );
                let bundle_start = self.ctx.bundles[bundle].ranges[0].range.from;
                let mut split_at_point =
                    core::cmp::max(lowest_cost_split_conflict_point, bundle_start);
                let requeue_with_reg = lowest_cost_split_conflict_reg;

                let bundle_start_depth = self.ctx.cfginfo.approx_loop_depth
                    [self.ctx.cfginfo.insn_block[bundle_start.inst().index()].index()];
                let split_at_depth = self.ctx.cfginfo.approx_loop_depth
                    [self.ctx.cfginfo.insn_block[split_at_point.inst().index()].index()];
                if split_at_depth > bundle_start_depth {
                    for block in (self.ctx.cfginfo.insn_block[bundle_start.inst().index()].index()
                        + 1)
                        ..=self.ctx.cfginfo.insn_block[split_at_point.inst().index()].index()
                    {
                        if self.ctx.cfginfo.approx_loop_depth[block] > bundle_start_depth {
                            split_at_point = self.ctx.cfginfo.block_entry[block];
                            break;
                        }
                    }
                }

                self.split_and_requeue_bundle(
                    bundle,
                    split_at_point,
                    requeue_with_reg,
                    true,
                );

                break 'outer;
            } else {
                self.ctx.output.stats.evict_bundle_event += 1;
                for &bundle in &lowest_cost_evict_conflict_set {
                    trace!(" -> evicting {:?}", bundle);
                    self.evict_bundle(bundle);
                    self.ctx.output.stats.evict_bundle_count += 1;
                }
            }
        }

        self.ctx.scratch_conflicts = scratch;
        self.ctx.scratch_bundle = lowest_cost_evict_conflict_set;
        return Ok(());
    }
}

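/// Compute the smallest `CodeRange` that can contain a live range consisting
/// of only this use, given the use's position (before/after its instruction)
/// and operand kind (def or use).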
fn minimal_range_for_use(u: &Use) -> CodeRange {
    let early = ProgPoint::before(u.pos.inst());
    let late = ProgPoint::after(u.pos.inst());
    let next_early = ProgPoint::before(u.pos.inst().next());
    match (u.pos.pos(), u.operand.kind()) {
        (InstPosition::Before, OperandKind::Def) => CodeRange {
            from: early,
            to: next_early,
        },
        (InstPosition::Before, OperandKind::Use) => CodeRange {
            from: early,
            to: late,
        },
        (InstPosition::After, OperandKind::Def) => CodeRange {
            from: late,
            to: next_early,
        },
        (InstPosition::After, OperandKind::Use) => CodeRange {
            from: early,
            to: next_early,
        },
    }
}