|
47 | 47 | #include <linux/msi.h> |
48 | 48 | #include <linux/hyperv.h> |
49 | 49 | #include <linux/refcount.h> |
| 50 | +#include <linux/irqdomain.h> |
| 51 | +#include <linux/acpi.h> |
50 | 52 | #include <asm/mshyperv.h> |
51 | 53 |
|
52 | 54 | /* |
@@ -596,19 +598,237 @@ static unsigned int hv_msi_get_int_vector(struct irq_data *data) |
596 | 598 | return cfg->vector; |
597 | 599 | } |
598 | 600 |
|
599 | | -static void hv_set_msi_entry_from_desc(union hv_msi_entry *msi_entry, |
600 | | - struct msi_desc *msi_desc) |
| 601 | +static int hv_msi_prepare(struct irq_domain *domain, struct device *dev, |
| 602 | + int nvec, msi_alloc_info_t *info) |
601 | 603 | { |
602 | | - msi_entry->address.as_uint32 = msi_desc->msg.address_lo; |
603 | | - msi_entry->data.as_uint32 = msi_desc->msg.data; |
| 604 | + int ret = pci_msi_prepare(domain, dev, nvec, info); |
| 605 | + |
| 606 | + /* |
| 607 | + * By using the interrupt remapper in the hypervisor IOMMU, contiguous |
| 608 | + * CPU vectors is not needed for multi-MSI |
| 609 | + */ |
| 610 | + if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI) |
| 611 | + info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS; |
| 612 | + |
| 613 | + return ret; |
604 | 614 | } |
605 | 615 |
|
606 | | -static int hv_msi_prepare(struct irq_domain *domain, struct device *dev, |
607 | | - int nvec, msi_alloc_info_t *info) |
| 616 | +#elif defined(CONFIG_ARM64) |
| 617 | +/* |
| 618 | + * SPI vectors to use for vPCI; arch SPIs range is [32, 1019], but leaving a bit |
| 619 | + * of room at the start to allow for SPIs to be specified through ACPI and |
| 620 | + * starting with a power of two to satisfy power of 2 multi-MSI requirement. |
| 621 | + */ |
| 622 | +#define HV_PCI_MSI_SPI_START 64 |
| 623 | +#define HV_PCI_MSI_SPI_NR (1020 - HV_PCI_MSI_SPI_START) |
| 624 | +#define DELIVERY_MODE 0 |
| 625 | +#define FLOW_HANDLER NULL |
| 626 | +#define FLOW_NAME NULL |
| 627 | +#define hv_msi_prepare NULL |
| 628 | + |
| 629 | +struct hv_pci_chip_data { |
| 630 | + DECLARE_BITMAP(spi_map, HV_PCI_MSI_SPI_NR); |
| 631 | + struct mutex map_lock; |
| 632 | +}; |
| 633 | + |
| 634 | +/* Hyper-V vPCI MSI GIC IRQ domain */ |
| 635 | +static struct irq_domain *hv_msi_gic_irq_domain; |
| 636 | + |
| 637 | +/* Hyper-V PCI MSI IRQ chip */ |
| 638 | +static struct irq_chip hv_arm64_msi_irq_chip = { |
| 639 | + .name = "MSI", |
| 640 | + .irq_set_affinity = irq_chip_set_affinity_parent, |
| 641 | + .irq_eoi = irq_chip_eoi_parent, |
| 642 | + .irq_mask = irq_chip_mask_parent, |
| 643 | + .irq_unmask = irq_chip_unmask_parent |
| 644 | +}; |
| 645 | + |
| 646 | +static unsigned int hv_msi_get_int_vector(struct irq_data *irqd) |
| 647 | +{ |
| 648 | + return irqd->parent_data->hwirq; |
| 649 | +} |
| 650 | + |
| 651 | +/* |
| 652 | + * @nr_bm_irqs: Indicates the number of IRQs that were allocated from |
| 653 | + * the bitmap. |
| 654 | + * @nr_dom_irqs: Indicates the number of IRQs that were allocated from |
| 655 | + * the parent domain. |
| 656 | + */ |
| 657 | +static void hv_pci_vec_irq_free(struct irq_domain *domain, |
| 658 | + unsigned int virq, |
| 659 | + unsigned int nr_bm_irqs, |
| 660 | + unsigned int nr_dom_irqs) |
| 661 | +{ |
| 662 | + struct hv_pci_chip_data *chip_data = domain->host_data; |
| 663 | + struct irq_data *d = irq_domain_get_irq_data(domain, virq); |
| 664 | + int first = d->hwirq - HV_PCI_MSI_SPI_START; |
| 665 | + int i; |
| 666 | + |
| 667 | + mutex_lock(&chip_data->map_lock); |
| 668 | + bitmap_release_region(chip_data->spi_map, |
| 669 | + first, |
| 670 | + get_count_order(nr_bm_irqs)); |
| 671 | + mutex_unlock(&chip_data->map_lock); |
| 672 | + for (i = 0; i < nr_dom_irqs; i++) { |
| 673 | + if (i) |
| 674 | + d = irq_domain_get_irq_data(domain, virq + i); |
| 675 | + irq_domain_reset_irq_data(d); |
| 676 | + } |
| 677 | + |
| 678 | + irq_domain_free_irqs_parent(domain, virq, nr_dom_irqs); |
| 679 | +} |
| 680 | + |
/* Domain .free callback: everything was fully allocated, free it all. */
static void hv_pci_vec_irq_domain_free(struct irq_domain *domain,
				       unsigned int virq,
				       unsigned int nr_irqs)
{
	hv_pci_vec_irq_free(domain, virq, nr_irqs, nr_irqs);
}
| 687 | + |
| 688 | +static int hv_pci_vec_alloc_device_irq(struct irq_domain *domain, |
| 689 | + unsigned int nr_irqs, |
| 690 | + irq_hw_number_t *hwirq) |
| 691 | +{ |
| 692 | + struct hv_pci_chip_data *chip_data = domain->host_data; |
| 693 | + int index; |
| 694 | + |
| 695 | + /* Find and allocate region from the SPI bitmap */ |
| 696 | + mutex_lock(&chip_data->map_lock); |
| 697 | + index = bitmap_find_free_region(chip_data->spi_map, |
| 698 | + HV_PCI_MSI_SPI_NR, |
| 699 | + get_count_order(nr_irqs)); |
| 700 | + mutex_unlock(&chip_data->map_lock); |
| 701 | + if (index < 0) |
| 702 | + return -ENOSPC; |
| 703 | + |
| 704 | + *hwirq = index + HV_PCI_MSI_SPI_START; |
| 705 | + |
| 706 | + return 0; |
| 707 | +} |
| 708 | + |
| 709 | +static int hv_pci_vec_irq_gic_domain_alloc(struct irq_domain *domain, |
| 710 | + unsigned int virq, |
| 711 | + irq_hw_number_t hwirq) |
| 712 | +{ |
| 713 | + struct irq_fwspec fwspec; |
| 714 | + struct irq_data *d; |
| 715 | + int ret; |
| 716 | + |
| 717 | + fwspec.fwnode = domain->parent->fwnode; |
| 718 | + fwspec.param_count = 2; |
| 719 | + fwspec.param[0] = hwirq; |
| 720 | + fwspec.param[1] = IRQ_TYPE_EDGE_RISING; |
| 721 | + |
| 722 | + ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec); |
| 723 | + if (ret) |
| 724 | + return ret; |
| 725 | + |
| 726 | + /* |
| 727 | + * Since the interrupt specifier is not coming from ACPI or DT, the |
| 728 | + * trigger type will need to be set explicitly. Otherwise, it will be |
| 729 | + * set to whatever is in the GIC configuration. |
| 730 | + */ |
| 731 | + d = irq_domain_get_irq_data(domain->parent, virq); |
| 732 | + |
| 733 | + return d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING); |
| 734 | +} |
| 735 | + |
| 736 | +static int hv_pci_vec_irq_domain_alloc(struct irq_domain *domain, |
| 737 | + unsigned int virq, unsigned int nr_irqs, |
| 738 | + void *args) |
| 739 | +{ |
| 740 | + irq_hw_number_t hwirq; |
| 741 | + unsigned int i; |
| 742 | + int ret; |
| 743 | + |
| 744 | + ret = hv_pci_vec_alloc_device_irq(domain, nr_irqs, &hwirq); |
| 745 | + if (ret) |
| 746 | + return ret; |
| 747 | + |
| 748 | + for (i = 0; i < nr_irqs; i++) { |
| 749 | + ret = hv_pci_vec_irq_gic_domain_alloc(domain, virq + i, |
| 750 | + hwirq + i); |
| 751 | + if (ret) { |
| 752 | + hv_pci_vec_irq_free(domain, virq, nr_irqs, i); |
| 753 | + return ret; |
| 754 | + } |
| 755 | + |
| 756 | + irq_domain_set_hwirq_and_chip(domain, virq + i, |
| 757 | + hwirq + i, |
| 758 | + &hv_arm64_msi_irq_chip, |
| 759 | + domain->host_data); |
| 760 | + pr_debug("pID:%d vID:%u\n", (int)(hwirq + i), virq + i); |
| 761 | + } |
| 762 | + |
| 763 | + return 0; |
| 764 | +} |
| 765 | + |
| 766 | +/* |
| 767 | + * Pick the first cpu as the irq affinity that can be temporarily used for |
| 768 | + * composing MSI from the hypervisor. GIC will eventually set the right |
| 769 | + * affinity for the irq and the 'unmask' will retarget the interrupt to that |
| 770 | + * cpu. |
| 771 | + */ |
| 772 | +static int hv_pci_vec_irq_domain_activate(struct irq_domain *domain, |
| 773 | + struct irq_data *irqd, bool reserve) |
608 | 774 | { |
609 | | - return pci_msi_prepare(domain, dev, nvec, info); |
| 775 | + int cpu = cpumask_first(cpu_present_mask); |
| 776 | + |
| 777 | + irq_data_update_effective_affinity(irqd, cpumask_of(cpu)); |
| 778 | + |
| 779 | + return 0; |
610 | 780 | } |
611 | | -#endif /* CONFIG_X86 */ |
| 781 | + |
| 782 | +static const struct irq_domain_ops hv_pci_domain_ops = { |
| 783 | + .alloc = hv_pci_vec_irq_domain_alloc, |
| 784 | + .free = hv_pci_vec_irq_domain_free, |
| 785 | + .activate = hv_pci_vec_irq_domain_activate, |
| 786 | +}; |
| 787 | + |
| 788 | +static int hv_pci_irqchip_init(void) |
| 789 | +{ |
| 790 | + static struct hv_pci_chip_data *chip_data; |
| 791 | + struct fwnode_handle *fn = NULL; |
| 792 | + int ret = -ENOMEM; |
| 793 | + |
| 794 | + chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL); |
| 795 | + if (!chip_data) |
| 796 | + return ret; |
| 797 | + |
| 798 | + mutex_init(&chip_data->map_lock); |
| 799 | + fn = irq_domain_alloc_named_fwnode("hv_vpci_arm64"); |
| 800 | + if (!fn) |
| 801 | + goto free_chip; |
| 802 | + |
| 803 | + /* |
| 804 | + * IRQ domain once enabled, should not be removed since there is no |
| 805 | + * way to ensure that all the corresponding devices are also gone and |
| 806 | + * no interrupts will be generated. |
| 807 | + */ |
| 808 | + hv_msi_gic_irq_domain = acpi_irq_create_hierarchy(0, HV_PCI_MSI_SPI_NR, |
| 809 | + fn, &hv_pci_domain_ops, |
| 810 | + chip_data); |
| 811 | + |
| 812 | + if (!hv_msi_gic_irq_domain) { |
| 813 | + pr_err("Failed to create Hyper-V arm64 vPCI MSI IRQ domain\n"); |
| 814 | + goto free_chip; |
| 815 | + } |
| 816 | + |
| 817 | + return 0; |
| 818 | + |
| 819 | +free_chip: |
| 820 | + kfree(chip_data); |
| 821 | + if (fn) |
| 822 | + irq_domain_free_fwnode(fn); |
| 823 | + |
| 824 | + return ret; |
| 825 | +} |
| 826 | + |
| 827 | +static struct irq_domain *hv_pci_get_root_domain(void) |
| 828 | +{ |
| 829 | + return hv_msi_gic_irq_domain; |
| 830 | +} |
| 831 | +#endif /* CONFIG_ARM64 */ |
612 | 832 |
|
613 | 833 | /** |
614 | 834 | * hv_pci_generic_compl() - Invoked for a completion packet |
@@ -1225,28 +1445,8 @@ static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info, |
1225 | 1445 | static void hv_irq_mask(struct irq_data *data) |
1226 | 1446 | { |
1227 | 1447 | pci_msi_mask_irq(data); |
1228 | | -} |
1229 | | - |
1230 | | -static unsigned int hv_msi_get_int_vector(struct irq_data *data) |
1231 | | -{ |
1232 | | - struct irq_cfg *cfg = irqd_cfg(data); |
1233 | | - |
1234 | | - return cfg->vector; |
1235 | | -} |
1236 | | - |
1237 | | -static int hv_msi_prepare(struct irq_domain *domain, struct device *dev, |
1238 | | - int nvec, msi_alloc_info_t *info) |
1239 | | -{ |
1240 | | - int ret = pci_msi_prepare(domain, dev, nvec, info); |
1241 | | - |
1242 | | - /* |
1243 | | - * By using the interrupt remapper in the hypervisor IOMMU, contiguous |
1244 | | - * CPU vectors is not needed for multi-MSI |
1245 | | - */ |
1246 | | - if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI) |
1247 | | - info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS; |
1248 | | - |
1249 | | - return ret; |
| 1448 | + if (data->parent_data->chip->irq_mask) |
| 1449 | + irq_chip_mask_parent(data); |
1250 | 1450 | } |
1251 | 1451 |
|
1252 | 1452 | /** |
@@ -1371,6 +1571,8 @@ static void hv_irq_unmask(struct irq_data *data) |
1371 | 1571 | dev_err(&hbus->hdev->device, |
1372 | 1572 | "%s() failed: %#llx", __func__, res); |
1373 | 1573 |
|
| 1574 | + if (data->parent_data->chip->irq_unmask) |
| 1575 | + irq_chip_unmask_parent(data); |
1374 | 1576 | pci_msi_unmask_irq(data); |
1375 | 1577 | } |
1376 | 1578 |
|
@@ -1678,7 +1880,11 @@ static struct irq_chip hv_msi_irq_chip = { |
1678 | 1880 | .name = "Hyper-V PCIe MSI", |
1679 | 1881 | .irq_compose_msi_msg = hv_compose_msi_msg, |
1680 | 1882 | .irq_set_affinity = irq_chip_set_affinity_parent, |
| 1883 | +#ifdef CONFIG_X86 |
1681 | 1884 | .irq_ack = irq_chip_ack_parent, |
| 1885 | +#elif defined(CONFIG_ARM64) |
| 1886 | + .irq_eoi = irq_chip_eoi_parent, |
| 1887 | +#endif |
1682 | 1888 | .irq_mask = hv_irq_mask, |
1683 | 1889 | .irq_unmask = hv_irq_unmask, |
1684 | 1890 | }; |
|
0 commit comments