Merge 5.10.38 into android12-5.10

Changes in 5.10.38
	KEYS: trusted: Fix memory leak on object td
	tpm: fix error return code in tpm2_get_cc_attrs_tbl()
	tpm, tpm_tis: Extend locality handling to TPM2 in tpm_tis_gen_interrupt()
	tpm, tpm_tis: Reserve locality in tpm_tis_resume()
	KVM: x86/mmu: Remove the defunct update_pte() paging hook
	KVM/VMX: Invoke NMI non-IST entry instead of IST entry
	ACPI: PM: Add ACPI ID of Alder Lake Fan
	PM: runtime: Fix unpaired parent child_count for force_resume
	cpufreq: intel_pstate: Use HWP if enabled by platform firmware
	kvm: Cap halt polling at kvm->max_halt_poll_ns
	ath11k: fix thermal temperature read
	fs: dlm: fix debugfs dump
	fs: dlm: add errno handling to check callback
	fs: dlm: check on minimum msglen size
	fs: dlm: flush swork on shutdown
	tipc: convert dest node's address to network order
	ASoC: Intel: bytcr_rt5640: Enable jack-detect support on Asus T100TAF
	net/mlx5e: Use net_prefetchw instead of prefetchw in MPWQE TX datapath
	net: stmmac: Set FIFO sizes for ipq806x
	ASoC: rsnd: core: Check convert rate in rsnd_hw_params
	Bluetooth: Fix incorrect status handling in LE PHY UPDATE event
	i2c: bail out early when RDWR parameters are wrong
	ALSA: hdsp: don't disable if not enabled
	ALSA: hdspm: don't disable if not enabled
	ALSA: rme9652: don't disable if not enabled
	ALSA: bebob: enable to deliver MIDI messages for multiple ports
	Bluetooth: Set CONF_NOT_COMPLETE as l2cap_chan default
	Bluetooth: initialize skb_queue_head at l2cap_chan_create()
	net/sched: cls_flower: use ntohs for struct flow_dissector_key_ports
	net: bridge: when suppression is enabled exclude RARP packets
	Bluetooth: check for zapped sk before connecting
	selftests/powerpc: Fix L1D flushing tests for Power10
	powerpc/32: Statically initialise first emergency context
	net: hns3: remediate a potential overflow risk of bd_num_list
	net: hns3: add handling for xmit skb with recursive fraglist
	ip6_vti: proper dev_{hold|put} in ndo_[un]init methods
	ASoC: Intel: bytcr_rt5640: Add quirk for the Chuwi Hi8 tablet
	ice: handle increasing Tx or Rx ring sizes
	Bluetooth: btusb: Enable quirk boolean flag for Mediatek Chip.
	ASoC: rt5670: Add a quirk for the Dell Venue 10 Pro 5055
	i2c: Add I2C_AQ_NO_REP_START adapter quirk
	MIPS: Loongson64: Use _CACHE_UNCACHED instead of _CACHE_UNCACHED_ACCELERATED
	coresight: Do not scan for graph if none is present
	IB/hfi1: Correct oversized ring allocation
	mac80211: clear the beacon's CRC after channel switch
	pinctrl: samsung: use 'int' for register masks in Exynos
	rtw88: 8822c: add LC calibration for RTL8822C
	mt76: mt7615: support loading EEPROM for MT7613BE
	mt76: mt76x0: disable GTK offloading
	mt76: mt7915: fix txpower init for TSSI off chips
	fuse: invalidate attrs when page writeback completes
	virtiofs: fix userns
	cuse: prevent clone
	iwlwifi: pcie: make cfg vs. trans_cfg more robust
	powerpc/mm: Add cond_resched() while removing hpte mappings
	ASoC: rsnd: call rsnd_ssi_master_clk_start() from rsnd_ssi_init()
	Revert "iommu/amd: Fix performance counter initialization"
	iommu/amd: Remove performance counter pre-initialization test
	drm/amd/display: Force vsync flip when reconfiguring MPCC
	selftests: Set CC to clang in lib.mk if LLVM is set
	kconfig: nconf: stop endless search loops
	ALSA: hda/realtek: Add quirk for Lenovo Ideapad S740
	ASoC: Intel: sof_sdw: add quirk for new ADL-P Rvp
	ALSA: hda/hdmi: fix race in handling acomp ELD notification at resume
	sctp: Fix out-of-bounds warning in sctp_process_asconf_param()
	flow_dissector: Fix out-of-bounds warning in __skb_flow_bpf_to_target()
	powerpc/smp: Set numa node before updating mask
	ASoC: rt286: Generalize support for ALC3263 codec
	ethtool: ioctl: Fix out-of-bounds warning in store_link_ksettings_for_user()
	net: sched: tapr: prevent cycle_time == 0 in parse_taprio_schedule
	samples/bpf: Fix broken tracex1 due to kprobe argument change
	powerpc/pseries: Stop calling printk in rtas_stop_self()
	drm/amd/display: fixed divide by zero kernel crash during dsc enablement
	drm/amd/display: add handling for hdcp2 rx id list validation
	drm/amdgpu: Add mem sync flag for IB allocated by SA
	mt76: mt7615: fix entering driver-own state on mt7663
	crypto: ccp: Free SEV device if SEV init fails
	wl3501_cs: Fix out-of-bounds warnings in wl3501_send_pkt
	wl3501_cs: Fix out-of-bounds warnings in wl3501_mgmt_join
	qtnfmac: Fix possible buffer overflow in qtnf_event_handle_external_auth
	powerpc/iommu: Annotate nested lock for lockdep
	iavf: remove duplicate free resources calls
	net: ethernet: mtk_eth_soc: fix RX VLAN offload
	selftests: mlxsw: Increase the tolerance of backlog buildup
	selftests: mlxsw: Fix mausezahn invocation in ERSPAN scale test
	kbuild: generate Module.symvers only when vmlinux exists
	bnxt_en: Add PCI IDs for Hyper-V VF devices.
	ia64: module: fix symbolizer crash on fdescr
	watchdog: rename __touch_watchdog() to a better descriptive name
	watchdog: explicitly update timestamp when reporting softlockup
	watchdog/softlockup: remove logic that tried to prevent repeated reports
	watchdog: fix barriers when printing backtraces from all CPUs
	ASoC: rt286: Make RT286_SET_GPIO_* readable and writable
	thermal: thermal_of: Fix error return code of thermal_of_populate_bind_params()
	f2fs: move ioctl interface definitions to separated file
	f2fs: fix compat F2FS_IOC_{MOVE,GARBAGE_COLLECT}_RANGE
	f2fs: fix to allow migrating fully valid segment
	f2fs: fix panic during f2fs_resize_fs()
	f2fs: fix a redundant call to f2fs_balance_fs if an error occurs
	remoteproc: qcom_q6v5_mss: Replace ioremap with memremap
	remoteproc: qcom_q6v5_mss: Validate p_filesz in ELF loader
	PCI: iproc: Fix return value of iproc_msi_irq_domain_alloc()
	PCI: Release OF node in pci_scan_device()'s error path
	ARM: 9064/1: hw_breakpoint: Do not directly check the event's overflow_handler hook
	f2fs: fix to align to section for fallocate() on pinned file
	f2fs: fix to update last i_size if fallocate partially succeeds
	PCI: endpoint: Make *_get_first_free_bar() take into account 64 bit BAR
	PCI: endpoint: Add helper API to get the 'next' unreserved BAR
	PCI: endpoint: Make *_free_bar() to return error codes on failure
	PCI: endpoint: Fix NULL pointer dereference for ->get_features()
	f2fs: fix to avoid touching checkpointed data in get_victim()
	f2fs: fix to cover __allocate_new_section() with curseg_lock
	f2fs: Fix a hungtask problem in atomic write
	f2fs: fix to avoid accessing invalid fio in f2fs_allocate_data_block()
	rpmsg: qcom_glink_native: fix error return code of qcom_glink_rx_data()
	NFS: nfs4_bitmask_adjust() must not change the server global bitmasks
	NFS: Fix attribute bitmask in _nfs42_proc_fallocate()
	NFSv4.2: Always flush out writes in nfs42_proc_fallocate()
	NFS: Deal correctly with attribute generation counter overflow
	PCI: endpoint: Fix missing destroy_workqueue()
	pNFS/flexfiles: fix incorrect size check in decode_nfs_fh()
	NFSv4.2 fix handling of sr_eof in SEEK's reply
	SUNRPC: Move fault injection call sites
	SUNRPC: Remove trace_xprt_transmit_queued
	SUNRPC: Handle major timeout in xprt_adjust_timeout()
	thermal/drivers/tsens: Fix missing put_device error
	NFSv4.x: Don't return NFS4ERR_NOMATCHING_LAYOUT if we're unmounting
	nfsd: ensure new clients break delegations
	rtc: fsl-ftm-alarm: add MODULE_TABLE()
	dmaengine: idxd: Fix potential null dereference on pointer status
	dmaengine: idxd: fix dma device lifetime
	dmaengine: idxd: fix cdev setup and free device lifetime issues
	SUNRPC: fix ternary sign expansion bug in tracing
	pwm: atmel: Fix duty cycle calculation in .get_state()
	xprtrdma: Avoid Receive Queue wrapping
	xprtrdma: Fix cwnd update ordering
	xprtrdma: rpcrdma_mr_pop() already does list_del_init()
	swiotlb: Fix the type of index
	ceph: fix inode leak on getattr error in __fh_to_dentry
	scsi: qla2xxx: Prevent PRLI in target mode
	scsi: ufs: core: Do not put UFS power into LPM if link is broken
	scsi: ufs: core: Cancel rpm_dev_flush_recheck_work during system suspend
	scsi: ufs: core: Narrow down fast path in system suspend path
	rtc: ds1307: Fix wday settings for rx8130
	net: hns3: fix incorrect configuration for igu_egu_hw_err
	net: hns3: initialize the message content in hclge_get_link_mode()
	net: hns3: add check for HNS3_NIC_STATE_INITED in hns3_reset_notify_up_enet()
	net: hns3: fix for vxlan gpe tx checksum bug
	net: hns3: use netif_tx_disable to stop the transmit queue
	net: hns3: disable phy loopback setting in hclge_mac_start_phy
	sctp: do asoc update earlier in sctp_sf_do_dupcook_a
	RISC-V: Fix error code returned by riscv_hartid_to_cpuid()
	sunrpc: Fix misplaced barrier in call_decode
	libbpf: Fix signed overflow in ringbuf_process_ring
	block/rnbd-clt: Change queue_depth type in rnbd_clt_session to size_t
	block/rnbd-clt: Check the return value of the function rtrs_clt_query
	ethernet:enic: Fix a use after free bug in enic_hard_start_xmit
	sctp: fix a SCTP_MIB_CURRESTAB leak in sctp_sf_do_dupcook_b
	netfilter: xt_SECMARK: add new revision to fix structure layout
	xsk: Fix for xp_aligned_validate_desc() when len == chunk_size
	net: stmmac: Clear receive all(RA) bit when promiscuous mode is off
	drm/radeon: Fix off-by-one power_state index heap overwrite
	drm/radeon: Avoid power table parsing memory leaks
	arm64: entry: factor irq triage logic into macros
	arm64: entry: always set GIC_PRIO_PSR_I_SET during entry
	khugepaged: fix wrong result value for trace_mm_collapse_huge_page_isolate()
	mm/hugeltb: handle the error case in hugetlb_fix_reserve_counts()
	mm/migrate.c: fix potential indeterminate pte entry in migrate_vma_insert_page()
	ksm: fix potential missing rmap_item for stable_node
	mm/gup: check every subpage of a compound page during isolation
	mm/gup: return an error on migration failure
	mm/gup: check for isolation errors
	ethtool: fix missing NLM_F_MULTI flag when dumping
	net: fix nla_strcmp to handle more then one trailing null character
	smc: disallow TCP_ULP in smc_setsockopt()
	netfilter: nfnetlink_osf: Fix a missing skb_header_pointer() NULL check
	netfilter: nftables: Fix a memleak from userdata error path in new objects
	can: mcp251xfd: mcp251xfd_probe(): add missing can_rx_offload_del() in error path
	can: mcp251x: fix resume from sleep before interface was brought up
	can: m_can: m_can_tx_work_queue(): fix tx_skb race condition
	sched: Fix out-of-bound access in uclamp
	sched/fair: Fix unfairness caused by missing load decay
	fs/proc/generic.c: fix incorrect pde_is_permanent check
	kernel: kexec_file: fix error return code of kexec_calculate_store_digests()
	kernel/resource: make walk_system_ram_res() find all busy IORESOURCE_SYSTEM_RAM resources
	kernel/resource: make walk_mem_res() find all busy IORESOURCE_MEM resources
	netfilter: nftables: avoid overflows in nft_hash_buckets()
	i40e: fix broken XDP support
	i40e: Fix use-after-free in i40e_client_subtask()
	i40e: fix the restart auto-negotiation after FEC modified
	i40e: Fix PHY type identifiers for 2.5G and 5G adapters
	mptcp: fix splat when closing unaccepted socket
	f2fs: avoid unneeded data copy in f2fs_ioc_move_range()
	ARC: entry: fix off-by-one error in syscall number validation
	ARC: mm: PAE: use 40-bit physical page mask
	ARC: mm: Use max_high_pfn as a HIGHMEM zone border
	powerpc/64s: Fix crashes when toggling stf barrier
	powerpc/64s: Fix crashes when toggling entry flush barrier
	hfsplus: prevent corruption in shrinking truncate
	squashfs: fix divide error in calculate_skip()
	userfaultfd: release page in error path to avoid BUG_ON
	kasan: fix unit tests with CONFIG_UBSAN_LOCAL_BOUNDS enabled
	mm/hugetlb: fix F_SEAL_FUTURE_WRITE
	blk-iocost: fix weight updates of inner active iocgs
	arm64: mte: initialize RGSR_EL1.SEED in __cpu_setup
	arm64: Fix race condition on PG_dcache_clean in __sync_icache_dcache()
	btrfs: fix race leading to unpersisted data and metadata on fsync
	drm/radeon/dpm: Disable sclk switching on Oland when two 4K 60Hz monitors are connected
	drm/amd/display: Initialize attribute for hdcp_srm sysfs file
	drm/i915: Avoid div-by-zero on gen2
	kvm: exit halt polling on need_resched() as well
	KVM: LAPIC: Accurately guarantee busy wait for timer to expire when using hv_timer
	drm/msm/dp: initialize audio_comp when audio starts
	KVM: x86: Cancel pvclock_gtod_work on module removal
	KVM: x86: Prevent deadlock against tk_core.seq
	dax: Add an enum for specifying dax wakup mode
	dax: Add a wakeup mode parameter to put_unlocked_entry()
	dax: Wake up all waiters after invalidating dax entry
	xen/unpopulated-alloc: consolidate pgmap manipulation
	xen/unpopulated-alloc: fix error return code in fill_list()
	perf tools: Fix dynamic libbpf link
	usb: dwc3: gadget: Free gadget structure only after freeing endpoints
	iio: light: gp2ap002: Fix rumtime PM imbalance on error
	iio: proximity: pulsedlight: Fix rumtime PM imbalance on error
	iio: hid-sensors: select IIO_TRIGGERED_BUFFER under HID_SENSOR_IIO_TRIGGER
	usb: fotg210-hcd: Fix an error message
	hwmon: (occ) Fix poll rate limiting
	usb: musb: Fix an error message
	ACPI: scan: Fix a memory leak in an error handling path
	kyber: fix out of bounds access when preempted
	nvmet: add lba to sect conversion helpers
	nvmet: fix inline bio check for bdev-ns
	nvmet-rdma: Fix NULL deref when SEND is completed with error
	f2fs: compress: fix to free compress page correctly
	f2fs: compress: fix race condition of overwrite vs truncate
	f2fs: compress: fix to assign cc.cluster_idx correctly
	nbd: Fix NULL pointer in flush_workqueue
	blk-mq: plug request for shared sbitmap
	blk-mq: Swap two calls in blk_mq_exit_queue()
	usb: dwc3: omap: improve extcon initialization
	usb: dwc3: pci: Enable usb2-gadget-lpm-disable for Intel Merrifield
	usb: xhci: Increase timeout for HC halt
	usb: dwc2: Fix gadget DMA unmap direction
	usb: core: hub: fix race condition about TRSMRCY of resume
	usb: dwc3: gadget: Enable suspend events
	usb: dwc3: gadget: Return success always for kick transfer in ep queue
	usb: typec: ucsi: Retrieve all the PDOs instead of just the first 4
	usb: typec: ucsi: Put fwnode in any case during ->probe()
	xhci-pci: Allow host runtime PM as default for Intel Alder Lake xHCI
	xhci: Do not use GFP_KERNEL in (potentially) atomic context
	xhci: Add reset resume quirk for AMD xhci controller.
	iio: gyro: mpu3050: Fix reported temperature value
	iio: tsl2583: Fix division by a zero lux_val
	cdc-wdm: untangle a circular dependency between callback and softint
	xen/gntdev: fix gntdev_mmap() error exit path
	KVM: x86: Emulate RDPID only if RDTSCP is supported
	KVM: x86: Move RDPID emulation intercept to its own enum
	KVM: nVMX: Always make an attempt to map eVMCS after migration
	KVM: VMX: Do not advertise RDPID if ENABLE_RDTSCP control is unsupported
	KVM: VMX: Disable preemption when probing user return MSRs
	Revert "iommu/vt-d: Remove WO permissions on second-level paging entries"
	Revert "iommu/vt-d: Preset Access/Dirty bits for IOVA over FL"
	iommu/vt-d: Preset Access/Dirty bits for IOVA over FL
	iommu/vt-d: Remove WO permissions on second-level paging entries
	mm: fix struct page layout on 32-bit systems
	MIPS: Reinstate platform `__div64_32' handler
	MIPS: Avoid DIVU in `__div64_32' is result would be zero
	MIPS: Avoid handcoded DIVU in `__div64_32' altogether
	clocksource/drivers/timer-ti-dm: Prepare to handle dra7 timer wrap issue
	clocksource/drivers/timer-ti-dm: Handle dra7 timer wrap errata i940
	ARM: 9011/1: centralize phys-to-virt conversion of DT/ATAGS address
	ARM: 9012/1: move device tree mapping out of linear region
	ARM: 9020/1: mm: use correct section size macro to describe the FDT virtual address
	ARM: 9027/1: head.S: explicitly map DT even if it lives in the first physical section
	usb: typec: tcpm: Fix error while calculating PPS out values
	kobject_uevent: remove warning in init_uevent_argv()
	drm/i915/gt: Fix a double free in gen8_preallocate_top_level_pdp
	drm/i915: Read C0DRB3/C1DRB3 as 16 bits again
	drm/i915/overlay: Fix active retire callback alignment
	drm/i915: Fix crash in auto_retire
	clk: exynos7: Mark aclk_fsys1_200 as critical
	media: rkvdec: Remove of_match_ptr()
	i2c: mediatek: Fix send master code at more than 1MHz
	dt-bindings: media: renesas,vin: Make resets optional on R-Car Gen1
	dt-bindings: serial: 8250: Remove duplicated compatible strings
	debugfs: Make debugfs_allow RO after init
	ext4: fix debug format string warning
	nvme: do not try to reconfigure APST when the controller is not live
	ASoC: rsnd: check all BUSIF status when error
	Linux 5.10.38

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ia32e01283b488a38be48015c58a0e481f09aaf65
Greg Kroah-Hartman 2021-05-19 11:11:48 +02:00
commit 76002c201f
299 changed files with 2912 additions and 1239 deletions

.gitignore

@@ -56,6 +56,7 @@ modules.order
/tags
/TAGS
/linux
/modules-only.symvers
/vmlinux
/vmlinux.32
/vmlinux.symvers

@@ -45,9 +45,14 @@ fffe8000 fffeffff DTCM mapping area for platforms with
fffe0000 fffe7fff ITCM mapping area for platforms with
ITCM mounted inside the CPU.
ffc00000 ffefffff Fixmap mapping region. Addresses provided
ffc80000 ffefffff Fixmap mapping region. Addresses provided
by fix_to_virt() will be located here.
ffc00000 ffc7ffff Guard region
ff800000 ffbfffff Permanent, fixed read-only mapping of the
firmware provided DT blob
fee00000 feffffff Mapping of PCI I/O space. This is a static
mapping within the vmalloc space.

@@ -278,23 +278,35 @@ required:
- interrupts
- clocks
- power-domains
- resets
if:
properties:
compatible:
contains:
enum:
- renesas,vin-r8a7778
- renesas,vin-r8a7779
- renesas,rcar-gen2-vin
then:
required:
- port
else:
required:
- renesas,id
- ports
allOf:
- if:
not:
properties:
compatible:
contains:
enum:
- renesas,vin-r8a7778
- renesas,vin-r8a7779
then:
required:
- resets
- if:
properties:
compatible:
contains:
enum:
- renesas,vin-r8a7778
- renesas,vin-r8a7779
- renesas,rcar-gen2-vin
then:
required:
- port
else:
required:
- renesas,id
- ports
additionalProperties: false

@@ -93,11 +93,6 @@ properties:
- mediatek,mt7622-btif
- mediatek,mt7623-btif
- const: mediatek,mtk-btif
- items:
- enum:
- mediatek,mt7622-btif
- mediatek,mt7623-btif
- const: mediatek,mtk-btif
- items:
- const: mrvl,mmp-uart
- const: intel,xscale-uart

@@ -178,6 +178,7 @@ mktables
mktree
mkutf8data
modpost
modules-only.symvers
modules.builtin
modules.builtin.modinfo
modules.nsdeps

@@ -6694,6 +6694,7 @@ F: Documentation/filesystems/f2fs.rst
F: fs/f2fs/
F: include/linux/f2fs_fs.h
F: include/trace/events/f2fs.h
F: include/uapi/linux/f2fs.h
F71805F HARDWARE MONITORING DRIVER
M: Jean Delvare <jdelvare@suse.com>

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
SUBLEVEL = 37
SUBLEVEL = 38
EXTRAVERSION =
NAME = Dare mighty things
@@ -1567,7 +1567,7 @@ endif # CONFIG_MODULES
# make distclean Remove editor backup files, patch leftover files and the like
# Directories & files removed with 'make clean'
CLEAN_FILES += include/ksym vmlinux.symvers \
CLEAN_FILES += include/ksym vmlinux.symvers modules-only.symvers \
modules.builtin modules.builtin.modinfo modules.nsdeps \
compile_commands.json

@@ -7,6 +7,18 @@
#include <uapi/asm/page.h>
#ifdef CONFIG_ARC_HAS_PAE40
#define MAX_POSSIBLE_PHYSMEM_BITS 40
#define PAGE_MASK_PHYS (0xff00000000ull | PAGE_MASK)
#else /* CONFIG_ARC_HAS_PAE40 */
#define MAX_POSSIBLE_PHYSMEM_BITS 32
#define PAGE_MASK_PHYS PAGE_MASK
#endif /* CONFIG_ARC_HAS_PAE40 */
#ifndef __ASSEMBLY__
#define clear_page(paddr) memset((paddr), 0, PAGE_SIZE)

@@ -107,8 +107,8 @@
#define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)
/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)
#define _PAGE_CHG_MASK (PAGE_MASK_PHYS | _PAGE_ACCESSED | _PAGE_DIRTY | \
_PAGE_SPECIAL)
/* More Abbrevaited helpers */
#define PAGE_U_NONE __pgprot(___DEF)
#define PAGE_U_R __pgprot(___DEF | _PAGE_READ)
@@ -132,13 +132,7 @@
#define PTE_BITS_IN_PD0 (_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_HW_SZ)
#define PTE_BITS_RWX (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)
#ifdef CONFIG_ARC_HAS_PAE40
#define PTE_BITS_NON_RWX_IN_PD1 (0xff00000000 | PAGE_MASK | _PAGE_CACHEABLE)
#define MAX_POSSIBLE_PHYSMEM_BITS 40
#else
#define PTE_BITS_NON_RWX_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE)
#define MAX_POSSIBLE_PHYSMEM_BITS 32
#endif
#define PTE_BITS_NON_RWX_IN_PD1 (PAGE_MASK_PHYS | _PAGE_CACHEABLE)
/**************************************************************************
* Mapping of vm_flags (Generic VM) to PTE flags (arch specific)

@@ -33,5 +33,4 @@
#define PAGE_MASK (~(PAGE_SIZE-1))
#endif /* _UAPI__ASM_ARC_PAGE_H */

@@ -177,7 +177,7 @@ tracesys:
; Do the Sys Call as we normally would.
; Validate the Sys Call number
cmp r8, NR_syscalls
cmp r8, NR_syscalls - 1
mov.hi r0, -ENOSYS
bhi tracesys_exit
@@ -255,7 +255,7 @@ ENTRY(EV_Trap)
;============ Normal syscall case
; syscall num shd not exceed the total system calls avail
cmp r8, NR_syscalls
cmp r8, NR_syscalls - 1
mov.hi r0, -ENOSYS
bhi .Lret_from_system_call

@@ -158,7 +158,16 @@ void __init setup_arch_memory(void)
min_high_pfn = PFN_DOWN(high_mem_start);
max_high_pfn = PFN_DOWN(high_mem_start + high_mem_sz);
max_zone_pfn[ZONE_HIGHMEM] = min_low_pfn;
/*
* max_high_pfn should be ok here for both HIGHMEM and HIGHMEM+PAE.
* For HIGHMEM without PAE max_high_pfn should be less than
* min_low_pfn to guarantee that these two regions don't overlap.
* For PAE case highmem is greater than lowmem, so it is natural
* to use max_high_pfn.
*
* In both cases, holes should be handled by pfn_valid().
*/
max_zone_pfn[ZONE_HIGHMEM] = max_high_pfn;
high_memory = (void *)(min_high_pfn << PAGE_SHIFT);
kmap_init();

@@ -53,9 +53,10 @@ EXPORT_SYMBOL(ioremap);
void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
unsigned long flags)
{
unsigned int off;
unsigned long vaddr;
struct vm_struct *area;
phys_addr_t off, end;
phys_addr_t end;
pgprot_t prot = __pgprot(flags);
/* Don't allow wraparound, zero size */
@@ -72,7 +73,7 @@ void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
/* Mappings have to be page-aligned */
off = paddr & ~PAGE_MASK;
paddr &= PAGE_MASK;
paddr &= PAGE_MASK_PHYS;
size = PAGE_ALIGN(end + 1) - paddr;
/*

@@ -576,7 +576,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
pte_t *ptep)
{
unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK;
phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK_PHYS;
struct page *page = pfn_to_page(pte_pfn(*ptep));
create_tlb(vma, vaddr, ptep);

@@ -1168,7 +1168,7 @@
};
};
target-module@34000 { /* 0x48034000, ap 7 46.0 */
timer3_target: target-module@34000 { /* 0x48034000, ap 7 46.0 */
compatible = "ti,sysc-omap4-timer", "ti,sysc";
reg = <0x34000 0x4>,
<0x34010 0x4>;
@@ -1195,7 +1195,7 @@
};
};
target-module@36000 { /* 0x48036000, ap 9 4e.0 */
timer4_target: target-module@36000 { /* 0x48036000, ap 9 4e.0 */
compatible = "ti,sysc-omap4-timer", "ti,sysc";
reg = <0x36000 0x4>,
<0x36010 0x4>;

@@ -46,6 +46,7 @@
timer {
compatible = "arm,armv7-timer";
status = "disabled"; /* See ARM architected timer wrap erratum i940 */
interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
@@ -1090,3 +1091,22 @@
assigned-clock-parents = <&sys_32k_ck>;
};
};
/* Local timers, see ARM architected timer wrap erratum i940 */
&timer3_target {
ti,no-reset-on-init;
ti,no-idle;
timer@0 {
assigned-clocks = <&l4per_clkctrl DRA7_L4PER_TIMER3_CLKCTRL 24>;
assigned-clock-parents = <&timer_sys_clk_div>;
};
};
&timer4_target {
ti,no-reset-on-init;
ti,no-idle;
timer@0 {
assigned-clocks = <&l4per_clkctrl DRA7_L4PER_TIMER4_CLKCTRL 24>;
assigned-clock-parents = <&timer_sys_clk_div>;
};
};

@@ -2,7 +2,7 @@
#ifndef _ASM_FIXMAP_H
#define _ASM_FIXMAP_H
#define FIXADDR_START 0xffc00000UL
#define FIXADDR_START 0xffc80000UL
#define FIXADDR_END 0xfff00000UL
#define FIXADDR_TOP (FIXADDR_END - PAGE_SIZE)

@@ -67,6 +67,10 @@
*/
#define XIP_VIRT_ADDR(physaddr) (MODULES_VADDR + ((physaddr) & 0x000fffff))
#define FDT_FIXED_BASE UL(0xff800000)
#define FDT_FIXED_SIZE (2 * SECTION_SIZE)
#define FDT_VIRT_BASE(physbase) ((void *)(FDT_FIXED_BASE | (physbase) % SECTION_SIZE))
#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
* Allow 16MB-aligned ioremap pages
@@ -107,6 +111,7 @@ extern unsigned long vectors_base;
#define MODULES_VADDR PAGE_OFFSET
#define XIP_VIRT_ADDR(physaddr) (physaddr)
#define FDT_VIRT_BASE(physbase) ((void *)(physbase))
#endif /* !CONFIG_MMU */

@@ -9,12 +9,12 @@
#ifdef CONFIG_OF
extern const struct machine_desc *setup_machine_fdt(unsigned int dt_phys);
extern const struct machine_desc *setup_machine_fdt(void *dt_virt);
extern void __init arm_dt_init_cpu_maps(void);
#else /* CONFIG_OF */
static inline const struct machine_desc *setup_machine_fdt(unsigned int dt_phys)
static inline const struct machine_desc *setup_machine_fdt(void *dt_virt)
{
return NULL;
}

@@ -2,11 +2,11 @@
void convert_to_tag_list(struct tag *tags);
#ifdef CONFIG_ATAGS
const struct machine_desc *setup_machine_tags(phys_addr_t __atags_pointer,
const struct machine_desc *setup_machine_tags(void *__atags_vaddr,
unsigned int machine_nr);
#else
static inline const struct machine_desc * __init __noreturn
setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr)
setup_machine_tags(void *__atags_vaddr, unsigned int machine_nr)
{
early_print("no ATAGS support: can't continue\n");
while (true);

@@ -174,7 +174,7 @@ static void __init squash_mem_tags(struct tag *tag)
}
const struct machine_desc * __init
setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr)
setup_machine_tags(void *atags_vaddr, unsigned int machine_nr)
{
struct tag *tags = (struct tag *)&default_tags;
const struct machine_desc *mdesc = NULL, *p;
@@ -195,8 +195,8 @@ setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr)
if (!mdesc)
return NULL;
if (__atags_pointer)
tags = phys_to_virt(__atags_pointer);
if (atags_vaddr)
tags = atags_vaddr;
else if (mdesc->atag_offset)
tags = (void *)(PAGE_OFFSET + mdesc->atag_offset);

@@ -203,12 +203,12 @@ static const void * __init arch_get_next_mach(const char *const **match)
/**
* setup_machine_fdt - Machine setup when an dtb was passed to the kernel
* @dt_phys: physical address of dt blob
* @dt_virt: virtual address of dt blob
*
* If a dtb was passed to the kernel in r2, then use it to choose the
* correct machine_desc and to setup the system.
*/
const struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
const struct machine_desc * __init setup_machine_fdt(void *dt_virt)
{
const struct machine_desc *mdesc, *mdesc_best = NULL;
@@ -221,7 +221,7 @@ const struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
mdesc_best = &__mach_desc_GENERIC_DT;
#endif
if (!dt_phys || !early_init_dt_verify(phys_to_virt(dt_phys)))
if (!dt_virt || !early_init_dt_verify(dt_virt))
return NULL;
mdesc = of_flat_dt_match_machine(mdesc_best, arch_get_next_mach);

@@ -274,11 +274,10 @@ __create_page_tables:
* We map 2 sections in case the ATAGs/DTB crosses a section boundary.
*/
mov r0, r2, lsr #SECTION_SHIFT
movs r0, r0, lsl #SECTION_SHIFT
subne r3, r0, r8
addne r3, r3, #PAGE_OFFSET
addne r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER)
orrne r6, r7, r0
cmp r2, #0
ldrne r3, =FDT_FIXED_BASE >> (SECTION_SHIFT - PMD_ORDER)
addne r3, r3, r4
orrne r6, r7, r0, lsl #SECTION_SHIFT
strne r6, [r3], #1 << PMD_ORDER
addne r6, r6, #1 << SECTION_SHIFT
strne r6, [r3]

@@ -886,7 +886,7 @@ static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
info->trigger = addr;
pr_debug("breakpoint fired: address = 0x%x\n", addr);
perf_bp_event(bp, regs);
if (!bp->overflow_handler)
if (is_default_overflow_handler(bp))
enable_single_step(bp, addr);
goto unlock;
}

@@ -18,6 +18,7 @@
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/libfdt.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
@@ -1095,19 +1096,27 @@ static struct notifier_block arm_restart_nb = {
void __init setup_arch(char **cmdline_p)
{
const struct machine_desc *mdesc;
const struct machine_desc *mdesc = NULL;
void *atags_vaddr = NULL;
if (__atags_pointer)
atags_vaddr = FDT_VIRT_BASE(__atags_pointer);
setup_processor();
mdesc = setup_machine_fdt(__atags_pointer);
if (atags_vaddr) {
mdesc = setup_machine_fdt(atags_vaddr);
if (mdesc)
memblock_reserve(__atags_pointer,
fdt_totalsize(atags_vaddr));
}
if (!mdesc)
mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
mdesc = setup_machine_tags(atags_vaddr, __machine_arch_type);
if (!mdesc) {
early_print("\nError: invalid dtb and unrecognized/unsupported machine ID\n");
early_print(" r1=0x%08x, r2=0x%08x\n", __machine_arch_type,
__atags_pointer);
if (__atags_pointer)
early_print(" r2[]=%*ph\n", 16,
phys_to_virt(__atags_pointer));
early_print(" r2[]=%*ph\n", 16, atags_vaddr);
dump_machine_table();
}

@@ -223,7 +223,6 @@ void __init arm_memblock_init(const struct machine_desc *mdesc)
if (mdesc->reserve)
mdesc->reserve();
early_init_fdt_reserve_self();
early_init_fdt_scan_reserved_mem();
/* reserve memory for DMA contiguous allocations */

@@ -39,6 +39,8 @@
#include "mm.h"
#include "tcm.h"
extern unsigned long __atags_pointer;
/*
* empty_zero_page is a special page that is used for
* zero-initialized data and COW.
@@ -946,7 +948,7 @@ static void __init create_mapping(struct map_desc *md)
return;
}
if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
if (md->type == MT_DEVICE &&
md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START &&
(md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
@@ -1333,6 +1335,15 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
for (addr = VMALLOC_START; addr < (FIXADDR_TOP & PMD_MASK); addr += PMD_SIZE)
pmd_clear(pmd_off_k(addr));
if (__atags_pointer) {
/* create a read-only mapping of the device tree */
map.pfn = __phys_to_pfn(__atags_pointer & SECTION_MASK);
map.virtual = FDT_FIXED_BASE;
map.length = FDT_FIXED_SIZE;
map.type = MT_ROM;
create_mapping(&map);
}
/*
* Map the kernel if it is XIP.
* It is always first in the modulearea.
@@ -1489,8 +1500,7 @@ static void __init map_lowmem(void)
}
#ifdef CONFIG_ARM_PV_FIXUP
extern unsigned long __atags_pointer;
typedef void pgtables_remap(long long offset, unsigned long pgd, void *bdata);
typedef void pgtables_remap(long long offset, unsigned long pgd);
pgtables_remap lpae_pgtables_remap_asm;
/*
@@ -1503,7 +1513,6 @@ static void __init early_paging_init(const struct machine_desc *mdesc)
unsigned long pa_pgd;
unsigned int cr, ttbcr;
long long offset;
void *boot_data;
if (!mdesc->pv_fixup)
return;
@@ -1520,7 +1529,6 @@
*/
lpae_pgtables_remap = (pgtables_remap *)(unsigned long)__pa(lpae_pgtables_remap_asm);
pa_pgd = __pa(swapper_pg_dir);
boot_data = __va(__atags_pointer);
barrier();
pr_info("Switching physical address space to 0x%08llx\n",
@@ -1556,7 +1564,7 @@
* needs to be assembly. It's fairly simple, as we're using the
* temporary tables setup by the initial assembly code.
*/
lpae_pgtables_remap(offset, pa_pgd, boot_data);
lpae_pgtables_remap(offset, pa_pgd);
/* Re-enable the caches and cacheable TLB walks */
asm volatile("mcr p15, 0, %0, c2, c0, 2" : : "r" (ttbcr));

@@ -39,8 +39,8 @@ ENTRY(lpae_pgtables_remap_asm)
/* Update level 2 entries for the boot data */
add r7, r2, #0x1000
add r7, r7, r3, lsr #SECTION_SHIFT - L2_ORDER
bic r7, r7, #(1 << L2_ORDER) - 1
movw r3, #FDT_FIXED_BASE >> (SECTION_SHIFT - L2_ORDER)
add r7, r7, r3
ldrd r4, r5, [r7]
adds r4, r4, r0
adc r5, r5, r1

@@ -131,6 +131,9 @@ static inline void local_daif_inherit(struct pt_regs *regs)
if (interrupts_enabled(regs))
trace_hardirqs_on();
if (system_uses_irq_prio_masking())
gic_write_pmr(regs->pmr_save);
/*
* We can't use local_daif_restore(regs->pstate) here as
* system_has_prio_mask_debugging() won't restore the I bit if it can

@@ -181,14 +181,6 @@ static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
{
unsigned long far = read_sysreg(far_el1);
/*
* The CPU masked interrupts, and we are leaving them masked during
* do_debug_exception(). Update PMR as if we had called
* local_daif_mask().
*/
if (system_uses_irq_prio_masking())
gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
arm64_enter_el1_dbg(regs);
do_debug_exception(far, esr, regs);
arm64_exit_el1_dbg(regs);
@@ -354,9 +346,6 @@ static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
/* Only watchpoints write FAR_EL1, otherwise its UNKNOWN */
unsigned long far = read_sysreg(far_el1);
if (system_uses_irq_prio_masking())
gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
enter_from_user_mode();
do_debug_exception(far, esr, regs);
local_daif_restore(DAIF_PROCCTX_NOIRQ);
@@ -364,9 +353,6 @@
static void noinstr el0_svc(struct pt_regs *regs)
{
if (system_uses_irq_prio_masking())
gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
enter_from_user_mode();
do_el0_svc(regs);
}
@@ -441,9 +427,6 @@ static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
static void noinstr el0_svc_compat(struct pt_regs *regs)
{
if (system_uses_irq_prio_masking())
gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
enter_from_user_mode();
do_el0_svc_compat(regs);
}

@@ -298,6 +298,8 @@ alternative_else_nop_endif
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
mrs_s x20, SYS_ICC_PMR_EL1
str x20, [sp, #S_PMR_SAVE]
mov x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET
msr_s SYS_ICC_PMR_EL1, x20
alternative_else_nop_endif
/* Re-enable tag checking (TCO set on exception entry) */
@@ -505,8 +507,8 @@ tsk .req x28 // current thread_info
/*
* Interrupt handling.
*/
.macro irq_handler
ldr_l x1, handle_arch_irq
.macro irq_handler, handler:req
ldr_l x1, \handler
mov x0, sp
irq_stack_entry
blr x1
@@ -536,13 +538,41 @@ alternative_endif
#endif
.endm
.macro gic_prio_irq_setup, pmr:req, tmp:req
#ifdef CONFIG_ARM64_PSEUDO_NMI
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
orr \tmp, \pmr, #GIC_PRIO_PSR_I_SET
msr_s SYS_ICC_PMR_EL1, \tmp
alternative_else_nop_endif
.macro el1_interrupt_handler, handler:req
enable_da_f
mov x0, sp
bl enter_el1_irq_or_nmi
irq_handler \handler
#ifdef CONFIG_PREEMPTION
ldr x24, [tsk, #TSK_TI_PREEMPT] // get preempt count
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
/*
* DA_F were cleared at start of handling. If anything is set in DAIF,
* we come back from an NMI, so skip preemption
*/
mrs x0, daif
orr x24, x24, x0
alternative_else_nop_endif
cbnz x24, 1f // preempt count != 0 || NMI return path
bl arm64_preempt_schedule_irq // irq en/disable is done inside
1:
#endif
mov x0, sp
bl exit_el1_irq_or_nmi
.endm
.macro el0_interrupt_handler, handler:req
user_exit_irqoff
enable_da_f
tbz x22, #55, 1f
bl do_el0_irq_bp_hardening
1:
irq_handler \handler
.endm
.text
@@ -674,32 +704,7 @@ SYM_CODE_END(el1_sync)
.align 6
SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
kernel_entry 1
gic_prio_irq_setup pmr=x20, tmp=x1
enable_da_f
mov x0, sp
bl enter_el1_irq_or_nmi
irq_handler
#ifdef CONFIG_PREEMPTION
ldr x24, [tsk, #TSK_TI_PREEMPT] // get preempt count
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
/*
* DA_F were cleared at start of handling. If anything is set in DAIF,
* we come back from an NMI, so skip preemption
*/
mrs x0, daif
orr x24, x24, x0
alternative_else_nop_endif
cbnz x24, 1f // preempt count != 0 || NMI return path
bl arm64_preempt_schedule_irq // irq en/disable is done inside
1:
#endif
mov x0, sp
bl exit_el1_irq_or_nmi
el1_interrupt_handler handle_arch_irq
kernel_exit 1
SYM_CODE_END(el1_irq)
@@ -739,22 +744,13 @@ SYM_CODE_END(el0_error_compat)
SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
kernel_entry 0
el0_irq_naked:
gic_prio_irq_setup pmr=x20, tmp=x0
user_exit_irqoff
enable_da_f
tbz x22, #55, 1f
bl do_el0_irq_bp_hardening
1:
irq_handler
el0_interrupt_handler handle_arch_irq
b ret_to_user
SYM_CODE_END(el0_irq)
SYM_CODE_START_LOCAL(el1_error)
kernel_entry 1
mrs x1, esr_el1
gic_prio_kentry_setup tmp=x2
enable_dbg
mov x0, sp
bl do_serror
@@ -765,7 +761,6 @@ SYM_CODE_START_LOCAL(el0_error)
kernel_entry 0
el0_error_naked:
mrs x25, esr_el1
gic_prio_kentry_setup tmp=x2
user_exit_irqoff
enable_dbg
mov x0, sp

@@ -55,8 +55,10 @@ void __sync_icache_dcache(pte_t pte)
{
struct page *page = pte_page(pte);
if (!test_and_set_bit(PG_dcache_clean, &page->flags))
if (!test_bit(PG_dcache_clean, &page->flags)) {
sync_icache_aliases(page_address(page), page_size(page));
set_bit(PG_dcache_clean, &page->flags);
}
}
EXPORT_SYMBOL_GPL(__sync_icache_dcache);

@@ -445,6 +445,18 @@ SYM_FUNC_START(__cpu_setup)
mov x10, #(SYS_GCR_EL1_RRND | SYS_GCR_EL1_EXCL_MASK)
msr_s SYS_GCR_EL1, x10
/*
* If GCR_EL1.RRND=1 is implemented the same way as RRND=0, then
* RGSR_EL1.SEED must be non-zero for IRG to produce
* pseudorandom numbers. As RGSR_EL1 is UNKNOWN out of reset, we
* must initialize it.
*/
mrs x10, CNTVCT_EL0
ands x10, x10, #SYS_RGSR_EL1_SEED_MASK
csinc x10, x10, xzr, ne
lsl x10, x10, #SYS_RGSR_EL1_SEED_SHIFT
msr_s SYS_RGSR_EL1, x10
/* clear any pending tag check faults in TFSR*_EL1 */
msr_s SYS_TFSR_EL1, xzr
msr_s SYS_TFSRE0_EL1, xzr

@@ -14,16 +14,20 @@
struct elf64_shdr; /* forward declration */
struct mod_arch_specific {
/* Used only at module load time. */
struct elf64_shdr *core_plt; /* core PLT section */
struct elf64_shdr *init_plt; /* init PLT section */
struct elf64_shdr *got; /* global offset table */
struct elf64_shdr *opd; /* official procedure descriptors */
struct elf64_shdr *unwind; /* unwind-table section */
unsigned long gp; /* global-pointer for module */
unsigned int next_got_entry; /* index of next available got entry */
/* Used at module run and cleanup time. */
void *core_unw_table; /* core unwind-table cookie returned by unwinder */
void *init_unw_table; /* init unwind-table cookie returned by unwinder */
unsigned int next_got_entry; /* index of next available got entry */
void *opd_addr; /* symbolize uses .opd to get to actual function */
unsigned long opd_size;
};
#define ARCH_SHF_SMALL SHF_IA_64_SHORT

@@ -905,9 +905,31 @@ register_unwind_table (struct module *mod)
int
module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod)
{
struct mod_arch_specific *mas = &mod->arch;
DEBUGP("%s: init: entry=%p\n", __func__, mod->init);
if (mod->arch.unwind)
if (mas->unwind)
register_unwind_table(mod);
/*
* ".opd" was already relocated to the final destination. Store
* it's address for use in symbolizer.
*/
mas->opd_addr = (void *)mas->opd->sh_addr;
mas->opd_size = mas->opd->sh_size;
/*
* Module relocation was already done at this point. Section
* headers are about to be deleted. Wipe out load-time context.
*/
mas->core_plt = NULL;
mas->init_plt = NULL;
mas->got = NULL;
mas->opd = NULL;
mas->unwind = NULL;
mas->gp = 0;
mas->next_got_entry = 0;
return 0;
}
@@ -926,10 +948,9 @@ module_arch_cleanup (struct module *mod)
void *dereference_module_function_descriptor(struct module *mod, void *ptr)
{
Elf64_Shdr *opd = mod->arch.opd;
struct mod_arch_specific *mas = &mod->arch;
if (ptr < (void *)opd->sh_addr ||
ptr >= (void *)(opd->sh_addr + opd->sh_size))
if (ptr < mas->opd_addr || ptr >= mas->opd_addr + mas->opd_size)
return ptr;
return dereference_function_descriptor(ptr);

@@ -1,5 +1,5 @@
/*
* Copyright (C) 2000, 2004 Maciej W. Rozycki
* Copyright (C) 2000, 2004, 2021 Maciej W. Rozycki
* Copyright (C) 2003, 07 Ralf Baechle (ralf@linux-mips.org)
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -9,25 +9,18 @@
#ifndef __ASM_DIV64_H
#define __ASM_DIV64_H
#include <asm-generic/div64.h>
#include <asm/bitsperlong.h>
#if BITS_PER_LONG == 64
#include <linux/types.h>
#if BITS_PER_LONG == 32
/*
* No traps on overflows for any of these...
*/
#define __div64_32(n, base) \
({ \
#define do_div64_32(res, high, low, base) ({ \
unsigned long __cf, __tmp, __tmp2, __i; \
unsigned long __quot32, __mod32; \
unsigned long __high, __low; \
unsigned long long __n; \
\
__high = *__n >> 32; \
__low = __n; \
__asm__( \
" .set push \n" \
" .set noat \n" \
@@ -51,18 +44,48 @@
" subu %0, %0, %z6 \n" \
" addiu %2, %2, 1 \n" \
"3: \n" \
" bnez %4, 0b\n\t" \
" srl %5, %1, 0x1f\n\t" \
" bnez %4, 0b \n" \
" srl %5, %1, 0x1f \n" \
" .set pop" \
: "=&r" (__mod32), "=&r" (__tmp), \
"=&r" (__quot32), "=&r" (__cf), \
"=&r" (__i), "=&r" (__tmp2) \
: "Jr" (base), "0" (__high), "1" (__low)); \
: "Jr" (base), "0" (high), "1" (low)); \
\
(__n) = __quot32; \
(res) = __quot32; \
__mod32; \
})
#endif /* BITS_PER_LONG == 64 */
#define __div64_32(n, base) ({ \
unsigned long __upper, __low, __high, __radix; \
unsigned long long __quot; \
unsigned long long __div; \
unsigned long __mod; \
\
__div = (*n); \
__radix = (base); \
\
__high = __div >> 32; \
__low = __div; \
\
if (__high < __radix) { \
__upper = __high; \
__high = 0; \
} else { \
__upper = __high % __radix; \
__high /= __radix; \
} \
\
__mod = do_div64_32(__low, __upper, __low, __radix); \
\
__quot = __high; \
__quot = __quot << 32 | __low; \
(*n) = __quot; \
__mod; \
})
#endif /* BITS_PER_LONG == 32 */
#include <asm-generic/div64.h>
#endif /* __ASM_DIV64_H */

@@ -1739,7 +1739,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
set_isa(c, MIPS_CPU_ISA_M64R2);
break;
}
c->writecombine = _CACHE_UNCACHED_ACCELERATED;
c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_EXT |
MIPS_ASE_LOONGSON_EXT2);
break;
@@ -1769,7 +1768,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
* register, we correct it here.
*/
c->options |= MIPS_CPU_FTLB | MIPS_CPU_TLBINV | MIPS_CPU_LDPTE;
c->writecombine = _CACHE_UNCACHED_ACCELERATED;
c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
MIPS_ASE_LOONGSON_EXT | MIPS_ASE_LOONGSON_EXT2);
c->ases &= ~MIPS_ASE_VZ; /* VZ of Loongson-3A2000/3000 is incomplete */
@@ -1780,7 +1778,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
set_elf_platform(cpu, "loongson3a");
set_isa(c, MIPS_CPU_ISA_M64R2);
decode_cpucfg(c);
c->writecombine = _CACHE_UNCACHED_ACCELERATED;
break;
default:
panic("Unknown Loongson Processor ID!");

@@ -338,11 +338,7 @@ label:
lis r1, emergency_ctx@ha
#endif
lwz r1, emergency_ctx@l(r1)
cmpwi cr1, r1, 0
bne cr1, 1f
lis r1, init_thread_union@ha
addi r1, r1, init_thread_union@l
1: addi r1, r1, THREAD_SIZE - INT_FRAME_SIZE
addi r1, r1, THREAD_SIZE - INT_FRAME_SIZE
EXCEPTION_PROLOG_2
SAVE_NVGPRS(r11)
addi r3, r1, STACK_FRAME_OVERHEAD

@@ -1050,7 +1050,7 @@ int iommu_take_ownership(struct iommu_table *tbl)
spin_lock_irqsave(&tbl->large_pool.lock, flags);
for (i = 0; i < tbl->nr_pools; i++)
spin_lock(&tbl->pools[i].lock);
spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
iommu_table_release_pages(tbl);
@@ -1078,7 +1078,7 @@ void iommu_release_ownership(struct iommu_table *tbl)
spin_lock_irqsave(&tbl->large_pool.lock, flags);
for (i = 0; i < tbl->nr_pools; i++)
spin_lock(&tbl->pools[i].lock);
spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
memset(tbl->it_map, 0, sz);

@@ -164,7 +164,7 @@ void __init irqstack_early_init(void)
}
#ifdef CONFIG_VMAP_STACK
void *emergency_ctx[NR_CPUS] __ro_after_init;
void *emergency_ctx[NR_CPUS] __ro_after_init = {[0] = &init_stack};
void __init emergency_stack_init(void)
{

@@ -1442,6 +1442,9 @@ void start_secondary(void *unused)
vdso_getcpu_init();
#endif
set_numa_node(numa_cpu_lookup_table[cpu]);
set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
/* Update topology CPU masks */
add_cpu_to_masks(cpu);
@@ -1460,9 +1463,6 @@ void start_secondary(void *unused)
shared_caches = true;
}
set_numa_node(numa_cpu_lookup_table[cpu]);
set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
smp_wmb();
notify_cpu_starting(cpu);
set_cpu_online(cpu, true);

@@ -14,6 +14,7 @@
#include <linux/string.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/stop_machine.h>
#include <asm/cputable.h>
#include <asm/code-patching.h>
#include <asm/page.h>
@@ -227,11 +228,25 @@ static void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
: "unknown");
}
static int __do_stf_barrier_fixups(void *data)
{
enum stf_barrier_type *types = data;
do_stf_entry_barrier_fixups(*types);
do_stf_exit_barrier_fixups(*types);
return 0;
}
void do_stf_barrier_fixups(enum stf_barrier_type types)
{
do_stf_entry_barrier_fixups(types);
do_stf_exit_barrier_fixups(types);
/*
* The call to the fallback entry flush, and the fallback/sync-ori exit
* flush can not be safely patched in/out while other CPUs are executing
* them. So call __do_stf_barrier_fixups() on one CPU while all other CPUs
* spin in the stop machine core with interrupts hard disabled.
*/
stop_machine(__do_stf_barrier_fixups, &types, NULL);
}
void do_uaccess_flush_fixups(enum l1d_flush_type types)
@@ -284,8 +299,9 @@ void do_uaccess_flush_fixups(enum l1d_flush_type types)
: "unknown");
}
void do_entry_flush_fixups(enum l1d_flush_type types)
static int __do_entry_flush_fixups(void *data)
{
enum l1d_flush_type types = *(enum l1d_flush_type *)data;
unsigned int instrs[3], *dest;
long *start, *end;
int i;
@@ -354,6 +370,19 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
: "ori type" :
(types & L1D_FLUSH_MTTRIG) ? "mttrig type"
: "unknown");
return 0;
}
void do_entry_flush_fixups(enum l1d_flush_type types)
{
/*
* The call to the fallback flush can not be safely patched in/out while
* other CPUs are executing it. So call __do_entry_flush_fixups() on one
* CPU while all other CPUs spin in the stop machine core with interrupts
* hard disabled.
*/
stop_machine(__do_entry_flush_fixups, &types, NULL);
}
void do_rfi_flush_fixups(enum l1d_flush_type types)

@@ -336,7 +336,7 @@ repeat:
int htab_remove_mapping(unsigned long vstart, unsigned long vend,
int psize, int ssize)
{
unsigned long vaddr;
unsigned long vaddr, time_limit;
unsigned int step, shift;
int rc;
int ret = 0;
@@ -349,8 +349,19 @@ int htab_remove_mapping(unsigned long vstart, unsigned long vend,
/* Unmap the full range specificied */
vaddr = ALIGN_DOWN(vstart, step);
time_limit = jiffies + HZ;
for (;vaddr < vend; vaddr += step) {
rc = mmu_hash_ops.hpte_removebolted(vaddr, psize, ssize);
/*
* For large number of mappings introduce a cond_resched()
* to prevent softlockup warnings.
*/
if (time_after(jiffies, time_limit)) {
cond_resched();
time_limit = jiffies + HZ;
}
if (rc == -ENOENT) {
ret = -ENOENT;
continue;

@@ -47,9 +47,6 @@ static void rtas_stop_self(void)
BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);
printk("cpu %u (hwid %u) Ready to die...\n",
smp_processor_id(), hard_smp_processor_id());
rtas_call_unlocked(&args, rtas_stop_self_token, 0, 1, NULL);
panic("Alas, I survived.\n");

@@ -54,7 +54,7 @@ int riscv_hartid_to_cpuid(int hartid)
return i;
pr_err("Couldn't find cpu id for hartid [%d]\n", hartid);
return i;
return -ENOENT;
}
void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out)

@@ -588,6 +588,21 @@ DECLARE_IDTENTRY_RAW(X86_TRAP_MC, exc_machine_check);
#endif
/* NMI */
#if defined(CONFIG_X86_64) && IS_ENABLED(CONFIG_KVM_INTEL)
/*
* Special NOIST entry point for VMX which invokes this on the kernel
* stack. asm_exc_nmi() requires an IST to work correctly vs. the NMI
* 'executing' marker.
*
* On 32bit this just uses the regular NMI entry point because 32-bit does
* not have ISTs.
*/
DECLARE_IDTENTRY(X86_TRAP_NMI, exc_nmi_noist);
#else
#define asm_exc_nmi_noist asm_exc_nmi
#endif
DECLARE_IDTENTRY_NMI(X86_TRAP_NMI, exc_nmi);
#ifdef CONFIG_XEN_PV
DECLARE_IDTENTRY_RAW(X86_TRAP_NMI, xenpv_exc_nmi);

@@ -358,8 +358,6 @@ struct kvm_mmu {
int (*sync_page)(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp);
void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa);
void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
u64 *spte, const void *pte);
hpa_t root_hpa;
gpa_t root_pgd;
union kvm_mmu_role mmu_role;
@@ -1019,7 +1017,6 @@ struct kvm_arch {
struct kvm_vm_stat {
ulong mmu_shadow_zapped;
ulong mmu_pte_write;
ulong mmu_pte_updated;
ulong mmu_pde_zapped;
ulong mmu_flooded;
ulong mmu_recycled;
@@ -1671,6 +1668,7 @@ int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
unsigned long icr, int op_64_bit);
void kvm_define_user_return_msr(unsigned index, u32 msr);
int kvm_probe_user_return_msr(u32 msr);
int kvm_set_user_return_msr(unsigned index, u64 val, u64 mask);
u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);

@@ -524,6 +524,16 @@ nmi_restart:
mds_user_clear_cpu_buffers();
}
#if defined(CONFIG_X86_64) && IS_ENABLED(CONFIG_KVM_INTEL)
DEFINE_IDTENTRY_RAW(exc_nmi_noist)
{
exc_nmi(regs);
}
#endif
#if IS_MODULE(CONFIG_KVM_INTEL)
EXPORT_SYMBOL_GPL(asm_exc_nmi_noist);
#endif
void stop_nmi(void)
{
ignore_nmis++;

@@ -572,7 +572,8 @@ static int __do_cpuid_func_emulated(struct kvm_cpuid_array *array, u32 func)
case 7:
entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
entry->eax = 0;
entry->ecx = F(RDPID);
if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
entry->ecx = F(RDPID);
++array->nent;
default:
break;

@@ -4502,7 +4502,7 @@ static const struct opcode group8[] = {
* from the register case of group9.
*/
static const struct gprefix pfx_0f_c7_7 = {
N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdtscp),
N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdpid),
};

@@ -468,6 +468,7 @@ enum x86_intercept {
x86_intercept_clgi,
x86_intercept_skinit,
x86_intercept_rdtscp,
x86_intercept_rdpid,
x86_intercept_icebp,
x86_intercept_wbinvd,
x86_intercept_monitor,

@@ -1908,8 +1908,8 @@ void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
if (!apic->lapic_timer.hv_timer_in_use)
goto out;
WARN_ON(rcuwait_active(&vcpu->wait));
cancel_hv_timer(apic);
apic_timer_expired(apic, false);
cancel_hv_timer(apic);
if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
advance_periodic_target_expiration(apic);

@@ -1715,13 +1715,6 @@ static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
return 0;
}
static void nonpaging_update_pte(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp, u64 *spte,
const void *pte)
{
WARN_ON(1);
}
#define KVM_PAGE_ARRAY_NR 16
struct kvm_mmu_pages {
@@ -3820,7 +3813,6 @@ static void nonpaging_init_context(struct kvm_vcpu *vcpu,
context->gva_to_gpa = nonpaging_gva_to_gpa;
context->sync_page = nonpaging_sync_page;
context->invlpg = NULL;
context->update_pte = nonpaging_update_pte;
context->root_level = 0;
context->shadow_root_level = PT32E_ROOT_LEVEL;
context->direct_map = true;
@@ -4402,7 +4394,6 @@ static void paging64_init_context_common(struct kvm_vcpu *vcpu,
context->gva_to_gpa = paging64_gva_to_gpa;
context->sync_page = paging64_sync_page;
context->invlpg = paging64_invlpg;
context->update_pte = paging64_update_pte;
context->shadow_root_level = level;
context->direct_map = false;
}
@@ -4431,7 +4422,6 @@ static void paging32_init_context(struct kvm_vcpu *vcpu,
context->gva_to_gpa = paging32_gva_to_gpa;
context->sync_page = paging32_sync_page;
context->invlpg = paging32_invlpg;
context->update_pte = paging32_update_pte;
context->shadow_root_level = PT32E_ROOT_LEVEL;
context->direct_map = false;
}
@@ -4513,7 +4503,6 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
context->page_fault = kvm_tdp_page_fault;
context->sync_page = nonpaging_sync_page;
context->invlpg = NULL;
context->update_pte = nonpaging_update_pte;
context->shadow_root_level = kvm_mmu_get_tdp_level(vcpu);
context->direct_map = true;
context->get_guest_pgd = get_cr3;
@@ -4690,7 +4679,6 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
context->gva_to_gpa = ept_gva_to_gpa;
context->sync_page = ept_sync_page;
context->invlpg = ept_invlpg;
context->update_pte = ept_update_pte;
context->root_level = level;
context->direct_map = false;
context->mmu_role.as_u64 = new_role.as_u64;
@@ -4838,19 +4826,6 @@ void kvm_mmu_unload(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_mmu_unload);
static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp, u64 *spte,
const void *new)
{
if (sp->role.level != PG_LEVEL_4K) {
++vcpu->kvm->stat.mmu_pde_zapped;
return;
}
++vcpu->kvm->stat.mmu_pte_updated;
vcpu->arch.mmu->update_pte(vcpu, sp, spte, new);
}
static bool need_remote_flush(u64 old, u64 new)
{
if (!is_shadow_present_pte(old))
@@ -4966,22 +4941,6 @@ static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
return spte;
}
/*
* Ignore various flags when determining if a SPTE can be immediately
* overwritten for the current MMU.
* - level: explicitly checked in mmu_pte_write_new_pte(), and will never
* match the current MMU role, as MMU's level tracks the root level.
* - access: updated based on the new guest PTE
* - quadrant: handled by get_written_sptes()
* - invalid: always false (loop only walks valid shadow pages)
*/
static const union kvm_mmu_page_role role_ign = {
.level = 0xf,
.access = 0x7,
.quadrant = 0x3,
.invalid = 0x1,
};
static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
const u8 *new, int bytes,
struct kvm_page_track_notifier_node *node)
@@ -5032,14 +4991,10 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
local_flush = true;
while (npte--) {
u32 base_role = vcpu->arch.mmu->mmu_role.base.word;
entry = *spte;
mmu_page_zap_pte(vcpu->kvm, sp, spte, NULL);
if (gentry &&
!((sp->role.word ^ base_role) & ~role_ign.word) &&
rmap_can_add(vcpu))
mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
if (gentry && sp->role.level != PG_LEVEL_4K)
++vcpu->kvm->stat.mmu_pde_zapped;
if (need_remote_flush(entry, *spte))
remote_flush = true;
++spte;

@@ -3139,15 +3139,8 @@ static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu)
nested_vmx_handle_enlightened_vmptrld(vcpu, false);
if (evmptrld_status == EVMPTRLD_VMFAIL ||
evmptrld_status == EVMPTRLD_ERROR) {
pr_debug_ratelimited("%s: enlightened vmptrld failed\n",
__func__);
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror =
KVM_INTERNAL_ERROR_EMULATION;
vcpu->run->internal.ndata = 0;
evmptrld_status == EVMPTRLD_ERROR)
return false;
}
}
return true;
@@ -3235,8 +3228,16 @@
static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu)
{
if (!nested_get_evmcs_page(vcpu))
if (!nested_get_evmcs_page(vcpu)) {
pr_debug_ratelimited("%s: enlightened vmptrld failed\n",
__func__);
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror =
KVM_INTERNAL_ERROR_EMULATION;
vcpu->run->internal.ndata = 0;
return false;
}
if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu))
return false;
@@ -4441,7 +4442,15 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
/* trying to cancel vmlaunch/vmresume is a bug */
WARN_ON_ONCE(vmx->nested.nested_run_pending);
kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
/*
* KVM_REQ_GET_NESTED_STATE_PAGES is also used to map
* Enlightened VMCS after migration and we still need to
* do that when something is forcing L2->L1 exit prior to
* the first L2 run.
*/
(void)nested_get_evmcs_page(vcpu);
}
/* Service the TLB flush request for L2 before switching to L1. */
if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))

@@ -36,6 +36,7 @@
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/fpu/internal.h>
#include <asm/idtentry.h>
#include <asm/io.h>
#include <asm/irq_remapping.h>
#include <asm/kexec.h>
@@ -6354,18 +6355,17 @@ static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu)
void vmx_do_interrupt_nmi_irqoff(unsigned long entry);
static void handle_interrupt_nmi_irqoff(struct kvm_vcpu *vcpu, u32 intr_info)
static void handle_interrupt_nmi_irqoff(struct kvm_vcpu *vcpu,
unsigned long entry)
{
unsigned int vector = intr_info & INTR_INFO_VECTOR_MASK;
gate_desc *desc = (gate_desc *)host_idt_base + vector;
kvm_before_interrupt(vcpu);
vmx_do_interrupt_nmi_irqoff(gate_offset(desc));
vmx_do_interrupt_nmi_irqoff(entry);
kvm_after_interrupt(vcpu);
}
static void handle_exception_nmi_irqoff(struct vcpu_vmx *vmx)
{
const unsigned long nmi_entry = (unsigned long)asm_exc_nmi_noist;
u32 intr_info = vmx_get_intr_info(&vmx->vcpu);
/* if exit due to PF check for async PF */
@@ -6376,18 +6376,20 @@ static void handle_exception_nmi_irqoff(struct vcpu_vmx *vmx)
kvm_machine_check();
/* We need to handle NMIs before interrupts are enabled */
else if (is_nmi(intr_info))
handle_interrupt_nmi_irqoff(&vmx->vcpu, intr_info);
handle_interrupt_nmi_irqoff(&vmx->vcpu, nmi_entry);
}
static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu)
{
u32 intr_info = vmx_get_intr_info(vcpu);
unsigned int vector = intr_info & INTR_INFO_VECTOR_MASK;
gate_desc *desc = (gate_desc *)host_idt_base + vector;
if (WARN_ONCE(!is_external_intr(intr_info),
"KVM: unexpected VM-Exit interrupt info: 0x%x", intr_info))
return;
handle_interrupt_nmi_irqoff(vcpu, intr_info);
handle_interrupt_nmi_irqoff(vcpu, gate_offset(desc));
}
static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
@@ -6862,12 +6864,9 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
for (i = 0; i < ARRAY_SIZE(vmx_uret_msrs_list); ++i) {
u32 index = vmx_uret_msrs_list[i];
u32 data_low, data_high;
int j = vmx->nr_uret_msrs;
if (rdmsr_safe(index, &data_low, &data_high) < 0)
continue;
if (wrmsr_safe(index, data_low, data_high) < 0)
if (kvm_probe_user_return_msr(index))
continue;
vmx->guest_uret_msrs[j].slot = i;
@@ -7300,9 +7299,11 @@ static __init void vmx_set_cpu_caps(void)
if (!cpu_has_vmx_xsaves())
kvm_cpu_cap_clear(X86_FEATURE_XSAVES);
/* CPUID 0x80000001 */
if (!cpu_has_vmx_rdtscp())
/* CPUID 0x80000001 and 0x7 (RDPID) */
if (!cpu_has_vmx_rdtscp()) {
kvm_cpu_cap_clear(X86_FEATURE_RDTSCP);
kvm_cpu_cap_clear(X86_FEATURE_RDPID);
}
if (cpu_has_vmx_waitpkg())
kvm_cpu_cap_check_and_set(X86_FEATURE_WAITPKG);
@@ -7358,8 +7359,9 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu,
/*
* RDPID causes #UD if disabled through secondary execution controls.
* Because it is marked as EmulateOnUD, we need to intercept it here.
* Note, RDPID is hidden behind ENABLE_RDTSCP.
*/
case x86_intercept_rdtscp:
case x86_intercept_rdpid:
if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_RDTSCP)) {
exception->vector = UD_VECTOR;
exception->error_code_valid = false;


@@ -233,7 +233,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
VM_STAT("mmu_shadow_zapped", mmu_shadow_zapped),
VM_STAT("mmu_pte_write", mmu_pte_write),
VM_STAT("mmu_pte_updated", mmu_pte_updated),
VM_STAT("mmu_pde_zapped", mmu_pde_zapped),
VM_STAT("mmu_flooded", mmu_flooded),
VM_STAT("mmu_recycled", mmu_recycled),
@@ -323,6 +322,22 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
}
}
int kvm_probe_user_return_msr(u32 msr)
{
u64 val;
int ret;
preempt_disable();
ret = rdmsrl_safe(msr, &val);
if (ret)
goto out;
ret = wrmsrl_safe(msr, val);
out:
preempt_enable();
return ret;
}
EXPORT_SYMBOL_GPL(kvm_probe_user_return_msr);
void kvm_define_user_return_msr(unsigned slot, u32 msr)
{
BUG_ON(slot >= KVM_MAX_NR_USER_RETURN_MSRS);
@@ -7849,6 +7864,18 @@ static void pvclock_gtod_update_fn(struct work_struct *work)
static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);
/*
* Indirection to move queue_work() out of the tk_core.seq write held
* region to prevent possible deadlocks against time accessors which
* are invoked with work related locks held.
*/
static void pvclock_irq_work_fn(struct irq_work *w)
{
queue_work(system_long_wq, &pvclock_gtod_work);
}
static DEFINE_IRQ_WORK(pvclock_irq_work, pvclock_irq_work_fn);
/*
* Notification about pvclock gtod data update.
*/
@@ -7860,13 +7887,14 @@ static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused,
update_pvclock_gtod(tk);
/* disable master clock if host does not trust, or does not
* use, TSC based clocksource.
/*
* Disable master clock if host does not trust, or does not use,
* TSC based clocksource. Delegate queue_work() to irq_work as
* this is invoked with tk_core.seq write held.
*/
if (!gtod_is_based_on_tsc(gtod->clock.vclock_mode) &&
atomic_read(&kvm_guest_has_master_clock) != 0)
queue_work(system_long_wq, &pvclock_gtod_work);
irq_work_queue(&pvclock_irq_work);
return 0;
}
@@ -7982,6 +8010,8 @@ void kvm_arch_exit(void)
cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE);
#ifdef CONFIG_X86_64
pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier);
irq_work_sync(&pvclock_irq_work);
cancel_work_sync(&pvclock_gtod_work);
#endif
kvm_x86_ops.hardware_enable = NULL;
kvm_mmu_module_exit();


@@ -2210,10 +2210,9 @@ static void bfq_remove_request(struct request_queue *q,
}
static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
static bool bfq_bio_merge(struct request_queue *q, struct bio *bio,
unsigned int nr_segs)
{
struct request_queue *q = hctx->queue;
struct bfq_data *bfqd = q->elevator->elevator_data;
struct request *free = NULL;
/*


@@ -1023,7 +1023,17 @@ static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
lockdep_assert_held(&ioc->lock);
inuse = clamp_t(u32, inuse, 1, active);
/*
* For an active leaf node, its inuse shouldn't be zero or exceed
* @active. An active internal node's inuse is solely determined by the
* inuse to active ratio of its children regardless of @inuse.
*/
if (list_empty(&iocg->active_list) && iocg->child_active_sum) {
inuse = DIV64_U64_ROUND_UP(active * iocg->child_inuse_sum,
iocg->child_active_sum);
} else {
inuse = clamp_t(u32, inuse, 1, active);
}
iocg->last_inuse = iocg->inuse;
if (save)
@@ -1040,7 +1050,7 @@ static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
/* update the level sums */
parent->child_active_sum += (s32)(active - child->active);
parent->child_inuse_sum += (s32)(inuse - child->inuse);
/* apply the udpates */
/* apply the updates */
child->active = active;
child->inuse = inuse;


@@ -348,14 +348,16 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
unsigned int nr_segs)
{
struct elevator_queue *e = q->elevator;
struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
struct blk_mq_ctx *ctx;
struct blk_mq_hw_ctx *hctx;
bool ret = false;
enum hctx_type type;
if (e && e->type->ops.bio_merge)
return e->type->ops.bio_merge(hctx, bio, nr_segs);
return e->type->ops.bio_merge(q, bio, nr_segs);
ctx = blk_mq_get_ctx(q);
hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
type = hctx->type;
if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||
list_empty_careful(&ctx->rq_lists[type]))


@@ -2204,8 +2204,9 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
/* Bypass scheduler for flush requests */
blk_insert_flush(rq);
blk_mq_run_hw_queue(data.hctx, true);
} else if (plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs ||
!blk_queue_nonrot(q))) {
} else if (plug && (q->nr_hw_queues == 1 ||
blk_mq_is_sbitmap_shared(rq->mq_hctx->flags) ||
q->mq_ops->commit_rqs || !blk_queue_nonrot(q))) {
/*
* Use plugging if we have a ->commit_rqs() hook as well, as
* we know the driver uses bd->last in a smart fashion.


@@ -562,11 +562,12 @@ static void kyber_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
}
}
static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
static bool kyber_bio_merge(struct request_queue *q, struct bio *bio,
unsigned int nr_segs)
{
struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
struct kyber_hctx_data *khd = hctx->sched_data;
struct blk_mq_ctx *ctx = blk_mq_get_ctx(hctx->queue);
struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]];
unsigned int sched_domain = kyber_sched_domain(bio->bi_opf);
struct list_head *rq_list = &kcq->rq_list[sched_domain];


@@ -461,10 +461,9 @@ static int dd_request_merge(struct request_queue *q, struct request **rq,
return ELEVATOR_NO_MERGE;
}
static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
unsigned int nr_segs)
{
struct request_queue *q = hctx->queue;
struct deadline_data *dd = q->elevator->elevator_data;
struct request *free = NULL;
bool ret;


@@ -1301,6 +1301,7 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
{"PNP0C0B", }, /* Generic ACPI fan */
{"INT3404", }, /* Fan */
{"INTC1044", }, /* Fan for Tiger Lake generation */
{"INTC1048", }, /* Fan for Alder Lake generation */
{}
};
struct acpi_device *adev = ACPI_COMPANION(dev);


@@ -705,6 +705,7 @@ int acpi_device_add(struct acpi_device *device,
result = acpi_device_set_name(device, acpi_device_bus_id);
if (result) {
kfree_const(acpi_device_bus_id->bus_id);
kfree(acpi_device_bus_id);
goto err_unlock;
}


@@ -1637,6 +1637,7 @@ void pm_runtime_init(struct device *dev)
dev->power.request_pending = false;
dev->power.request = RPM_REQ_NONE;
dev->power.deferred_resume = false;
dev->power.needs_force_resume = 0;
INIT_WORK(&dev->power.work, pm_runtime_work);
dev->power.timer_expires = 0;
@@ -1804,10 +1805,12 @@ int pm_runtime_force_suspend(struct device *dev)
* its parent, but set its status to RPM_SUSPENDED anyway in case this
* function will be called again for it in the meantime.
*/
if (pm_runtime_need_not_resume(dev))
if (pm_runtime_need_not_resume(dev)) {
pm_runtime_set_suspended(dev);
else
} else {
__update_runtime_status(dev, RPM_SUSPENDED);
dev->power.needs_force_resume = 1;
}
return 0;
@@ -1834,7 +1837,7 @@ int pm_runtime_force_resume(struct device *dev)
int (*callback)(struct device *);
int ret = 0;
if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume)
goto out;
/*
@@ -1853,6 +1856,7 @@ int pm_runtime_force_resume(struct device *dev)
pm_runtime_mark_last_busy(dev);
out:
dev->power.needs_force_resume = 0;
pm_runtime_enable(dev);
return ret;
}


@@ -2031,7 +2031,8 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
* config ref and try to destroy the workqueue from inside the work
* queue.
*/
flush_workqueue(nbd->recv_workq);
if (nbd->recv_workq)
flush_workqueue(nbd->recv_workq);
if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
&nbd->config->runtime_flags))
nbd_config_put(nbd);


@@ -679,7 +679,11 @@ static void remap_devs(struct rnbd_clt_session *sess)
return;
}
rtrs_clt_query(sess->rtrs, &attrs);
err = rtrs_clt_query(sess->rtrs, &attrs);
if (err) {
pr_err("rtrs_clt_query(\"%s\"): %d\n", sess->sessname, err);
return;
}
mutex_lock(&sess->lock);
sess->max_io_size = attrs.max_io_size;
@@ -1211,7 +1215,11 @@ find_and_get_or_create_sess(const char *sessname,
err = PTR_ERR(sess->rtrs);
goto wake_up_and_put;
}
rtrs_clt_query(sess->rtrs, &attrs);
err = rtrs_clt_query(sess->rtrs, &attrs);
if (err)
goto close_rtrs;
sess->max_io_size = attrs.max_io_size;
sess->queue_depth = attrs.queue_depth;


@@ -79,7 +79,7 @@ struct rnbd_clt_session {
DECLARE_BITMAP(cpu_queues_bm, NR_CPUS);
int __percpu *cpu_rr; /* per-cpu var for CPU round-robin */
atomic_t busy;
int queue_depth;
size_t queue_depth;
u32 max_io_size;
struct blk_mq_tag_set tag_set;
struct mutex lock; /* protects state and devs_list */


@@ -392,7 +392,9 @@ static const struct usb_device_id blacklist_table[] = {
/* MediaTek Bluetooth devices */
{ USB_VENDOR_AND_INTERFACE_INFO(0x0e8d, 0xe0, 0x01, 0x01),
.driver_info = BTUSB_MEDIATEK },
.driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
/* Additional Realtek 8723AE Bluetooth devices */
{ USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK },


@@ -656,6 +656,7 @@ int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip)
if (nr_commands !=
be32_to_cpup((__be32 *)&buf.data[TPM_HEADER_SIZE + 5])) {
rc = -EFAULT;
tpm_buf_destroy(&buf);
goto out;
}


@@ -709,16 +709,14 @@ static int tpm_tis_gen_interrupt(struct tpm_chip *chip)
cap_t cap;
int ret;
/* TPM 2.0 */
if (chip->flags & TPM_CHIP_FLAG_TPM2)
return tpm2_get_tpm_pt(chip, 0x100, &cap2, desc);
/* TPM 1.2 */
ret = request_locality(chip, 0);
if (ret < 0)
return ret;
ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0);
if (chip->flags & TPM_CHIP_FLAG_TPM2)
ret = tpm2_get_tpm_pt(chip, 0x100, &cap2, desc);
else
ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0);
release_locality(chip, 0);
@@ -1127,12 +1125,20 @@ int tpm_tis_resume(struct device *dev)
if (ret)
return ret;
/* TPM 1.2 requires self-test on resume. This function actually returns
/*
* TPM 1.2 requires self-test on resume. This function actually returns
* an error code but for unknown reason it isn't handled.
*/
if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
ret = request_locality(chip, 0);
if (ret < 0)
return ret;
tpm1_do_selftest(chip);
release_locality(chip, 0);
}
return 0;
}
EXPORT_SYMBOL_GPL(tpm_tis_resume);


@@ -537,8 +537,13 @@ static const struct samsung_gate_clock top1_gate_clks[] __initconst = {
GATE(CLK_ACLK_FSYS0_200, "aclk_fsys0_200", "dout_aclk_fsys0_200",
ENABLE_ACLK_TOP13, 28, CLK_SET_RATE_PARENT |
CLK_IS_CRITICAL, 0),
/*
* This clock is required for the CMU_FSYS1 registers access, keep it
* enabled permanently until proper runtime PM support is added.
*/
GATE(CLK_ACLK_FSYS1_200, "aclk_fsys1_200", "dout_aclk_fsys1_200",
ENABLE_ACLK_TOP13, 24, CLK_SET_RATE_PARENT, 0),
ENABLE_ACLK_TOP13, 24, CLK_SET_RATE_PARENT |
CLK_IS_CRITICAL, 0),
GATE(CLK_SCLK_PHY_FSYS1_26M, "sclk_phy_fsys1_26m",
"dout_sclk_phy_fsys1_26m", ENABLE_SCLK_TOP1_FSYS11,


@@ -2,6 +2,7 @@
#include <linux/clk.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpuhotplug.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
@@ -530,17 +531,17 @@ static void omap_clockevent_unidle(struct clock_event_device *evt)
writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->wakeup);
}
static int __init dmtimer_clockevent_init(struct device_node *np)
static int __init dmtimer_clkevt_init_common(struct dmtimer_clockevent *clkevt,
struct device_node *np,
unsigned int features,
const struct cpumask *cpumask,
const char *name,
int rating)
{
struct dmtimer_clockevent *clkevt;
struct clock_event_device *dev;
struct dmtimer_systimer *t;
int error;
clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL);
if (!clkevt)
return -ENOMEM;
t = &clkevt->t;
dev = &clkevt->dev;
@@ -548,25 +549,23 @@ static int __init dmtimer_clockevent_init(struct device_node *np)
* We mostly use cpuidle_coupled with ARM local timers for runtime,
* so there's probably no use for CLOCK_EVT_FEAT_DYNIRQ here.
*/
dev->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
dev->rating = 300;
dev->features = features;
dev->rating = rating;
dev->set_next_event = dmtimer_set_next_event;
dev->set_state_shutdown = dmtimer_clockevent_shutdown;
dev->set_state_periodic = dmtimer_set_periodic;
dev->set_state_oneshot = dmtimer_clockevent_shutdown;
dev->set_state_oneshot_stopped = dmtimer_clockevent_shutdown;
dev->tick_resume = dmtimer_clockevent_shutdown;
dev->cpumask = cpu_possible_mask;
dev->cpumask = cpumask;
dev->irq = irq_of_parse_and_map(np, 0);
if (!dev->irq) {
error = -ENXIO;
goto err_out_free;
}
if (!dev->irq)
return -ENXIO;
error = dmtimer_systimer_setup(np, &clkevt->t);
if (error)
goto err_out_free;
return error;
clkevt->period = 0xffffffff - DIV_ROUND_CLOSEST(t->rate, HZ);
@@ -578,38 +577,132 @@ static int __init dmtimer_clockevent_init(struct device_node *np)
writel_relaxed(OMAP_TIMER_CTRL_POSTED, t->base + t->ifctrl);
error = request_irq(dev->irq, dmtimer_clockevent_interrupt,
IRQF_TIMER, "clockevent", clkevt);
IRQF_TIMER, name, clkevt);
if (error)
goto err_out_unmap;
writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->irq_ena);
writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->wakeup);
pr_info("TI gptimer clockevent: %s%lu Hz at %pOF\n",
of_find_property(np, "ti,timer-alwon", NULL) ?
pr_info("TI gptimer %s: %s%lu Hz at %pOF\n",
name, of_find_property(np, "ti,timer-alwon", NULL) ?
"always-on " : "", t->rate, np->parent);
clockevents_config_and_register(dev, t->rate,
3, /* Timer internal resynch latency */
0xffffffff);
if (of_machine_is_compatible("ti,am33xx") ||
of_machine_is_compatible("ti,am43")) {
dev->suspend = omap_clockevent_idle;
dev->resume = omap_clockevent_unidle;
}
return 0;
err_out_unmap:
iounmap(t->base);
return error;
}
static int __init dmtimer_clockevent_init(struct device_node *np)
{
struct dmtimer_clockevent *clkevt;
int error;
clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL);
if (!clkevt)
return -ENOMEM;
error = dmtimer_clkevt_init_common(clkevt, np,
CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT,
cpu_possible_mask, "clockevent",
300);
if (error)
goto err_out_free;
clockevents_config_and_register(&clkevt->dev, clkevt->t.rate,
3, /* Timer internal resync latency */
0xffffffff);
if (of_machine_is_compatible("ti,am33xx") ||
of_machine_is_compatible("ti,am43")) {
clkevt->dev.suspend = omap_clockevent_idle;
clkevt->dev.resume = omap_clockevent_unidle;
}
return 0;
err_out_free:
kfree(clkevt);
return error;
}
/* Dmtimer as percpu timer. See dra7 ARM architected timer wrap erratum i940 */
static DEFINE_PER_CPU(struct dmtimer_clockevent, dmtimer_percpu_timer);
static int __init dmtimer_percpu_timer_init(struct device_node *np, int cpu)
{
struct dmtimer_clockevent *clkevt;
int error;
if (!cpu_possible(cpu))
return -EINVAL;
if (!of_property_read_bool(np->parent, "ti,no-reset-on-init") ||
!of_property_read_bool(np->parent, "ti,no-idle"))
pr_warn("Incomplete dtb for percpu dmtimer %pOF\n", np->parent);
clkevt = per_cpu_ptr(&dmtimer_percpu_timer, cpu);
error = dmtimer_clkevt_init_common(clkevt, np, CLOCK_EVT_FEAT_ONESHOT,
cpumask_of(cpu), "percpu-dmtimer",
500);
if (error)
return error;
return 0;
}
/* See TRM for timer internal resynch latency */
static int omap_dmtimer_starting_cpu(unsigned int cpu)
{
struct dmtimer_clockevent *clkevt = per_cpu_ptr(&dmtimer_percpu_timer, cpu);
struct clock_event_device *dev = &clkevt->dev;
struct dmtimer_systimer *t = &clkevt->t;
clockevents_config_and_register(dev, t->rate, 3, ULONG_MAX);
irq_force_affinity(dev->irq, cpumask_of(cpu));
return 0;
}
static int __init dmtimer_percpu_timer_startup(void)
{
struct dmtimer_clockevent *clkevt = per_cpu_ptr(&dmtimer_percpu_timer, 0);
struct dmtimer_systimer *t = &clkevt->t;
if (t->sysc) {
cpuhp_setup_state(CPUHP_AP_TI_GP_TIMER_STARTING,
"clockevents/omap/gptimer:starting",
omap_dmtimer_starting_cpu, NULL);
}
return 0;
}
subsys_initcall(dmtimer_percpu_timer_startup);
static int __init dmtimer_percpu_quirk_init(struct device_node *np, u32 pa)
{
struct device_node *arm_timer;
arm_timer = of_find_compatible_node(NULL, NULL, "arm,armv7-timer");
if (of_device_is_available(arm_timer)) {
pr_warn_once("ARM architected timer wrap issue i940 detected\n");
return 0;
}
if (pa == 0x48034000) /* dra7 dmtimer3 */
return dmtimer_percpu_timer_init(np, 0);
else if (pa == 0x48036000) /* dra7 dmtimer4 */
return dmtimer_percpu_timer_init(np, 1);
return 0;
}
/* Clocksource */
static struct dmtimer_clocksource *
to_dmtimer_clocksource(struct clocksource *cs)
@@ -743,6 +836,9 @@ static int __init dmtimer_systimer_init(struct device_node *np)
if (clockevent == pa)
return dmtimer_clockevent_init(np);
if (of_machine_is_compatible("ti,dra7"))
return dmtimer_percpu_quirk_init(np, pa);
return 0;
}


@@ -3019,6 +3019,14 @@ static const struct x86_cpu_id hwp_support_ids[] __initconst = {
{}
};
static bool intel_pstate_hwp_is_enabled(void)
{
u64 value;
rdmsrl(MSR_PM_ENABLE, value);
return !!(value & 0x1);
}
static int __init intel_pstate_init(void)
{
const struct x86_cpu_id *id;
@@ -3037,8 +3045,12 @@ static int __init intel_pstate_init(void)
* Avoid enabling HWP for processors without EPP support,
* because that means incomplete HWP implementation which is a
* corner case and supporting it is generally problematic.
*
* If HWP is enabled already, though, there is no choice but to
* deal with it.
*/
if (!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) {
if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) ||
intel_pstate_hwp_is_enabled()) {
hwp_active++;
hwp_mode_bdw = id->driver_data;
intel_pstate.attr = hwp_cpufreq_attrs;


@@ -989,7 +989,7 @@ int sev_dev_init(struct psp_device *psp)
if (!sev->vdata) {
ret = -ENODEV;
dev_err(dev, "sev: missing driver data\n");
goto e_err;
goto e_sev;
}
psp_set_sev_irq_handler(psp, sev_irq_handler, sev);
@@ -1004,6 +1004,8 @@ int sev_dev_init(struct psp_device *psp)
e_irq:
psp_clear_sev_irq_handler(psp);
e_sev:
devm_kfree(dev, sev);
e_err:
psp->sev_data = NULL;


@@ -35,15 +35,15 @@ struct idxd_user_context {
unsigned int flags;
};
enum idxd_cdev_cleanup {
CDEV_NORMAL = 0,
CDEV_FAILED,
};
static void idxd_cdev_dev_release(struct device *dev)
{
dev_dbg(dev, "releasing cdev device\n");
kfree(dev);
struct idxd_cdev *idxd_cdev = container_of(dev, struct idxd_cdev, dev);
struct idxd_cdev_context *cdev_ctx;
struct idxd_wq *wq = idxd_cdev->wq;
cdev_ctx = &ictx[wq->idxd->type];
ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor);
kfree(idxd_cdev);
}
static struct device_type idxd_cdev_device_type = {
@@ -58,14 +58,11 @@ static inline struct idxd_cdev *inode_idxd_cdev(struct inode *inode)
return container_of(cdev, struct idxd_cdev, cdev);
}
static inline struct idxd_wq *idxd_cdev_wq(struct idxd_cdev *idxd_cdev)
{
return container_of(idxd_cdev, struct idxd_wq, idxd_cdev);
}
static inline struct idxd_wq *inode_wq(struct inode *inode)
{
return idxd_cdev_wq(inode_idxd_cdev(inode));
struct idxd_cdev *idxd_cdev = inode_idxd_cdev(inode);
return idxd_cdev->wq;
}
static int idxd_cdev_open(struct inode *inode, struct file *filp)
@@ -172,11 +169,10 @@ static __poll_t idxd_cdev_poll(struct file *filp,
struct idxd_user_context *ctx = filp->private_data;
struct idxd_wq *wq = ctx->wq;
struct idxd_device *idxd = wq->idxd;
struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
unsigned long flags;
__poll_t out = 0;
poll_wait(filp, &idxd_cdev->err_queue, wait);
poll_wait(filp, &wq->err_queue, wait);
spin_lock_irqsave(&idxd->dev_lock, flags);
if (idxd->sw_err.valid)
out = EPOLLIN | EPOLLRDNORM;
@@ -198,98 +194,67 @@ int idxd_cdev_get_major(struct idxd_device *idxd)
return MAJOR(ictx[idxd->type].devt);
}
static int idxd_wq_cdev_dev_setup(struct idxd_wq *wq)
int idxd_wq_add_cdev(struct idxd_wq *wq)
{
struct idxd_device *idxd = wq->idxd;
struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
struct idxd_cdev_context *cdev_ctx;
struct idxd_cdev *idxd_cdev;
struct cdev *cdev;
struct device *dev;
int minor, rc;
struct idxd_cdev_context *cdev_ctx;
int rc, minor;
idxd_cdev->dev = kzalloc(sizeof(*idxd_cdev->dev), GFP_KERNEL);
if (!idxd_cdev->dev)
idxd_cdev = kzalloc(sizeof(*idxd_cdev), GFP_KERNEL);
if (!idxd_cdev)
return -ENOMEM;
dev = idxd_cdev->dev;
dev->parent = &idxd->pdev->dev;
dev_set_name(dev, "%s/wq%u.%u", idxd_get_dev_name(idxd),
idxd->id, wq->id);
dev->bus = idxd_get_bus_type(idxd);
idxd_cdev->wq = wq;
cdev = &idxd_cdev->cdev;
dev = &idxd_cdev->dev;
cdev_ctx = &ictx[wq->idxd->type];
minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL);
if (minor < 0) {
rc = minor;
kfree(dev);
goto ida_err;
}
dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor);
dev->type = &idxd_cdev_device_type;
rc = device_register(dev);
if (rc < 0) {
dev_err(&idxd->pdev->dev, "device register failed\n");
goto dev_reg_err;
kfree(idxd_cdev);
return minor;
}
idxd_cdev->minor = minor;
return 0;
device_initialize(dev);
dev->parent = &wq->conf_dev;
dev->bus = idxd_get_bus_type(idxd);
dev->type = &idxd_cdev_device_type;
dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor);
dev_reg_err:
ida_simple_remove(&cdev_ctx->minor_ida, MINOR(dev->devt));
put_device(dev);
ida_err:
idxd_cdev->dev = NULL;
return rc;
}
static void idxd_wq_cdev_cleanup(struct idxd_wq *wq,
enum idxd_cdev_cleanup cdev_state)
{
struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
struct idxd_cdev_context *cdev_ctx;
cdev_ctx = &ictx[wq->idxd->type];
if (cdev_state == CDEV_NORMAL)
cdev_del(&idxd_cdev->cdev);
device_unregister(idxd_cdev->dev);
/*
* The device_type->release() will be called on the device and free
* the allocated struct device. We can just forget it.
*/
ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor);
idxd_cdev->dev = NULL;
idxd_cdev->minor = -1;
}
int idxd_wq_add_cdev(struct idxd_wq *wq)
{
struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
struct cdev *cdev = &idxd_cdev->cdev;
struct device *dev;
int rc;
rc = idxd_wq_cdev_dev_setup(wq);
rc = dev_set_name(dev, "%s/wq%u.%u", idxd_get_dev_name(idxd),
idxd->id, wq->id);
if (rc < 0)
return rc;
goto err;
dev = idxd_cdev->dev;
wq->idxd_cdev = idxd_cdev;
cdev_init(cdev, &idxd_cdev_fops);
cdev_set_parent(cdev, &dev->kobj);
rc = cdev_add(cdev, dev->devt, 1);
rc = cdev_device_add(cdev, dev);
if (rc) {
dev_dbg(&wq->idxd->pdev->dev, "cdev_add failed: %d\n", rc);
idxd_wq_cdev_cleanup(wq, CDEV_FAILED);
return rc;
goto err;
}
init_waitqueue_head(&idxd_cdev->err_queue);
return 0;
err:
put_device(dev);
wq->idxd_cdev = NULL;
return rc;
}
void idxd_wq_del_cdev(struct idxd_wq *wq)
{
idxd_wq_cdev_cleanup(wq, CDEV_NORMAL);
struct idxd_cdev *idxd_cdev;
struct idxd_cdev_context *cdev_ctx;
cdev_ctx = &ictx[wq->idxd->type];
idxd_cdev = wq->idxd_cdev;
wq->idxd_cdev = NULL;
cdev_device_del(&idxd_cdev->cdev, &idxd_cdev->dev);
put_device(&idxd_cdev->dev);
}
int idxd_cdev_register(void)


@@ -169,8 +169,6 @@ int idxd_wq_alloc_resources(struct idxd_wq *wq)
desc->id = i;
desc->wq = wq;
desc->cpu = -1;
dma_async_tx_descriptor_init(&desc->txd, &wq->dma_chan);
desc->txd.tx_submit = idxd_dma_tx_submit;
}
return 0;
@@ -378,7 +376,8 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
if (idxd_device_is_halted(idxd)) {
dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
*status = IDXD_CMDSTS_HW_ERR;
if (status)
*status = IDXD_CMDSTS_HW_ERR;
return;
}


@@ -14,7 +14,10 @@
static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c)
{
return container_of(c, struct idxd_wq, dma_chan);
struct idxd_dma_chan *idxd_chan;
idxd_chan = container_of(c, struct idxd_dma_chan, chan);
return idxd_chan->wq;
}
void idxd_dma_complete_txd(struct idxd_desc *desc,
@@ -144,7 +147,7 @@ static void idxd_dma_issue_pending(struct dma_chan *dma_chan)
{
}
dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
static dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
struct dma_chan *c = tx->chan;
struct idxd_wq *wq = to_idxd_wq(c);
@@ -165,14 +168,25 @@ dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
static void idxd_dma_release(struct dma_device *device)
{
struct idxd_dma_dev *idxd_dma = container_of(device, struct idxd_dma_dev, dma);
kfree(idxd_dma);
}
int idxd_register_dma_device(struct idxd_device *idxd)
{
struct dma_device *dma = &idxd->dma_dev;
struct idxd_dma_dev *idxd_dma;
struct dma_device *dma;
struct device *dev = &idxd->pdev->dev;
int rc;
idxd_dma = kzalloc_node(sizeof(*idxd_dma), GFP_KERNEL, dev_to_node(dev));
if (!idxd_dma)
return -ENOMEM;
dma = &idxd_dma->dma;
INIT_LIST_HEAD(&dma->channels);
dma->dev = &idxd->pdev->dev;
dma->dev = dev;
dma_cap_set(DMA_PRIVATE, dma->cap_mask);
dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask);
@@ -188,35 +202,72 @@ int idxd_register_dma_device(struct idxd_device *idxd)
dma->device_alloc_chan_resources = idxd_dma_alloc_chan_resources;
dma->device_free_chan_resources = idxd_dma_free_chan_resources;
return dma_async_device_register(&idxd->dma_dev);
rc = dma_async_device_register(dma);
if (rc < 0) {
kfree(idxd_dma);
return rc;
}
idxd_dma->idxd = idxd;
/*
* This pointer is protected by the refs taken by the dma_chan. It will remain valid
* as long as there are outstanding channels.
*/
idxd->idxd_dma = idxd_dma;
return 0;
}
void idxd_unregister_dma_device(struct idxd_device *idxd)
{
dma_async_device_unregister(&idxd->dma_dev);
dma_async_device_unregister(&idxd->idxd_dma->dma);
}
int idxd_register_dma_channel(struct idxd_wq *wq)
{
struct idxd_device *idxd = wq->idxd;
struct dma_device *dma = &idxd->dma_dev;
struct dma_chan *chan = &wq->dma_chan;
int rc;
struct dma_device *dma = &idxd->idxd_dma->dma;
struct device *dev = &idxd->pdev->dev;
struct idxd_dma_chan *idxd_chan;
struct dma_chan *chan;
int rc, i;
memset(&wq->dma_chan, 0, sizeof(struct dma_chan));
idxd_chan = kzalloc_node(sizeof(*idxd_chan), GFP_KERNEL, dev_to_node(dev));
if (!idxd_chan)
return -ENOMEM;
chan = &idxd_chan->chan;
chan->device = dma;
list_add_tail(&chan->device_node, &dma->channels);
for (i = 0; i < wq->num_descs; i++) {
struct idxd_desc *desc = wq->descs[i];
dma_async_tx_descriptor_init(&desc->txd, chan);
desc->txd.tx_submit = idxd_dma_tx_submit;
}
rc = dma_async_device_channel_register(dma, chan);
if (rc < 0)
if (rc < 0) {
kfree(idxd_chan);
return rc;
}
wq->idxd_chan = idxd_chan;
idxd_chan->wq = wq;
get_device(&wq->conf_dev);
return 0;
}
void idxd_unregister_dma_channel(struct idxd_wq *wq)
{
struct dma_chan *chan = &wq->dma_chan;
struct idxd_dma_chan *idxd_chan = wq->idxd_chan;
struct dma_chan *chan = &idxd_chan->chan;
struct idxd_dma_dev *idxd_dma = wq->idxd->idxd_dma;
dma_async_device_channel_unregister(&wq->idxd->dma_dev, chan);
dma_async_device_channel_unregister(&idxd_dma->dma, chan);
list_del(&chan->device_node);
kfree(wq->idxd_chan);
wq->idxd_chan = NULL;
put_device(&wq->conf_dev);
}


@@ -14,6 +14,9 @@
extern struct kmem_cache *idxd_desc_pool;
struct idxd_device;
struct idxd_wq;
#define IDXD_REG_TIMEOUT 50
#define IDXD_DRAIN_TIMEOUT 5000
@@ -68,10 +71,10 @@ enum idxd_wq_type {
};
struct idxd_cdev {
struct idxd_wq *wq;
struct cdev cdev;
struct device *dev;
struct device dev;
int minor;
struct wait_queue_head err_queue;
};
#define IDXD_ALLOCATED_BATCH_SIZE 128U
@@ -88,10 +91,16 @@ enum idxd_complete_type {
IDXD_COMPLETE_ABORT,
};
struct idxd_dma_chan {
struct dma_chan chan;
struct idxd_wq *wq;
};
struct idxd_wq {
void __iomem *dportal;
struct device conf_dev;
struct idxd_cdev idxd_cdev;
struct idxd_cdev *idxd_cdev;
struct wait_queue_head err_queue;
struct idxd_device *idxd;
int id;
enum idxd_wq_type type;
@@ -112,7 +121,7 @@ struct idxd_wq {
int compls_size;
struct idxd_desc **descs;
struct sbitmap_queue sbq;
struct dma_chan dma_chan;
struct idxd_dma_chan *idxd_chan;
char name[WQ_NAME_SIZE + 1];
u64 max_xfer_bytes;
u32 max_batch_size;
@@ -147,6 +156,11 @@ enum idxd_device_flag {
IDXD_FLAG_CMD_RUNNING,
};
struct idxd_dma_dev {
struct idxd_device *idxd;
struct dma_device dma;
};
struct idxd_device {
enum idxd_type type;
struct device conf_dev;
@@ -191,7 +205,7 @@ struct idxd_device {
int num_wq_irqs;
struct idxd_irq_entry *irq_entries;
struct dma_device dma_dev;
struct idxd_dma_dev *idxd_dma;
struct workqueue_struct *wq;
struct work_struct work;
};
@@ -313,7 +327,6 @@ void idxd_unregister_dma_channel(struct idxd_wq *wq);
void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
void idxd_dma_complete_txd(struct idxd_desc *desc,
enum idxd_complete_type comp_type);
dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx);
/* cdev */
int idxd_cdev_register(void);


@@ -175,7 +175,7 @@ static int idxd_setup_internals(struct idxd_device *idxd)
wq->id = i;
wq->idxd = idxd;
mutex_init(&wq->wq_lock);
wq->idxd_cdev.minor = -1;
init_waitqueue_head(&wq->err_queue);
wq->max_xfer_bytes = idxd->max_xfer_bytes;
wq->max_batch_size = idxd->max_batch_size;
wq->wqcfg = devm_kzalloc(dev, idxd->wqcfg_size, GFP_KERNEL);


@@ -75,7 +75,7 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
struct idxd_wq *wq = &idxd->wqs[id];
if (wq->type == IDXD_WQT_USER)
wake_up_interruptible(&wq->idxd_cdev.err_queue);
wake_up_interruptible(&wq->err_queue);
} else {
int i;
@@ -83,7 +83,7 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
struct idxd_wq *wq = &idxd->wqs[i];
if (wq->type == IDXD_WQT_USER)
wake_up_interruptible(&wq->idxd_cdev.err_queue);
wake_up_interruptible(&wq->err_queue);
}
}


@@ -1052,8 +1052,16 @@ static ssize_t wq_cdev_minor_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
int minor = -1;
return sprintf(buf, "%d\n", wq->idxd_cdev.minor);
mutex_lock(&wq->wq_lock);
if (wq->idxd_cdev)
minor = wq->idxd_cdev->minor;
mutex_unlock(&wq->wq_lock);
if (minor == -1)
return -ENXIO;
return sysfs_emit(buf, "%d\n", minor);
}
static struct device_attribute dev_attr_wq_cdev_minor =


@@ -75,6 +75,8 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo);
/* flush the cache before commit the IB */
ib->flags = AMDGPU_IB_FLAG_EMIT_MEM_SYNC;
if (!vm)
ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);


@@ -643,6 +643,7 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct
/* File created at /sys/class/drm/card0/device/hdcp_srm*/
hdcp_work[0].attr = data_attr;
sysfs_bin_attr_init(&hdcp_work[0].attr);
if (sysfs_create_bin_file(&adev->dev->kobj, &hdcp_work[0].attr))
DRM_WARN("Failed to create device file hdcp_srm");


@@ -2504,6 +2504,10 @@ static void commit_planes_for_stream(struct dc *dc,
plane_state->triplebuffer_flips = true;
}
}
if (update_type == UPDATE_TYPE_FULL) {
/* force vsync flip when reconfiguring pipes to prevent underflow */
plane_state->flip_immediate = false;
}
}
}


@@ -1,5 +1,5 @@
/*
* Copyright 2012-17 Advanced Micro Devices, Inc.
* Copyright 2012-2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -181,11 +181,14 @@ void hubp2_vready_at_or_After_vsync(struct hubp *hubp,
else
Set HUBP_VREADY_AT_OR_AFTER_VSYNC = 0
*/
if ((pipe_dest->vstartup_start - (pipe_dest->vready_offset+pipe_dest->vupdate_width
+ pipe_dest->vupdate_offset) / pipe_dest->htotal) <= pipe_dest->vblank_end) {
value = 1;
} else
value = 0;
if (pipe_dest->htotal != 0) {
if ((pipe_dest->vstartup_start - (pipe_dest->vready_offset+pipe_dest->vupdate_width
+ pipe_dest->vupdate_offset) / pipe_dest->htotal) <= pipe_dest->vblank_end) {
value = 1;
} else
value = 0;
}
REG_UPDATE(DCHUBP_CNTL, HUBP_VREADY_AT_OR_AFTER_VSYNC, value);
}


@@ -789,6 +789,8 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp)
TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
hdcp->connection.is_hdcp2_revoked = 1;
status = MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED;
} else {
status = MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE;
}
}
mutex_unlock(&psp->hdcp_context.mutex);


@@ -382,7 +382,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
i830_overlay_clock_gating(dev_priv, true);
}
static void
__i915_active_call static void
intel_overlay_last_flip_retire(struct i915_active *active)
{
struct intel_overlay *overlay =


@@ -189,7 +189,7 @@ compute_partial_view(const struct drm_i915_gem_object *obj,
struct i915_ggtt_view view;
if (i915_gem_object_is_tiled(obj))
chunk = roundup(chunk, tile_row_pages(obj));
chunk = roundup(chunk, tile_row_pages(obj) ?: 1);
view.type = I915_GGTT_VIEW_PARTIAL;
view.partial.offset = rounddown(page_offset, chunk);


@@ -628,7 +628,6 @@ static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
err = pin_pt_dma(vm, pde->pt.base);
if (err) {
i915_gem_object_put(pde->pt.base);
free_pd(vm, pde);
return err;
}


@@ -652,8 +652,8 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
* banks of memory are paired and unswizzled on the
* uneven portion, so leave that as unknown.
*/
if (intel_uncore_read(uncore, C0DRB3) ==
intel_uncore_read(uncore, C1DRB3)) {
if (intel_uncore_read16(uncore, C0DRB3) ==
intel_uncore_read16(uncore, C1DRB3)) {
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
}


@@ -1159,7 +1159,8 @@ static int auto_active(struct i915_active *ref)
return 0;
}
static void auto_retire(struct i915_active *ref)
__i915_active_call static void
auto_retire(struct i915_active *ref)
{
i915_active_put(ref);
}


@@ -527,6 +527,7 @@ int dp_audio_hw_params(struct device *dev,
dp_audio_setup_acr(audio);
dp_audio_safe_to_exit_level(audio);
dp_audio_enable(audio, true);
dp_display_signal_audio_start(dp_display);
dp_display->audio_enabled = true;
end:


@@ -176,6 +176,15 @@ static int dp_del_event(struct dp_display_private *dp_priv, u32 event)
return 0;
}
void dp_display_signal_audio_start(struct msm_dp *dp_display)
{
struct dp_display_private *dp;
dp = container_of(dp_display, struct dp_display_private, dp_display);
reinit_completion(&dp->audio_comp);
}
void dp_display_signal_audio_complete(struct msm_dp *dp_display)
{
struct dp_display_private *dp;
@@ -620,7 +629,6 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
dp_add_event(dp, EV_DISCONNECT_PENDING_TIMEOUT, 0, DP_TIMEOUT_5_SECOND);
/* signal the disconnect event early to ensure proper teardown */
reinit_completion(&dp->audio_comp);
dp_display_handle_plugged_change(g_dp_display, false);
dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK |
@@ -841,7 +849,6 @@ static int dp_display_disable(struct dp_display_private *dp, u32 data)
/* wait only if audio was enabled */
if (dp_display->audio_enabled) {
/* signal the disconnect event */
reinit_completion(&dp->audio_comp);
dp_display_handle_plugged_change(dp_display, false);
if (!wait_for_completion_timeout(&dp->audio_comp,
HZ * 5))


@@ -34,6 +34,7 @@ int dp_display_get_modes(struct msm_dp *dp_display,
int dp_display_request_irq(struct msm_dp *dp_display);
bool dp_display_check_video_test(struct msm_dp *dp_display);
int dp_display_get_test_bpp(struct msm_dp *dp_display);
void dp_display_signal_audio_start(struct msm_dp *dp_display);
void dp_display_signal_audio_complete(struct msm_dp *dp_display);
#endif /* _DP_DISPLAY_H_ */


@@ -1559,6 +1559,7 @@ struct radeon_dpm {
void *priv;
u32 new_active_crtcs;
int new_active_crtc_count;
int high_pixelclock_count;
u32 current_active_crtcs;
int current_active_crtc_count;
bool single_display;


@@ -2126,11 +2126,14 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
return state_index;
/* last mode is usually default, array is low to high */
for (i = 0; i < num_modes; i++) {
rdev->pm.power_state[state_index].clock_info =
kcalloc(1, sizeof(struct radeon_pm_clock_info),
GFP_KERNEL);
/* avoid memory leaks from invalid modes or unknown frev. */
if (!rdev->pm.power_state[state_index].clock_info) {
rdev->pm.power_state[state_index].clock_info =
kzalloc(sizeof(struct radeon_pm_clock_info),
GFP_KERNEL);
}
if (!rdev->pm.power_state[state_index].clock_info)
return state_index;
goto out;
rdev->pm.power_state[state_index].num_clock_modes = 1;
rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
switch (frev) {
@@ -2249,17 +2252,24 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
break;
}
}
out:
/* free any unused clock_info allocation. */
if (state_index && state_index < num_modes) {
kfree(rdev->pm.power_state[state_index].clock_info);
rdev->pm.power_state[state_index].clock_info = NULL;
}
/* last mode is usually default */
if (rdev->pm.default_power_state_index == -1) {
if (state_index && rdev->pm.default_power_state_index == -1) {
rdev->pm.power_state[state_index - 1].type =
POWER_STATE_TYPE_DEFAULT;
rdev->pm.default_power_state_index = state_index - 1;
rdev->pm.power_state[state_index - 1].default_clock_mode =
&rdev->pm.power_state[state_index - 1].clock_info[0];
rdev->pm.power_state[state_index].flags &=
rdev->pm.power_state[state_index - 1].flags &=
~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
rdev->pm.power_state[state_index].misc = 0;
rdev->pm.power_state[state_index].misc2 = 0;
rdev->pm.power_state[state_index - 1].misc = 0;
rdev->pm.power_state[state_index - 1].misc2 = 0;
}
return state_index;
}


@@ -1747,6 +1747,7 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
struct drm_device *ddev = rdev->ddev;
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
struct radeon_connector *radeon_connector;
if (!rdev->pm.dpm_enabled)
return;
@@ -1756,6 +1757,7 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
/* update active crtc counts */
rdev->pm.dpm.new_active_crtcs = 0;
rdev->pm.dpm.new_active_crtc_count = 0;
rdev->pm.dpm.high_pixelclock_count = 0;
if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
list_for_each_entry(crtc,
&ddev->mode_config.crtc_list, head) {
@@ -1763,6 +1765,12 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
if (crtc->enabled) {
rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
rdev->pm.dpm.new_active_crtc_count++;
if (!radeon_crtc->connector)
continue;
radeon_connector = to_radeon_connector(radeon_crtc->connector);
if (radeon_connector->pixelclock_for_modeset > 297000)
rdev->pm.dpm.high_pixelclock_count++;
}
}
}


@@ -2982,6 +2982,9 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
(rdev->pdev->device == 0x6605)) {
max_sclk = 75000;
}
if (rdev->pm.dpm.high_pixelclock_count > 1)
disable_sclk_switching = true;
}
if (rps->vce_active) {


@@ -209,9 +209,9 @@ int occ_update_response(struct occ *occ)
return rc;
/* limit the maximum rate of polling the OCC */
if (time_after(jiffies, occ->last_update + OCC_UPDATE_FREQUENCY)) {
if (time_after(jiffies, occ->next_update)) {
rc = occ_poll(occ);
occ->last_update = jiffies;
occ->next_update = jiffies + OCC_UPDATE_FREQUENCY;
} else {
rc = occ->last_error;
}
@@ -1089,6 +1089,7 @@ int occ_setup(struct occ *occ, const char *name)
return rc;
}
occ->next_update = jiffies + OCC_UPDATE_FREQUENCY;
occ_parse_poll_response(occ);
rc = occ_setup_sensor_attrs(occ);

Some files were not shown because too many files have changed in this diff.