From cbf85a8a6d9a7e305ea37af597d32cb832241466 Mon Sep 17 00:00:00 2001 From: root Date: Fri, 17 Oct 2025 16:40:24 +0200 Subject: [PATCH] Trying to fix black screen on wake... --- ACPI/SSDTTime-master/.gitattributes | 3 + ACPI/SSDTTime-master/.gitignore | 110 + ACPI/SSDTTime-master/LICENSE | 21 + ACPI/SSDTTime-master/PatchMerge.bat | 426 ++ ACPI/SSDTTime-master/PatchMerge.command | 339 ++ ACPI/SSDTTime-master/PatchMerge.py | 586 +++ ACPI/SSDTTime-master/README.md | 47 + ACPI/SSDTTime-master/SSDTTime.bat | 426 ++ ACPI/SSDTTime-master/SSDTTime.command | 339 ++ ACPI/SSDTTime-master/SSDTTime.py | 4138 ++++++++++++++++++++ ACPI/SSDTTime-master/Scripts/__init__.py | 4 + ACPI/SSDTTime-master/Scripts/downloader.py | 330 ++ ACPI/SSDTTime-master/Scripts/dsdt.py | 907 +++++ ACPI/SSDTTime-master/Scripts/plist.py | 688 ++++ ACPI/SSDTTime-master/Scripts/reveal.py | 69 + ACPI/SSDTTime-master/Scripts/run.py | 151 + ACPI/SSDTTime-master/Scripts/utils.py | 263 ++ EFI/OC/config.plist | 4 +- 18 files changed, 8849 insertions(+), 2 deletions(-) create mode 100644 ACPI/SSDTTime-master/.gitattributes create mode 100644 ACPI/SSDTTime-master/.gitignore create mode 100644 ACPI/SSDTTime-master/LICENSE create mode 100644 ACPI/SSDTTime-master/PatchMerge.bat create mode 100644 ACPI/SSDTTime-master/PatchMerge.command create mode 100644 ACPI/SSDTTime-master/PatchMerge.py create mode 100644 ACPI/SSDTTime-master/README.md create mode 100644 ACPI/SSDTTime-master/SSDTTime.bat create mode 100644 ACPI/SSDTTime-master/SSDTTime.command create mode 100644 ACPI/SSDTTime-master/SSDTTime.py create mode 100644 ACPI/SSDTTime-master/Scripts/__init__.py create mode 100644 ACPI/SSDTTime-master/Scripts/downloader.py create mode 100644 ACPI/SSDTTime-master/Scripts/dsdt.py create mode 100644 ACPI/SSDTTime-master/Scripts/plist.py create mode 100644 ACPI/SSDTTime-master/Scripts/reveal.py create mode 100644 ACPI/SSDTTime-master/Scripts/run.py create mode 100644 ACPI/SSDTTime-master/Scripts/utils.py diff --git 
a/ACPI/SSDTTime-master/.gitattributes b/ACPI/SSDTTime-master/.gitattributes new file mode 100644 index 0000000..e1c78ce --- /dev/null +++ b/ACPI/SSDTTime-master/.gitattributes @@ -0,0 +1,3 @@ +# Ensure all .bat scripts use CRLF line endings +# This can prevent a number of odd batch issues +*.bat text eol=crlf diff --git a/ACPI/SSDTTime-master/.gitignore b/ACPI/SSDTTime-master/.gitignore new file mode 100644 index 0000000..f435095 --- /dev/null +++ b/ACPI/SSDTTime-master/.gitignore @@ -0,0 +1,110 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ + +Results/* +iasl* +acpidump* + +.vs \ No newline at end of file diff --git a/ACPI/SSDTTime-master/LICENSE b/ACPI/SSDTTime-master/LICENSE new file mode 100644 index 0000000..a481b29 --- 
/dev/null +++ b/ACPI/SSDTTime-master/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019 CorpNewt + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/ACPI/SSDTTime-master/PatchMerge.bat b/ACPI/SSDTTime-master/PatchMerge.bat new file mode 100644 index 0000000..154789d --- /dev/null +++ b/ACPI/SSDTTime-master/PatchMerge.bat @@ -0,0 +1,426 @@ +@echo off +REM Get our local path and args before delayed expansion - allows % and ! 
+set "thisDir=%~dp0" +set "args=%*" + +setlocal enableDelayedExpansion +REM Setup initial vars +set "script_name=" +set /a tried=0 +set "toask=yes" +set "pause_on_error=yes" +set "py2v=" +set "py2path=" +set "py3v=" +set "py3path=" +set "pypath=" +set "targetpy=3" + +REM use_py3: +REM TRUE = Use if found, use py2 otherwise +REM FALSE = Use py2 +REM FORCE = Use py3 +set "use_py3=TRUE" + +REM We'll parse if the first argument passed is +REM --install-python and if so, we'll just install +REM Can optionally take a version number as the +REM second arg - i.e. --install-python 3.13.1 +set "just_installing=FALSE" +set "user_provided=" + +REM Get the system32 (or equivalent) path +call :getsyspath "syspath" + +REM Make sure the syspath exists +if "!syspath!" == "" ( + if exist "%SYSTEMROOT%\system32\cmd.exe" ( + if exist "%SYSTEMROOT%\system32\reg.exe" ( + if exist "%SYSTEMROOT%\system32\where.exe" ( + REM Fall back on the default path if it exists + set "ComSpec=%SYSTEMROOT%\system32\cmd.exe" + set "syspath=%SYSTEMROOT%\system32\" + ) + ) + ) + if "!syspath!" == "" ( + cls + echo ### ### + echo # Missing Required Files # + echo ### ### + echo. + echo Could not locate cmd.exe, reg.exe, or where.exe + echo. + echo Please ensure your ComSpec environment variable is properly configured and + echo points directly to cmd.exe, then try again. + echo. + echo Current CompSpec Value: "%ComSpec%" + echo. + echo Press [enter] to quit. + pause > nul + exit /b 1 + ) +) + +if "%~1" == "--install-python" ( + set "just_installing=TRUE" + set "user_provided=%~2" + goto installpy +) + +goto checkscript + +:checkscript +REM Check for our script first +set "looking_for=!script_name!" +if "!script_name!" == "" ( + set "looking_for=%~n0.py or %~n0.command" + set "script_name=%~n0.py" + if not exist "!thisDir!\!script_name!" ( + set "script_name=%~n0.command" + ) +) +if not exist "!thisDir!\!script_name!" ( + cls + echo ### ### + echo # Target Not Found # + echo ### ### + echo. 
+ echo Could not find !looking_for!. + echo Please make sure to run this script from the same directory + echo as !looking_for!. + echo. + echo Press [enter] to quit. + pause > nul + exit /b 1 +) +goto checkpy + +:checkpy +call :updatepath +for /f "USEBACKQ tokens=*" %%x in (`!syspath!where.exe python 2^> nul`) do ( call :checkpyversion "%%x" "py2v" "py2path" "py3v" "py3path" ) +for /f "USEBACKQ tokens=*" %%x in (`!syspath!where.exe python3 2^> nul`) do ( call :checkpyversion "%%x" "py2v" "py2path" "py3v" "py3path" ) +for /f "USEBACKQ tokens=*" %%x in (`!syspath!where.exe py 2^> nul`) do ( call :checkpylauncher "%%x" "py2v" "py2path" "py3v" "py3path" ) +REM Walk our returns to see if we need to install +if /i "!use_py3!" == "FALSE" ( + set "targetpy=2" + set "pypath=!py2path!" +) else if /i "!use_py3!" == "FORCE" ( + set "pypath=!py3path!" +) else if /i "!use_py3!" == "TRUE" ( + set "pypath=!py3path!" + if "!pypath!" == "" set "pypath=!py2path!" +) +if not "!pypath!" == "" ( + goto runscript +) +if !tried! lss 1 ( + if /i "!toask!"=="yes" ( + REM Better ask permission first + goto askinstall + ) else ( + goto installpy + ) +) else ( + cls + echo ### ### + echo # Python Not Found # + echo ### ### + echo. + REM Couldn't install for whatever reason - give the error message + echo Python is not installed or not found in your PATH var. + echo Please go to https://www.python.org/downloads/windows/ to + echo download and install the latest version, then try again. + echo. + echo Make sure you check the box labeled: + echo. + echo "Add Python X.X to PATH" + echo. + echo Where X.X is the py version you're installing. + echo. + echo Press [enter] to quit. 
+ pause > nul + exit /b 1 +) +goto runscript + +:checkpylauncher +REM Attempt to check the latest python 2 and 3 versions via the py launcher +for /f "USEBACKQ tokens=*" %%x in (`%~1 -2 -c "import sys; print(sys.executable)" 2^> nul`) do ( call :checkpyversion "%%x" "%~2" "%~3" "%~4" "%~5" ) +for /f "USEBACKQ tokens=*" %%x in (`%~1 -3 -c "import sys; print(sys.executable)" 2^> nul`) do ( call :checkpyversion "%%x" "%~2" "%~3" "%~4" "%~5" ) +goto :EOF + +:checkpyversion +set "version="&for /f "tokens=2* USEBACKQ delims= " %%a in (`"%~1" -V 2^>^&1`) do ( + REM Ensure we have a version number + call :isnumber "%%a" + if not "!errorlevel!" == "0" goto :EOF + set "version=%%a" +) +if not defined version goto :EOF +if "!version:~0,1!" == "2" ( + REM Python 2 + call :comparepyversion "!version!" "!%~2!" + if "!errorlevel!" == "1" ( + set "%~2=!version!" + set "%~3=%~1" + ) +) else ( + REM Python 3 + call :comparepyversion "!version!" "!%~4!" + if "!errorlevel!" == "1" ( + set "%~4=!version!" + set "%~5=%~1" + ) +) +goto :EOF + +:isnumber +set "var="&for /f "delims=0123456789." %%i in ("%~1") do set var=%%i +if defined var (exit /b 1) +exit /b 0 + +:comparepyversion +REM Exits with status 0 if equal, 1 if v1 gtr v2, 2 if v1 lss v2 +for /f "tokens=1,2,3 delims=." %%a in ("%~1") do ( + set a1=%%a + set a2=%%b + set a3=%%c +) +for /f "tokens=1,2,3 delims=." %%a in ("%~2") do ( + set b1=%%a + set b2=%%b + set b3=%%c +) +if not defined a1 set a1=0 +if not defined a2 set a2=0 +if not defined a3 set a3=0 +if not defined b1 set b1=0 +if not defined b2 set b2=0 +if not defined b3 set b3=0 +if %a1% gtr %b1% exit /b 1 +if %a1% lss %b1% exit /b 2 +if %a2% gtr %b2% exit /b 1 +if %a2% lss %b2% exit /b 2 +if %a3% gtr %b3% exit /b 1 +if %a3% lss %b3% exit /b 2 +exit /b 0 + +:askinstall +cls +echo ### ### +echo # Python Not Found # +echo ### ### +echo. +echo Python !targetpy! was not found on the system or in the PATH var. +echo. +set /p "menu=Would you like to install it now? 
[y/n]: " +if /i "!menu!"=="y" ( + REM We got the OK - install it + goto installpy +) else if "!menu!"=="n" ( + REM No OK here... + set /a tried=!tried!+1 + goto checkpy +) +REM Incorrect answer - go back +goto askinstall + +:installpy +REM This will attempt to download and install python +set /a tried=!tried!+1 +cls +echo ### ### +echo # Downloading Python # +echo ### ### +echo. +set "release=!user_provided!" +if "!release!" == "" ( + REM No explicit release set - get the latest from python.org + echo Gathering latest version... + powershell -command "[Net.ServicePointManager]::SecurityProtocol=[Net.SecurityProtocolType]::Tls12;(new-object System.Net.WebClient).DownloadFile('https://www.python.org/downloads/windows/','%TEMP%\pyurl.txt')" + REM Extract it if it's gzip compressed + powershell -command "$infile='%TEMP%\pyurl.txt';$outfile='%TEMP%\pyurl.temp';try{$input=New-Object System.IO.FileStream $infile,([IO.FileMode]::Open),([IO.FileAccess]::Read),([IO.FileShare]::Read);$output=New-Object System.IO.FileStream $outfile,([IO.FileMode]::Create),([IO.FileAccess]::Write),([IO.FileShare]::None);$gzipStream=New-Object System.IO.Compression.GzipStream $input,([IO.Compression.CompressionMode]::Decompress);$buffer=New-Object byte[](1024);while($true){$read=$gzipstream.Read($buffer,0,1024);if($read -le 0){break};$output.Write($buffer,0,$read)};$gzipStream.Close();$output.Close();$input.Close();Move-Item -Path $outfile -Destination $infile -Force}catch{}" + if not exist "%TEMP%\pyurl.txt" ( + if /i "!just_installing!" == "TRUE" ( + echo - Failed to get info + exit /b 1 + ) else ( + goto checkpy + ) + ) + pushd "%TEMP%" + :: Version detection code slimmed by LussacZheng (https://github.com/corpnewt/gibMacOS/issues/20) + for /f "tokens=9 delims=< " %%x in ('findstr /i /c:"Latest Python !targetpy! Release" pyurl.txt') do ( set "release=%%x" ) + popd + REM Let's delete our txt file now - we no longer need it + del "%TEMP%\pyurl.txt" + if "!release!" 
== "" ( + if /i "!just_installing!" == "TRUE" ( + echo - Failed to get python version + exit /b 1 + ) else ( + goto checkpy + ) + ) + echo Located Version: !release! +) else ( + echo User-Provided Version: !release! + REM Update our targetpy to reflect the first number of + REM our release + for /f "tokens=1 delims=." %%a in ("!release!") do ( + call :isnumber "%%a" + if "!errorlevel!" == "0" ( + set "targetpy=%%a" + ) + ) +) +echo Building download url... +REM At this point - we should have the version number. +REM We can build the url like so: "https://www.python.org/ftp/python/[version]/python-[version]-amd64.exe" +set "url=https://www.python.org/ftp/python/!release!/python-!release!-amd64.exe" +set "pytype=exe" +if "!targetpy!" == "2" ( + set "url=https://www.python.org/ftp/python/!release!/python-!release!.amd64.msi" + set "pytype=msi" +) +echo - !url! +echo Downloading... +REM Now we download it with our slick powershell command +powershell -command "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; (new-object System.Net.WebClient).DownloadFile('!url!','%TEMP%\pyinstall.!pytype!')" +REM If it doesn't exist - we bail +if not exist "%TEMP%\pyinstall.!pytype!" ( + if /i "!just_installing!" == "TRUE" ( + echo - Failed to download python installer + exit /b 1 + ) else ( + goto checkpy + ) +) +REM It should exist at this point - let's run it to install silently +echo Running python !pytype! installer... +pushd "%TEMP%" +if /i "!pytype!" == "exe" ( + echo - pyinstall.exe /quiet PrependPath=1 Include_test=0 Shortcuts=0 Include_launcher=0 + pyinstall.exe /quiet PrependPath=1 Include_test=0 Shortcuts=0 Include_launcher=0 +) else ( + set "foldername=!release:.=!" + echo - msiexec /i pyinstall.msi /qb ADDLOCAL=ALL TARGETDIR="%LocalAppData%\Programs\Python\Python!foldername:~0,2!" + msiexec /i pyinstall.msi /qb ADDLOCAL=ALL TARGETDIR="%LocalAppData%\Programs\Python\Python!foldername:~0,2!" +) +popd +set "py_error=!errorlevel!" 
+echo Installer finished with status: !py_error! +echo Cleaning up... +REM Now we should be able to delete the installer and check for py again +del "%TEMP%\pyinstall.!pytype!" +REM If it worked, then we should have python in our PATH +REM this does not get updated right away though - let's try +REM manually updating the local PATH var +call :updatepath +if /i "!just_installing!" == "TRUE" ( + echo. + echo Done. +) else ( + goto checkpy +) +exit /b + +:runscript +REM Python found +cls +REM Checks the args gathered at the beginning of the script. +REM Make sure we're not just forwarding empty quotes. +set "arg_test=!args:"=!" +if "!arg_test!"=="" ( + "!pypath!" "!thisDir!!script_name!" +) else ( + "!pypath!" "!thisDir!!script_name!" !args! +) +if /i "!pause_on_error!" == "yes" ( + if not "%ERRORLEVEL%" == "0" ( + echo. + echo Script exited with error code: %ERRORLEVEL% + echo. + echo Press [enter] to exit... + pause > nul + ) +) +goto :EOF + +:undouble +REM Helper function to strip doubles of a single character out of a string recursively +set "string_value=%~2" +:undouble_continue +set "check=!string_value:%~3%~3=%~3!" +if not "!check!" == "!string_value!" ( + set "string_value=!check!" + goto :undouble_continue +) +set "%~1=!check!" 
+goto :EOF + +:updatepath +set "spath=" +set "upath=" +for /f "USEBACKQ tokens=2* delims= " %%i in (`!syspath!reg.exe query "HKCU\Environment" /v "Path" 2^> nul`) do ( if not "%%j" == "" set "upath=%%j" ) +for /f "USEBACKQ tokens=2* delims= " %%i in (`!syspath!reg.exe query "HKLM\SYSTEM\CurrentControlSet\Control\Session Manager\Environment" /v "Path" 2^> nul`) do ( if not "%%j" == "" set "spath=%%j" ) +if not "%spath%" == "" ( + REM We got something in the system path + set "PATH=%spath%" + if not "%upath%" == "" ( + REM We also have something in the user path + set "PATH=%PATH%;%upath%" + ) +) else if not "%upath%" == "" ( + set "PATH=%upath%" +) +REM Remove double semicolons from the adjusted PATH +call :undouble "PATH" "%PATH%" ";" +goto :EOF + +:getsyspath +REM Helper method to return a valid path to cmd.exe, reg.exe, and where.exe by +REM walking the ComSpec var - will also repair it in memory if need be +REM Strip double semi-colons +call :undouble "temppath" "%ComSpec%" ";" + +REM Dirty hack to leverage the "line feed" approach - there are some odd side +REM effects with this. Do not use this variable name in comments near this +REM line - as it seems to behave erradically. +(set LF=^ +%=this line is empty=% +) +REM Replace instances of semi-colons with a line feed and wrap +REM in parenthesis to work around some strange batch behavior +set "testpath=%temppath:;=!LF!%" + +REM Let's walk each path and test if cmd.exe, reg.exe, and where.exe exist there +set /a found=0 +for /f "tokens=* delims=" %%i in ("!testpath!") do ( + REM Only continue if we haven't found it yet + if not "%%i" == "" ( + if !found! lss 1 ( + set "checkpath=%%i" + REM Remove "cmd.exe" from the end if it exists + if /i "!checkpath:~-7!" == "cmd.exe" ( + set "checkpath=!checkpath:~0,-7!" + ) + REM Pad the end with a backslash if needed + if not "!checkpath:~-1!" 
== "\" ( + set "checkpath=!checkpath!\" + ) + REM Let's see if cmd, reg, and where exist there - and set it if so + if EXIST "!checkpath!cmd.exe" ( + if EXIST "!checkpath!reg.exe" ( + if EXIST "!checkpath!where.exe" ( + set /a found=1 + set "ComSpec=!checkpath!cmd.exe" + set "%~1=!checkpath!" + ) + ) + ) + ) + ) +) +goto :EOF diff --git a/ACPI/SSDTTime-master/PatchMerge.command b/ACPI/SSDTTime-master/PatchMerge.command new file mode 100644 index 0000000..43b08a2 --- /dev/null +++ b/ACPI/SSDTTime-master/PatchMerge.command @@ -0,0 +1,339 @@ +#!/usr/bin/env bash + +# Get the curent directory, the script name +# and the script name with "py" substituted for the extension. +args=( "$@" ) +dir="$(cd -- "$(dirname "$0")" >/dev/null 2>&1; pwd -P)" +script="${0##*/}" +target="${script%.*}.py" + +# use_py3: +# TRUE = Use if found, use py2 otherwise +# FALSE = Use py2 +# FORCE = Use py3 +use_py3="TRUE" + +# We'll parse if the first argument passed is +# --install-python and if so, we'll just install +# Can optionally take a version number as the +# second arg - i.e. --install-python 3.13.1 +just_installing="FALSE" + +tempdir="" + +compare_to_version () { + # Compares our OS version to the passed OS version, and + # return a 1 if we match the passed compare type, or a 0 if we don't. + # $1 = 0 (equal), 1 (greater), 2 (less), 3 (gequal), 4 (lequal) + # $2 = OS version to compare ours to + if [ -z "$1" ] || [ -z "$2" ]; then + # Missing info - bail. 
+ return + fi + local current_os= comp= + current_os="$(sw_vers -productVersion 2>/dev/null)" + comp="$(vercomp "$current_os" "$2")" + # Check gequal and lequal first + if [[ "$1" == "3" && ("$comp" == "1" || "$comp" == "0") ]] || [[ "$1" == "4" && ("$comp" == "2" || "$comp" == "0") ]] || [[ "$comp" == "$1" ]]; then + # Matched + echo "1" + else + # No match + echo "0" + fi +} + +set_use_py3_if () { + # Auto sets the "use_py3" variable based on + # conditions passed + # $1 = 0 (equal), 1 (greater), 2 (less), 3 (gequal), 4 (lequal) + # $2 = OS version to compare + # $3 = TRUE/FALSE/FORCE in case of match + if [ -z "$1" ] || [ -z "$2" ] || [ -z "$3" ]; then + # Missing vars - bail with no changes. + return + fi + if [ "$(compare_to_version "$1" "$2")" == "1" ]; then + use_py3="$3" + fi +} + +get_remote_py_version () { + local pyurl= py_html= py_vers= py_num="3" + pyurl="https://www.python.org/downloads/macos/" + py_html="$(curl -L $pyurl --compressed 2>&1)" + if [ -z "$use_py3" ]; then + use_py3="TRUE" + fi + if [ "$use_py3" == "FALSE" ]; then + py_num="2" + fi + py_vers="$(echo "$py_html" | grep -i "Latest Python $py_num Release" | awk '{print $8}' | cut -d'<' -f1)" + echo "$py_vers" +} + +download_py () { + local vers="$1" url= + clear + echo " ### ###" + echo " # Downloading Python #" + echo "### ###" + echo + if [ -z "$vers" ]; then + echo "Gathering latest version..." + vers="$(get_remote_py_version)" + if [ -z "$vers" ]; then + if [ "$just_installing" == "TRUE" ]; then + echo " - Failed to get info!" + exit 1 + else + # Didn't get it still - bail + print_error + fi + fi + echo "Located Version: $vers" + else + # Got a version passed + echo "User-Provided Version: $vers" + fi + echo "Building download url..." 
+ url="$(curl -L https://www.python.org/downloads/release/python-${vers//./}/ --compressed 2>&1 | grep -iE "python-$vers-macos.*.pkg\"" | awk -F'"' '{ print $2 }' | head -n 1)" + if [ -z "$url" ]; then + if [ "$just_installing" == "TRUE" ]; then + echo " - Failed to build download url!" + exit 1 + else + # Couldn't get the URL - bail + print_error + fi + fi + echo " - $url" + echo "Downloading..." + # Create a temp dir and download to it + tempdir="$(mktemp -d 2>/dev/null || mktemp -d -t 'tempdir')" + curl "$url" -o "$tempdir/python.pkg" + if [ "$?" != "0" ]; then + echo " - Failed to download python installer!" + exit $? + fi + echo + echo "Running python install package..." + echo + sudo installer -pkg "$tempdir/python.pkg" -target / + echo + if [ "$?" != "0" ]; then + echo " - Failed to install python!" + exit $? + fi + # Now we expand the package and look for a shell update script + pkgutil --expand "$tempdir/python.pkg" "$tempdir/python" + if [ -e "$tempdir/python/Python_Shell_Profile_Updater.pkg/Scripts/postinstall" ]; then + # Run the script + echo "Updating PATH..." + echo + "$tempdir/python/Python_Shell_Profile_Updater.pkg/Scripts/postinstall" + echo + fi + vers_folder="Python $(echo "$vers" | cut -d'.' -f1 -f2)" + if [ -f "/Applications/$vers_folder/Install Certificates.command" ]; then + # Certs script exists - let's execute that to make sure our certificates are updated + echo "Updating Certificates..." + echo + "/Applications/$vers_folder/Install Certificates.command" + echo + fi + echo "Cleaning up..." + cleanup + if [ "$just_installing" == "TRUE" ]; then + echo + echo "Done." + else + # Now we check for py again + downloaded="TRUE" + clear + main + fi +} + +cleanup () { + if [ -d "$tempdir" ]; then + rm -Rf "$tempdir" + fi +} + +print_error() { + clear + cleanup + echo " ### ###" + echo " # Python Not Found #" + echo "### ###" + echo + echo "Python is not installed or not found in your PATH var." 
+ echo + if [ "$kernel" == "Darwin" ]; then + echo "Please go to https://www.python.org/downloads/macos/ to" + echo "download and install the latest version, then try again." + else + echo "Please install python through your package manager and" + echo "try again." + fi + echo + exit 1 +} + +print_target_missing() { + clear + cleanup + echo " ### ###" + echo " # Target Not Found #" + echo "### ###" + echo + echo "Could not locate $target!" + echo + exit 1 +} + +format_version () { + local vers="$1" + echo "$(echo "$1" | awk -F. '{ printf("%d%03d%03d%03d\n", $1,$2,$3,$4); }')" +} + +vercomp () { + # Modified from: https://apple.stackexchange.com/a/123408/11374 + local ver1="$(format_version "$1")" ver2="$(format_version "$2")" + if [ $ver1 -gt $ver2 ]; then + echo "1" + elif [ $ver1 -lt $ver2 ]; then + echo "2" + else + echo "0" + fi +} + +get_local_python_version() { + # $1 = Python bin name (defaults to python3) + # Echoes the path to the highest version of the passed python bin if any + local py_name="$1" max_version= python= python_version= python_path= + if [ -z "$py_name" ]; then + py_name="python3" + fi + py_list="$(which -a "$py_name" 2>/dev/null)" + # Walk that newline separated list + while read python; do + if [ -z "$python" ]; then + # Got a blank line - skip + continue + fi + if [ "$check_py3_stub" == "1" ] && [ "$python" == "/usr/bin/python3" ]; then + # See if we have a valid developer path + xcode-select -p > /dev/null 2>&1 + if [ "$?" 
!= "0" ]; then + # /usr/bin/python3 path - but no valid developer dir + continue + fi + fi + python_version="$(get_python_version $python)" + if [ -z "$python_version" ]; then + # Didn't find a py version - skip + continue + fi + # Got the py version - compare to our max + if [ -z "$max_version" ] || [ "$(vercomp "$python_version" "$max_version")" == "1" ]; then + # Max not set, or less than the current - update it + max_version="$python_version" + python_path="$python" + fi + done <<< "$py_list" + echo "$python_path" +} + +get_python_version() { + local py_path="$1" py_version= + # Get the python version by piping stderr into stdout (for py2), then grepping the output for + # the word "python", getting the second element, and grepping for an alphanumeric version number + py_version="$($py_path -V 2>&1 | grep -i python | cut -d' ' -f2 | grep -E "[A-Za-z\d\.]+")" + if [ ! -z "$py_version" ]; then + echo "$py_version" + fi +} + +prompt_and_download() { + if [ "$downloaded" != "FALSE" ] || [ "$kernel" != "Darwin" ]; then + # We already tried to download, or we're not on macOS - just bail + print_error + fi + clear + echo " ### ###" + echo " # Python Not Found #" + echo "### ###" + echo + target_py="Python 3" + printed_py="Python 2 or 3" + if [ "$use_py3" == "FORCE" ]; then + printed_py="Python 3" + elif [ "$use_py3" == "FALSE" ]; then + target_py="Python 2" + printed_py="Python 2" + fi + echo "Could not locate $printed_py!" + echo + echo "This script requires $printed_py to run." + echo + while true; do + read -p "Would you like to install the latest $target_py now? (y/n): " yn + case $yn in + [Yy]* ) download_py;break;; + [Nn]* ) print_error;; + esac + done +} + +main() { + local python= version= + # Verify our target exists + if [ ! 
-f "$dir/$target" ]; then + # Doesn't exist + print_target_missing + fi + if [ -z "$use_py3" ]; then + use_py3="TRUE" + fi + if [ "$use_py3" != "FALSE" ]; then + # Check for py3 first + python="$(get_local_python_version python3)" + fi + if [ "$use_py3" != "FORCE" ] && [ -z "$python" ]; then + # We aren't using py3 explicitly, and we don't already have a path + python="$(get_local_python_version python2)" + if [ -z "$python" ]; then + # Try just looking for "python" + python="$(get_local_python_version python)" + fi + fi + if [ -z "$python" ]; then + # Didn't ever find it - prompt + prompt_and_download + return 1 + fi + # Found it - start our script and pass all args + "$python" "$dir/$target" "${args[@]}" +} + +# Keep track of whether or not we're on macOS to determine if +# we can download and install python for the user as needed. +kernel="$(uname -s)" +# Check to see if we need to force based on +# macOS version. 10.15 has a dummy python3 version +# that can trip up some py3 detection in other scripts. +# set_use_py3_if "3" "10.15" "FORCE" +downloaded="FALSE" +# Check for the aforementioned /usr/bin/python3 stub if +# our OS version is 10.15 or greater. 
+check_py3_stub="$(compare_to_version "3" "10.15")" +trap cleanup EXIT +if [ "$1" == "--install-python" ] && [ "$kernel" == "Darwin" ]; then + just_installing="TRUE" + download_py "$2" +else + main +fi diff --git a/ACPI/SSDTTime-master/PatchMerge.py b/ACPI/SSDTTime-master/PatchMerge.py new file mode 100644 index 0000000..6f6aae0 --- /dev/null +++ b/ACPI/SSDTTime-master/PatchMerge.py @@ -0,0 +1,586 @@ +from Scripts import utils, plist +import argparse, os + +class PatchMerge: + def __init__(self, config=None, results=None, overwrite=False, interactive=True): + self.u = utils.Utils("Patch Merge") + self.w = 80 + self.h = 24 + self.red = "\u001b[41;1m" + self.yel = "\u001b[43;1m" + self.grn = "\u001b[42;1m" + self.blu = "\u001b[46;1m" + self.rst = "\u001b[0m" + self.copy_as_path = self.u.check_admin() if os.name=="nt" else False + if 2/3==0: + # ANSI escapes don't seem to work properly with python 2.x + self.red = self.yel = self.grn = self.blu = self.rst = "" + if os.name == "nt": + if 2/3!=0: + os.system("color") # Allow ANSI color escapes. 
+ self.w = 120 + self.h = 30 + self.interactive = interactive + self.overwrite = overwrite + self.target_patches = ( + ("OpenCore","patches_OC.plist"), + ("Clover","patches_Clover.plist") + ) + self.config_path = config + self.config_type = None + self.output = results or self.get_default_results_folder() + # Expand paths as needed + if self.config_path: + self.config_path = os.path.realpath(self.config_path) + self.config_type,_,_ = self.get_plist_info(self.config_path) + if self.output: + self.output = os.path.realpath(self.output) + + def _get_patches_plists(self, path): + # Append patches_OC/Clover.plist to the path, and return a list + # with the format: + # ((oc_path,exists,plist_name),(clover_path,exists,plist_name)) + path_checks = [] + for p_type,name in self.target_patches: + if path: + p = os.path.join(path,name) + isfile = os.path.isfile(p) + else: + p = None + isfile = False + path_checks.append(( + p, + isfile, + name + )) + return path_checks + + def get_default_results_folder(self, prompt=False): + # Let's attempt to locate a Results folder either in the same + # directory as this script, or in the parent directory. + # If none is found - we'll have to prompt the user as needed. 
+ # + # Try our directory first + local_path = os.path.dirname(os.path.realpath(__file__)) + local_results = os.path.join(local_path,"Results") + parent_results = os.path.realpath(os.path.join(local_path,"..","Results")) + potentials = [] + for path in (local_results,parent_results): + if os.path.isdir(path): + # Check if we have the files we need + o,c = self._get_patches_plists(path) + if o[1] or c[1]: + potentials.append(path) + if potentials: + return potentials[0] + # If we got here - we didn't find anything - check if we need + # to prompt + if not prompt: + # Nope - bail + return None + # We're prompting + return self.select_results_folder() + + def select_results_folder(self): + while True: + self.u.head("Select Results Folder") + print("") + if self.copy_as_path: + print("NOTE: Currently running as admin on Windows - drag and drop may not work.") + print(" Shift + right-click in Explorer and select 'Copy as path' then paste here instead.") + print("") + print("M. Main Menu") + print("Q. Quit") + print("") + print("NOTE: This is the folder containing the patches_OC.plist and") + print(" patches_Clover.plist you are trying to merge. 
It will also be where") + print(" the patched config.plist is saved.") + print("") + path = self.u.grab("Please drag and drop the Results folder here: ") + if not path: + continue + if path.lower() == "m": + return self.output + elif path.lower() == "q": + self.u.custom_quit() + test_path = self.u.check_path(path) + if os.path.isfile(test_path): + # Got a file - get the containing folder + test_path = os.path.dirname(test_path) + if not test_path: + self.u.head("Invalid Path") + print("") + print("That path either does not exist, or is not a folder.") + print("") + self.u.grab("Press [enter] to return...") + continue + # Got a folder - check for patches_OC/Clover.plist + o,c = self._get_patches_plists(test_path) + if not (o[1] or c[1]): + # No patches plists in there + self.u.head("Missing Files") + print("") + print("Neither patches_OC.plist nor patches_Clover.plist were found at that path.") + print("") + self.u.grab("Press [enter] to return...") + continue + # We got what we need - set and return the path + self.output = test_path + return self.output + + def get_ascii_print(self, data): + # Helper to sanitize unprintable characters by replacing them with + # ? where needed + unprintables = False + all_zeroes = True + ascii_string = "" + for b in data: + if not isinstance(b,int): + try: b = ord(b) + except: pass + if b != 0: + # Not wildcard matching + all_zeroes = False + if ord(" ") <= b < ord("~"): + ascii_string += chr(b) + else: + ascii_string += "?" + unprintables = True + return (False if all_zeroes else unprintables,ascii_string) + + def check_normalize(self, patch_or_drop, normalize_headers, check_type="Patch"): + sig = ("OemTableId","TableSignature") + if normalize_headers: + # OpenCore - and NormalizeHeaders is enabled. Check if we have + # any unprintable ASCII chars in our OemTableId or TableSignature + # and warn. + if any(self.get_ascii_print(plist.extract_data(patch_or_drop.get(x,b"\x00")))[0] for x in sig): + print("\n{}!! 
WARNING !!{} NormalizeHeaders is {}ENABLED{}, and table ids contain unprintable".format( + self.yel, + self.rst, + self.grn, + self.rst + )) + print(" characters! {} may not match or apply!\n".format(check_type)) + return True + else: + # Not enabled - check for question marks as that may imply characters + # were sanitized when creating the patches/dropping tables. + if any(b"\x3F" in plist.extract_data(patch_or_drop.get(x,b"\x00")) for x in sig): + print("\n{}!! WARNING !!{} NormalizeHeaders is {}DISABLED{}, and table ids contain '?'!".format( + self.yel, + self.rst, + self.red, + self.rst + )) + print(" {} may not match or apply!\n".format(check_type)) + return True + return False + + def ensure_path(self, plist_data, path_list, final_type = list): + if not path_list: + return plist_data + if not isinstance(plist_data,dict): + plist_data = {} # Override it with a dict + # Set our initial reference, then iterate the + # path list + last = plist_data + for i,path in enumerate(path_list,start=1): + # Check if our next path var is in last + if not path in last: + last[path] = {} if i < len(path_list) else final_type() + # Make sure it's the correct type if we're at the + # end of the entries + if i >= len(path_list) and not isinstance(last[path],final_type): + # Override it + last[path] = final_type() + # Update our reference + last = last[path] + return plist_data + + def get_unique_name(self,name,target_folder,name_append=""): + # Get a new file name in the target folder so we don't override the original + name = os.path.basename(name) + ext = "" if not "." 
in name else name.split(".")[-1] + if ext: name = name[:-len(ext)-1] + if name_append: name = name+str(name_append) + check_name = ".".join((name,ext)) if ext else name + if not os.path.exists(os.path.join(target_folder,check_name)): + return check_name + # We need a unique name + num = 1 + while True: + check_name = "{}-{}".format(name,num) + if ext: check_name += "."+ext + if not os.path.exists(os.path.join(target_folder,check_name)): + return check_name + num += 1 # Increment our counter + + def pause_interactive(self, return_value=None): + if self.interactive: + print("") + self.u.grab("Press [enter] to return...") + return return_value + + def patch_plist(self): + # Retain the config name + if self.interactive: + self.u.head("Patching Plist") + print("") + # Make sure we have a config_path + if not self.config_path: + print("No target plist path specified!") + return self.pause_interactive() + # Make sure that config_path exists + if not os.path.isfile(self.config_path): + print("Could not locate target plist at:") + print(" - {}".format(self.config_path)) + return self.pause_interactive() + # Make sure our output var has a value + if not self.output: + print("No Results folder path specified!") + return self.pause_interactive() + config_name = os.path.basename(self.config_path) + print("Loading {}...".format(config_name)) + self.config_type,config_data,e = self.get_plist_info(self.config_path) + if e: + print(" - Failed to load! 
{}".format(e)) + return self.pause_interactive() + # Recheck the config.plist type + if not self.config_type: + print("Could not determine plist type!") + return self.pause_interactive() + # Ensure our patches plists exist, and break out info + # into the target_path and target_name as needed + target_path,_,target_name = self.get_patch_plist_for_type( + self.output, + self.config_type + ) + # This should only show up if output is None/False/empty + if not target_path: + print("Could not locate {} in:".format(target_name or "the required patches plist")) + print(" - {}".format(self.output)) + return self.pause_interactive() + # Make sure the path actually exists - and is a file + if not os.path.isfile(target_path): + print("Could not locate required patches at:") + print(" - {}".format(target_path)) + return self.pause_interactive() + # Set up some preliminary variables for reporting later + errors_found = normalize_headers = False # Default to off + target_name = os.path.basename(target_path) + print("Loading {}...".format(target_name)) + # Load the target plist + _,target_data,e = self.get_plist_info(target_path) + if e: + print(" - Failed to load! {}".format(e)) + return self.pause_interactive() + print("Ensuring paths in {} and {}...".format(config_name,target_name)) + # Make sure all the needed values are there + if self.config_type == "OpenCore": + for p in (("ACPI","Add"),("ACPI","Delete"),("ACPI","Patch")): + print(" - {}...".format(" -> ".join(p))) + config_data = self.ensure_path(config_data,p) + target_data = self.ensure_path(target_data,p) + print(" - ACPI -> Quirks...") + config_data = self.ensure_path(config_data,("ACPI","Quirks"),final_type=dict) + normalize_headers = config_data["ACPI"]["Quirks"].get("NormalizeHeaders",False) + if not isinstance(normalize_headers,(bool)): + errors_found = True + print("\n{}!! 
WARNING !!{} ACPI -> Quirks -> NormalizeHeaders is malformed - assuming False".format( + self.yel, + self.rst + )) + normalize_headers = False + # Set up our patch sources + ssdts = target_data["ACPI"]["Add"] + patch = target_data["ACPI"]["Patch"] + drops = target_data["ACPI"]["Delete"] + # Set up our original values + s_orig = config_data["ACPI"]["Add"] + p_orig = config_data["ACPI"]["Patch"] + d_orig = config_data["ACPI"]["Delete"] + else: + for p in (("ACPI","DropTables"),("ACPI","SortedOrder"),("ACPI","DSDT","Patches")): + print(" - {}...".format(" -> ".join(p))) + config_data = self.ensure_path(config_data,p) + target_data = self.ensure_path(target_data,p) + # Set up our patch sources + ssdts = target_data["ACPI"]["SortedOrder"] + patch = target_data["ACPI"]["DSDT"]["Patches"] + drops = target_data["ACPI"]["DropTables"] + # Set up our original values + s_orig = config_data["ACPI"]["SortedOrder"] + p_orig = config_data["ACPI"]["DSDT"]["Patches"] + d_orig = config_data["ACPI"]["DropTables"] + print("") + if not ssdts: + print("--- No SSDTs to add - skipping...") + else: + print("--- Walking target SSDTs ({:,} total)...".format(len(ssdts))) + s_rem = [] + # Gather any entries broken from user error + s_broken = [x for x in s_orig if not isinstance(x,dict)] if self.config_type == "OpenCore" else [] + for s in ssdts: + if self.config_type == "OpenCore": + print(" - Checking {}...".format(s["Path"])) + existing = [x for x in s_orig if isinstance(x,dict) and x["Path"] == s["Path"]] + else: + print(" - Checking {}...".format(s)) + existing = [x for x in s_orig if x == s] + if existing: + print(" --> Located {:,} existing to replace...".format(len(existing))) + s_rem.extend(existing) + if s_rem: + print(" - Removing {:,} existing duplicate{}...".format(len(s_rem),"" if len(s_rem)==1 else "s")) + for r in s_rem: + if r in s_orig: s_orig.remove(r) + else: + print(" - No duplicates to remove...") + print(" - Adding {:,} SSDT{}...".format(len(ssdts),"" if len(ssdts)==1 
else "s"))
+            s_orig.extend(ssdts)
+            if s_broken:
+                errors_found = True
+                print("\n{}!! WARNING !!{} {:,} Malformed entr{} found - please fix your {}!".format(
+                    self.yel,
+                    self.rst,
+                    len(s_broken),
+                    "y" if len(s_broken)==1 else "ies",
+                    config_name
+                ))
+        print("")
+        if not patch:
+            print("--- No patches to add - skipping...")
+        else:
+            print("--- Walking target patches ({:,} total)...".format(len(patch)))
+            p_rem = []
+            # Gather any entries broken from user error
+            p_broken = [x for x in p_orig if not isinstance(x,dict)]
+            for p in patch:
+                print(" - Checking {}...".format(p["Comment"]))
+                if self.config_type == "OpenCore" and self.check_normalize(p,normalize_headers):
+                    errors_found = True
+                existing = [x for x in p_orig if isinstance(x,dict) and x["Find"] == p["Find"] and x["Replace"] == p["Replace"]]
+                if existing:
+                    print(" --> Located {:,} existing to replace...".format(len(existing)))
+                    p_rem.extend(existing)
+            # Remove any dupes
+            if p_rem:
+                print(" - Removing {:,} existing duplicate{}...".format(len(p_rem),"" if len(p_rem)==1 else "s"))
+                for r in p_rem:
+                    if r in p_orig: p_orig.remove(r)
+            else:
+                print(" - No duplicates to remove...")
+            print(" - Adding {:,} patch{}...".format(len(patch),"" if len(patch)==1 else "es"))
+            p_orig.extend(patch)
+            if p_broken:
+                errors_found = True
+                print("\n{}!! WARNING !!{} {:,} Malformed entr{} found - please fix your {}!".format(
+                    self.yel,
+                    self.rst,
+                    len(p_broken),
+                    "y" if len(p_broken)==1 else "ies",
+                    config_name
+                ))
+        print("")
+        if not drops:
+            print("--- No tables to drop - skipping...")
+        else:
+            print("--- Walking target tables to drop ({:,} total)...".format(len(drops)))
+            d_rem = []
+            # Gather any entries broken from user error
+            d_broken = [x for x in d_orig if not isinstance(x,dict)]
+            for d in drops:
+                if self.config_type == "OpenCore":
+                    print(" - Checking {}...".format(d["Comment"]))
+                    if self.check_normalize(d,normalize_headers,check_type="Dropped table"):
+                        errors_found = True
+                    existing = [x for x in d_orig if isinstance(x,dict) and x["TableSignature"] == d["TableSignature"] and x["OemTableId"] == d["OemTableId"]]
+                else:
+                    name = " - ".join([x for x in (d.get("Signature",""),d.get("TableId","")) if x]) or "Unknown Dropped Table"
+                    print(" - Checking {}...".format(name))
+                    existing = [x for x in d_orig if isinstance(x,dict) and x.get("Signature") == d.get("Signature") and x.get("TableId") == d.get("TableId")]
+                if existing:
+                    print(" --> Located {:,} existing to replace...".format(len(existing)))
+                    d_rem.extend(existing)
+            if d_rem:
+                print(" - Removing {:,} existing duplicate{}...".format(len(d_rem),"" if len(d_rem)==1 else "s"))
+                for r in d_rem:
+                    if r in d_orig: d_orig.remove(r)
+            else:
+                print(" - No duplicates to remove...")
+            print(" - Dropping {:,} table{}...".format(len(drops),"" if len(drops)==1 else "s"))
+            d_orig.extend(drops)
+            if d_broken:
+                errors_found = True
+                print("\n{}!!
WARNING !!{} {:,} Malformed entr{} found - please fix your {}!".format( + self.yel, + self.rst, + len(d_broken), + "y" if len(d_broken)==1 else "ies", + config_name + )) + print("") + if self.overwrite: + output_path = self.config_path + else: + config_name = self.get_unique_name(config_name,self.output) + output_path = os.path.join(self.output,config_name) + print("Saving to {}...".format(output_path)) + try: + plist.dump(config_data,open(output_path,"wb")) + except Exception as e: + print(" - Failed to save! {}".format(e)) + return self.pause_interactive() + print(" - Saved.") + print("") + if errors_found: + print("{}!! WARNING !!{} Potential errors were found when merging - please address them!".format( + self.yel, + self.rst + )) + print("") + if not self.overwrite: + print("{}!! WARNING !!{} Make sure you review the saved {} before replacing!".format( + self.red, + self.rst, + config_name + )) + print("") + print("Done.") + return self.pause_interactive() + + def get_plist_info(self, config_path): + # Attempts to load the passed config and return a tuple + # of (type_string,config_data,error) + type_string = config_data = e = None + try: + config_data = plist.load(open(config_path,"rb")) + except Exception as e: + return (None,None,e) + if not isinstance(config_data,dict): + e = "Invalid root node type: {}".format(type(config_data)) + else: + type_string = "OpenCore" if "PlatformInfo" in config_data else "Clover" if "SMBIOS" in config_data else None + return (type_string,config_data,None) + + def get_patch_plist_for_type(self, path, config_type): + o,c = self._get_patches_plists(path) + return { + "OpenCore":o, + "Clover":c + }.get(config_type,(None,False,None)) + + def select_plist(self): + while True: + self.u.head("Select Plist") + print("") + if self.copy_as_path: + print("NOTE: Currently running as admin on Windows - drag and drop may not work.") + print(" Shift + right-click in Explorer and select 'Copy as path' then paste here instead.") + print("") + 
print("M. Main Menu") + print("Q. Quit") + print("") + path = self.u.grab("Please drag and drop the config.plist here: ") + if not path: continue + if path.lower() == "m": return + elif path.lower() == "q": self.u.custom_quit() + test_path = self.u.check_path(path) + if not test_path or not os.path.isfile(test_path): + self.u.head("Invalid Path") + print("") + print("That path either does not exist, or is not a file.") + print("") + self.u.grab("Press [enter] to return...") + continue + # Got a file - try to load it + t,_,e = self.get_plist_info(test_path) + if e: + self.u.head("Invalid File") + print("") + print("That file failed to load:\n\n{}".format(e)) + print("") + self.u.grab("Press [enter] to return...") + continue + # Got a valid file + self.config_path = test_path + self.config_type = t + return + + def main(self): + # Gather some preliminary info for display + target_path,target_exists,target_name = self.get_patch_plist_for_type( + self.output, + self.config_type + ) + self.u.resize(self.w,self.h) + self.u.head() + print("") + print("Current config.plist: {}".format(self.config_path)) + print("Type of config.plist: {}".format(self.config_type or "Unknown")) + print("Results Folder: {}".format(self.output)) + print("Patches Plist: {}{}".format( + target_name or "Unknown", + "" if (not target_name or target_exists) else " - {}!! MISSING !!{}".format(self.red,self.rst) + )) + print("Overwrite Original: {}{}{}{}".format( + self.red if self.overwrite else self.grn, + "!! True !!" if self.overwrite else "False", + self.rst, + " - Make Sure You Have A Backup!" if self.overwrite else "" + )) + print("") + print("C. Select config.plist") + print("O. Toggle Overwrite Original") + print("R. Select Results Folder") + if self.config_path and target_exists: + print("P. Patch with {}".format(target_name)) + print("") + print("Q. 
Quit") + print("") + menu = self.u.grab("Please make a selection: ") + if not len(menu): + return + if menu.lower() == "q": + self.u.custom_quit() + elif menu.lower() == "c": + self.select_plist() + elif menu.lower() == "o": + self.overwrite ^= True + elif menu.lower() == "r": + self.select_results_folder() + elif menu.lower() == "p" and self.config_path and target_exists: + self.patch_plist() + +if __name__ == '__main__': + # Setup the cli args + parser = argparse.ArgumentParser(prog="PatchMerge.py", description="PatchMerge - py script to merge patches_[OC/Clover].plist with a config.plist.") + parser.add_argument("-c", "--config", help="path to target config.plist - required if running in non-interactive mode") + parser.add_argument("-r", "--results", help="path to Results folder containing patches_[OC/Clover].plist - required if running in non-interactive mode") + parser.add_argument("-o", "--overwrite", help="overwrite the original config.plist", action="store_true") + parser.add_argument("-i", "--no-interaction", help="run in non-interactive mode - requires -c and -r", action="store_true") + + args = parser.parse_args() + + p = PatchMerge( + config=args.config, + results=args.results, + overwrite=args.overwrite, + interactive=not args.no_interaction + ) + + if args.no_interaction: + # We're in non-interactive mode here + p.patch_plist() + else: + # Interactive mode + if 2/3 == 0: + input = raw_input + while True: + try: + p.main() + except Exception as e: + print("An error occurred: {}".format(e)) + print("") + input("Press [enter] to continue...") diff --git a/ACPI/SSDTTime-master/README.md b/ACPI/SSDTTime-master/README.md new file mode 100644 index 0000000..597109d --- /dev/null +++ b/ACPI/SSDTTime-master/README.md @@ -0,0 +1,47 @@ +SSDTTime +========== +A simple tool designed to make creating SSDTs simple. 
+Supports macOS, Linux and Windows + +## Supported SSDTs: +- SSDT-HPET + - Patches out IRQ conflicts +- SSDT-EC + - OS-aware fake EC (laptop and desktop variants) +- SSDT-USBX + - Provides generic USB power properties +- SSDT-PLUG + - Sets plugin-type = 1 on CPU0/PR00 +- SSDT-PMC + - Adds missing PMCR device for native 300-series NVRAM +- SSDT-AWAC + - Disables AWAC clock, and enables (or fakes) RTC as needed +- SSDT-USB-Reset + - Returns a zero status for detected root hubs to allow hardware querying +- SSDT-Bridge + - Create missing PCI bridges for passed device path +- SSDT-PNLF + - Sets up a PNLF device for laptop backlight control +- SSDT-XOSI + - _OSI rename and patch to return true for a range of Windows versions - also checks for OSID +- DMAR + - Remove Reserved Memory Regions from the DMAR table +- SSDT-SBUS-MCHC + - Defines an MCHC and BUS0 device for SMBus compatibility +- IMEI Bridge + - Defines IMEI - only needed on SNB + 7-series or IVB + 6-series + +Additionally on Linux and Windows the tool can be used to dump the system DSDT. + +## Instructions: +### Linux: +* Launch SSDTTime.py with any somewhat recent version of Python from either a terminal window or by running the file normally. +### macOS: +* Launch SSDTTime.command from either a terminal window or by double clicking the file. +### Windows: +* Launch SSDTTime.bat from either a terminal window or by double clicking the file. + +## Credits: +- [CorpNewt](https://github.com/CorpNewt) - Writing the script and libraries used +- [NoOne](https://github.com/IOIIIO) - Some small improvements to the script +- Rehabman/Intel - iasl diff --git a/ACPI/SSDTTime-master/SSDTTime.bat b/ACPI/SSDTTime-master/SSDTTime.bat new file mode 100644 index 0000000..154789d --- /dev/null +++ b/ACPI/SSDTTime-master/SSDTTime.bat @@ -0,0 +1,426 @@ +@echo off +REM Get our local path and args before delayed expansion - allows % and ! 
+set "thisDir=%~dp0" +set "args=%*" + +setlocal enableDelayedExpansion +REM Setup initial vars +set "script_name=" +set /a tried=0 +set "toask=yes" +set "pause_on_error=yes" +set "py2v=" +set "py2path=" +set "py3v=" +set "py3path=" +set "pypath=" +set "targetpy=3" + +REM use_py3: +REM TRUE = Use if found, use py2 otherwise +REM FALSE = Use py2 +REM FORCE = Use py3 +set "use_py3=TRUE" + +REM We'll parse if the first argument passed is +REM --install-python and if so, we'll just install +REM Can optionally take a version number as the +REM second arg - i.e. --install-python 3.13.1 +set "just_installing=FALSE" +set "user_provided=" + +REM Get the system32 (or equivalent) path +call :getsyspath "syspath" + +REM Make sure the syspath exists +if "!syspath!" == "" ( + if exist "%SYSTEMROOT%\system32\cmd.exe" ( + if exist "%SYSTEMROOT%\system32\reg.exe" ( + if exist "%SYSTEMROOT%\system32\where.exe" ( + REM Fall back on the default path if it exists + set "ComSpec=%SYSTEMROOT%\system32\cmd.exe" + set "syspath=%SYSTEMROOT%\system32\" + ) + ) + ) + if "!syspath!" == "" ( + cls + echo ### ### + echo # Missing Required Files # + echo ### ### + echo. + echo Could not locate cmd.exe, reg.exe, or where.exe + echo. + echo Please ensure your ComSpec environment variable is properly configured and + echo points directly to cmd.exe, then try again. + echo. + echo Current CompSpec Value: "%ComSpec%" + echo. + echo Press [enter] to quit. + pause > nul + exit /b 1 + ) +) + +if "%~1" == "--install-python" ( + set "just_installing=TRUE" + set "user_provided=%~2" + goto installpy +) + +goto checkscript + +:checkscript +REM Check for our script first +set "looking_for=!script_name!" +if "!script_name!" == "" ( + set "looking_for=%~n0.py or %~n0.command" + set "script_name=%~n0.py" + if not exist "!thisDir!\!script_name!" ( + set "script_name=%~n0.command" + ) +) +if not exist "!thisDir!\!script_name!" ( + cls + echo ### ### + echo # Target Not Found # + echo ### ### + echo. 
+ echo Could not find !looking_for!. + echo Please make sure to run this script from the same directory + echo as !looking_for!. + echo. + echo Press [enter] to quit. + pause > nul + exit /b 1 +) +goto checkpy + +:checkpy +call :updatepath +for /f "USEBACKQ tokens=*" %%x in (`!syspath!where.exe python 2^> nul`) do ( call :checkpyversion "%%x" "py2v" "py2path" "py3v" "py3path" ) +for /f "USEBACKQ tokens=*" %%x in (`!syspath!where.exe python3 2^> nul`) do ( call :checkpyversion "%%x" "py2v" "py2path" "py3v" "py3path" ) +for /f "USEBACKQ tokens=*" %%x in (`!syspath!where.exe py 2^> nul`) do ( call :checkpylauncher "%%x" "py2v" "py2path" "py3v" "py3path" ) +REM Walk our returns to see if we need to install +if /i "!use_py3!" == "FALSE" ( + set "targetpy=2" + set "pypath=!py2path!" +) else if /i "!use_py3!" == "FORCE" ( + set "pypath=!py3path!" +) else if /i "!use_py3!" == "TRUE" ( + set "pypath=!py3path!" + if "!pypath!" == "" set "pypath=!py2path!" +) +if not "!pypath!" == "" ( + goto runscript +) +if !tried! lss 1 ( + if /i "!toask!"=="yes" ( + REM Better ask permission first + goto askinstall + ) else ( + goto installpy + ) +) else ( + cls + echo ### ### + echo # Python Not Found # + echo ### ### + echo. + REM Couldn't install for whatever reason - give the error message + echo Python is not installed or not found in your PATH var. + echo Please go to https://www.python.org/downloads/windows/ to + echo download and install the latest version, then try again. + echo. + echo Make sure you check the box labeled: + echo. + echo "Add Python X.X to PATH" + echo. + echo Where X.X is the py version you're installing. + echo. + echo Press [enter] to quit. 
+ pause > nul + exit /b 1 +) +goto runscript + +:checkpylauncher +REM Attempt to check the latest python 2 and 3 versions via the py launcher +for /f "USEBACKQ tokens=*" %%x in (`%~1 -2 -c "import sys; print(sys.executable)" 2^> nul`) do ( call :checkpyversion "%%x" "%~2" "%~3" "%~4" "%~5" ) +for /f "USEBACKQ tokens=*" %%x in (`%~1 -3 -c "import sys; print(sys.executable)" 2^> nul`) do ( call :checkpyversion "%%x" "%~2" "%~3" "%~4" "%~5" ) +goto :EOF + +:checkpyversion +set "version="&for /f "tokens=2* USEBACKQ delims= " %%a in (`"%~1" -V 2^>^&1`) do ( + REM Ensure we have a version number + call :isnumber "%%a" + if not "!errorlevel!" == "0" goto :EOF + set "version=%%a" +) +if not defined version goto :EOF +if "!version:~0,1!" == "2" ( + REM Python 2 + call :comparepyversion "!version!" "!%~2!" + if "!errorlevel!" == "1" ( + set "%~2=!version!" + set "%~3=%~1" + ) +) else ( + REM Python 3 + call :comparepyversion "!version!" "!%~4!" + if "!errorlevel!" == "1" ( + set "%~4=!version!" + set "%~5=%~1" + ) +) +goto :EOF + +:isnumber +set "var="&for /f "delims=0123456789." %%i in ("%~1") do set var=%%i +if defined var (exit /b 1) +exit /b 0 + +:comparepyversion +REM Exits with status 0 if equal, 1 if v1 gtr v2, 2 if v1 lss v2 +for /f "tokens=1,2,3 delims=." %%a in ("%~1") do ( + set a1=%%a + set a2=%%b + set a3=%%c +) +for /f "tokens=1,2,3 delims=." %%a in ("%~2") do ( + set b1=%%a + set b2=%%b + set b3=%%c +) +if not defined a1 set a1=0 +if not defined a2 set a2=0 +if not defined a3 set a3=0 +if not defined b1 set b1=0 +if not defined b2 set b2=0 +if not defined b3 set b3=0 +if %a1% gtr %b1% exit /b 1 +if %a1% lss %b1% exit /b 2 +if %a2% gtr %b2% exit /b 1 +if %a2% lss %b2% exit /b 2 +if %a3% gtr %b3% exit /b 1 +if %a3% lss %b3% exit /b 2 +exit /b 0 + +:askinstall +cls +echo ### ### +echo # Python Not Found # +echo ### ### +echo. +echo Python !targetpy! was not found on the system or in the PATH var. +echo. +set /p "menu=Would you like to install it now? 
[y/n]: " +if /i "!menu!"=="y" ( + REM We got the OK - install it + goto installpy +) else if "!menu!"=="n" ( + REM No OK here... + set /a tried=!tried!+1 + goto checkpy +) +REM Incorrect answer - go back +goto askinstall + +:installpy +REM This will attempt to download and install python +set /a tried=!tried!+1 +cls +echo ### ### +echo # Downloading Python # +echo ### ### +echo. +set "release=!user_provided!" +if "!release!" == "" ( + REM No explicit release set - get the latest from python.org + echo Gathering latest version... + powershell -command "[Net.ServicePointManager]::SecurityProtocol=[Net.SecurityProtocolType]::Tls12;(new-object System.Net.WebClient).DownloadFile('https://www.python.org/downloads/windows/','%TEMP%\pyurl.txt')" + REM Extract it if it's gzip compressed + powershell -command "$infile='%TEMP%\pyurl.txt';$outfile='%TEMP%\pyurl.temp';try{$input=New-Object System.IO.FileStream $infile,([IO.FileMode]::Open),([IO.FileAccess]::Read),([IO.FileShare]::Read);$output=New-Object System.IO.FileStream $outfile,([IO.FileMode]::Create),([IO.FileAccess]::Write),([IO.FileShare]::None);$gzipStream=New-Object System.IO.Compression.GzipStream $input,([IO.Compression.CompressionMode]::Decompress);$buffer=New-Object byte[](1024);while($true){$read=$gzipstream.Read($buffer,0,1024);if($read -le 0){break};$output.Write($buffer,0,$read)};$gzipStream.Close();$output.Close();$input.Close();Move-Item -Path $outfile -Destination $infile -Force}catch{}" + if not exist "%TEMP%\pyurl.txt" ( + if /i "!just_installing!" == "TRUE" ( + echo - Failed to get info + exit /b 1 + ) else ( + goto checkpy + ) + ) + pushd "%TEMP%" + :: Version detection code slimmed by LussacZheng (https://github.com/corpnewt/gibMacOS/issues/20) + for /f "tokens=9 delims=< " %%x in ('findstr /i /c:"Latest Python !targetpy! Release" pyurl.txt') do ( set "release=%%x" ) + popd + REM Let's delete our txt file now - we no longer need it + del "%TEMP%\pyurl.txt" + if "!release!" 
== "" ( + if /i "!just_installing!" == "TRUE" ( + echo - Failed to get python version + exit /b 1 + ) else ( + goto checkpy + ) + ) + echo Located Version: !release! +) else ( + echo User-Provided Version: !release! + REM Update our targetpy to reflect the first number of + REM our release + for /f "tokens=1 delims=." %%a in ("!release!") do ( + call :isnumber "%%a" + if "!errorlevel!" == "0" ( + set "targetpy=%%a" + ) + ) +) +echo Building download url... +REM At this point - we should have the version number. +REM We can build the url like so: "https://www.python.org/ftp/python/[version]/python-[version]-amd64.exe" +set "url=https://www.python.org/ftp/python/!release!/python-!release!-amd64.exe" +set "pytype=exe" +if "!targetpy!" == "2" ( + set "url=https://www.python.org/ftp/python/!release!/python-!release!.amd64.msi" + set "pytype=msi" +) +echo - !url! +echo Downloading... +REM Now we download it with our slick powershell command +powershell -command "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; (new-object System.Net.WebClient).DownloadFile('!url!','%TEMP%\pyinstall.!pytype!')" +REM If it doesn't exist - we bail +if not exist "%TEMP%\pyinstall.!pytype!" ( + if /i "!just_installing!" == "TRUE" ( + echo - Failed to download python installer + exit /b 1 + ) else ( + goto checkpy + ) +) +REM It should exist at this point - let's run it to install silently +echo Running python !pytype! installer... +pushd "%TEMP%" +if /i "!pytype!" == "exe" ( + echo - pyinstall.exe /quiet PrependPath=1 Include_test=0 Shortcuts=0 Include_launcher=0 + pyinstall.exe /quiet PrependPath=1 Include_test=0 Shortcuts=0 Include_launcher=0 +) else ( + set "foldername=!release:.=!" + echo - msiexec /i pyinstall.msi /qb ADDLOCAL=ALL TARGETDIR="%LocalAppData%\Programs\Python\Python!foldername:~0,2!" + msiexec /i pyinstall.msi /qb ADDLOCAL=ALL TARGETDIR="%LocalAppData%\Programs\Python\Python!foldername:~0,2!" +) +popd +set "py_error=!errorlevel!" 
+echo Installer finished with status: !py_error! +echo Cleaning up... +REM Now we should be able to delete the installer and check for py again +del "%TEMP%\pyinstall.!pytype!" +REM If it worked, then we should have python in our PATH +REM this does not get updated right away though - let's try +REM manually updating the local PATH var +call :updatepath +if /i "!just_installing!" == "TRUE" ( + echo. + echo Done. +) else ( + goto checkpy +) +exit /b + +:runscript +REM Python found +cls +REM Checks the args gathered at the beginning of the script. +REM Make sure we're not just forwarding empty quotes. +set "arg_test=!args:"=!" +if "!arg_test!"=="" ( + "!pypath!" "!thisDir!!script_name!" +) else ( + "!pypath!" "!thisDir!!script_name!" !args! +) +if /i "!pause_on_error!" == "yes" ( + if not "%ERRORLEVEL%" == "0" ( + echo. + echo Script exited with error code: %ERRORLEVEL% + echo. + echo Press [enter] to exit... + pause > nul + ) +) +goto :EOF + +:undouble +REM Helper function to strip doubles of a single character out of a string recursively +set "string_value=%~2" +:undouble_continue +set "check=!string_value:%~3%~3=%~3!" +if not "!check!" == "!string_value!" ( + set "string_value=!check!" + goto :undouble_continue +) +set "%~1=!check!" 
+goto :EOF + +:updatepath +set "spath=" +set "upath=" +for /f "USEBACKQ tokens=2* delims= " %%i in (`!syspath!reg.exe query "HKCU\Environment" /v "Path" 2^> nul`) do ( if not "%%j" == "" set "upath=%%j" ) +for /f "USEBACKQ tokens=2* delims= " %%i in (`!syspath!reg.exe query "HKLM\SYSTEM\CurrentControlSet\Control\Session Manager\Environment" /v "Path" 2^> nul`) do ( if not "%%j" == "" set "spath=%%j" ) +if not "%spath%" == "" ( + REM We got something in the system path + set "PATH=%spath%" + if not "%upath%" == "" ( + REM We also have something in the user path + set "PATH=%PATH%;%upath%" + ) +) else if not "%upath%" == "" ( + set "PATH=%upath%" +) +REM Remove double semicolons from the adjusted PATH +call :undouble "PATH" "%PATH%" ";" +goto :EOF + +:getsyspath +REM Helper method to return a valid path to cmd.exe, reg.exe, and where.exe by +REM walking the ComSpec var - will also repair it in memory if need be +REM Strip double semi-colons +call :undouble "temppath" "%ComSpec%" ";" + +REM Dirty hack to leverage the "line feed" approach - there are some odd side +REM effects with this. Do not use this variable name in comments near this +REM line - as it seems to behave erradically. +(set LF=^ +%=this line is empty=% +) +REM Replace instances of semi-colons with a line feed and wrap +REM in parenthesis to work around some strange batch behavior +set "testpath=%temppath:;=!LF!%" + +REM Let's walk each path and test if cmd.exe, reg.exe, and where.exe exist there +set /a found=0 +for /f "tokens=* delims=" %%i in ("!testpath!") do ( + REM Only continue if we haven't found it yet + if not "%%i" == "" ( + if !found! lss 1 ( + set "checkpath=%%i" + REM Remove "cmd.exe" from the end if it exists + if /i "!checkpath:~-7!" == "cmd.exe" ( + set "checkpath=!checkpath:~0,-7!" + ) + REM Pad the end with a backslash if needed + if not "!checkpath:~-1!" 
== "\" ( + set "checkpath=!checkpath!\" + ) + REM Let's see if cmd, reg, and where exist there - and set it if so + if EXIST "!checkpath!cmd.exe" ( + if EXIST "!checkpath!reg.exe" ( + if EXIST "!checkpath!where.exe" ( + set /a found=1 + set "ComSpec=!checkpath!cmd.exe" + set "%~1=!checkpath!" + ) + ) + ) + ) + ) +) +goto :EOF diff --git a/ACPI/SSDTTime-master/SSDTTime.command b/ACPI/SSDTTime-master/SSDTTime.command new file mode 100644 index 0000000..43b08a2 --- /dev/null +++ b/ACPI/SSDTTime-master/SSDTTime.command @@ -0,0 +1,339 @@ +#!/usr/bin/env bash + +# Get the curent directory, the script name +# and the script name with "py" substituted for the extension. +args=( "$@" ) +dir="$(cd -- "$(dirname "$0")" >/dev/null 2>&1; pwd -P)" +script="${0##*/}" +target="${script%.*}.py" + +# use_py3: +# TRUE = Use if found, use py2 otherwise +# FALSE = Use py2 +# FORCE = Use py3 +use_py3="TRUE" + +# We'll parse if the first argument passed is +# --install-python and if so, we'll just install +# Can optionally take a version number as the +# second arg - i.e. --install-python 3.13.1 +just_installing="FALSE" + +tempdir="" + +compare_to_version () { + # Compares our OS version to the passed OS version, and + # return a 1 if we match the passed compare type, or a 0 if we don't. + # $1 = 0 (equal), 1 (greater), 2 (less), 3 (gequal), 4 (lequal) + # $2 = OS version to compare ours to + if [ -z "$1" ] || [ -z "$2" ]; then + # Missing info - bail. 
+ return + fi + local current_os= comp= + current_os="$(sw_vers -productVersion 2>/dev/null)" + comp="$(vercomp "$current_os" "$2")" + # Check gequal and lequal first + if [[ "$1" == "3" && ("$comp" == "1" || "$comp" == "0") ]] || [[ "$1" == "4" && ("$comp" == "2" || "$comp" == "0") ]] || [[ "$comp" == "$1" ]]; then + # Matched + echo "1" + else + # No match + echo "0" + fi +} + +set_use_py3_if () { + # Auto sets the "use_py3" variable based on + # conditions passed + # $1 = 0 (equal), 1 (greater), 2 (less), 3 (gequal), 4 (lequal) + # $2 = OS version to compare + # $3 = TRUE/FALSE/FORCE in case of match + if [ -z "$1" ] || [ -z "$2" ] || [ -z "$3" ]; then + # Missing vars - bail with no changes. + return + fi + if [ "$(compare_to_version "$1" "$2")" == "1" ]; then + use_py3="$3" + fi +} + +get_remote_py_version () { + local pyurl= py_html= py_vers= py_num="3" + pyurl="https://www.python.org/downloads/macos/" + py_html="$(curl -L $pyurl --compressed 2>&1)" + if [ -z "$use_py3" ]; then + use_py3="TRUE" + fi + if [ "$use_py3" == "FALSE" ]; then + py_num="2" + fi + py_vers="$(echo "$py_html" | grep -i "Latest Python $py_num Release" | awk '{print $8}' | cut -d'<' -f1)" + echo "$py_vers" +} + +download_py () { + local vers="$1" url= + clear + echo " ### ###" + echo " # Downloading Python #" + echo "### ###" + echo + if [ -z "$vers" ]; then + echo "Gathering latest version..." + vers="$(get_remote_py_version)" + if [ -z "$vers" ]; then + if [ "$just_installing" == "TRUE" ]; then + echo " - Failed to get info!" + exit 1 + else + # Didn't get it still - bail + print_error + fi + fi + echo "Located Version: $vers" + else + # Got a version passed + echo "User-Provided Version: $vers" + fi + echo "Building download url..." 
+ url="$(curl -L https://www.python.org/downloads/release/python-${vers//./}/ --compressed 2>&1 | grep -iE "python-$vers-macos.*.pkg\"" | awk -F'"' '{ print $2 }' | head -n 1)" + if [ -z "$url" ]; then + if [ "$just_installing" == "TRUE" ]; then + echo " - Failed to build download url!" + exit 1 + else + # Couldn't get the URL - bail + print_error + fi + fi + echo " - $url" + echo "Downloading..." + # Create a temp dir and download to it + tempdir="$(mktemp -d 2>/dev/null || mktemp -d -t 'tempdir')" + curl "$url" -o "$tempdir/python.pkg" + if [ "$?" != "0" ]; then + echo " - Failed to download python installer!" + exit $? + fi + echo + echo "Running python install package..." + echo + sudo installer -pkg "$tempdir/python.pkg" -target / + echo + if [ "$?" != "0" ]; then + echo " - Failed to install python!" + exit $? + fi + # Now we expand the package and look for a shell update script + pkgutil --expand "$tempdir/python.pkg" "$tempdir/python" + if [ -e "$tempdir/python/Python_Shell_Profile_Updater.pkg/Scripts/postinstall" ]; then + # Run the script + echo "Updating PATH..." + echo + "$tempdir/python/Python_Shell_Profile_Updater.pkg/Scripts/postinstall" + echo + fi + vers_folder="Python $(echo "$vers" | cut -d'.' -f1 -f2)" + if [ -f "/Applications/$vers_folder/Install Certificates.command" ]; then + # Certs script exists - let's execute that to make sure our certificates are updated + echo "Updating Certificates..." + echo + "/Applications/$vers_folder/Install Certificates.command" + echo + fi + echo "Cleaning up..." + cleanup + if [ "$just_installing" == "TRUE" ]; then + echo + echo "Done." + else + # Now we check for py again + downloaded="TRUE" + clear + main + fi +} + +cleanup () { + if [ -d "$tempdir" ]; then + rm -Rf "$tempdir" + fi +} + +print_error() { + clear + cleanup + echo " ### ###" + echo " # Python Not Found #" + echo "### ###" + echo + echo "Python is not installed or not found in your PATH var." 
+ echo + if [ "$kernel" == "Darwin" ]; then + echo "Please go to https://www.python.org/downloads/macos/ to" + echo "download and install the latest version, then try again." + else + echo "Please install python through your package manager and" + echo "try again." + fi + echo + exit 1 +} + +print_target_missing() { + clear + cleanup + echo " ### ###" + echo " # Target Not Found #" + echo "### ###" + echo + echo "Could not locate $target!" + echo + exit 1 +} + +format_version () { + local vers="$1" + echo "$(echo "$1" | awk -F. '{ printf("%d%03d%03d%03d\n", $1,$2,$3,$4); }')" +} + +vercomp () { + # Modified from: https://apple.stackexchange.com/a/123408/11374 + local ver1="$(format_version "$1")" ver2="$(format_version "$2")" + if [ $ver1 -gt $ver2 ]; then + echo "1" + elif [ $ver1 -lt $ver2 ]; then + echo "2" + else + echo "0" + fi +} + +get_local_python_version() { + # $1 = Python bin name (defaults to python3) + # Echoes the path to the highest version of the passed python bin if any + local py_name="$1" max_version= python= python_version= python_path= + if [ -z "$py_name" ]; then + py_name="python3" + fi + py_list="$(which -a "$py_name" 2>/dev/null)" + # Walk that newline separated list + while read python; do + if [ -z "$python" ]; then + # Got a blank line - skip + continue + fi + if [ "$check_py3_stub" == "1" ] && [ "$python" == "/usr/bin/python3" ]; then + # See if we have a valid developer path + xcode-select -p > /dev/null 2>&1 + if [ "$?" 
!= "0" ]; then + # /usr/bin/python3 path - but no valid developer dir + continue + fi + fi + python_version="$(get_python_version $python)" + if [ -z "$python_version" ]; then + # Didn't find a py version - skip + continue + fi + # Got the py version - compare to our max + if [ -z "$max_version" ] || [ "$(vercomp "$python_version" "$max_version")" == "1" ]; then + # Max not set, or less than the current - update it + max_version="$python_version" + python_path="$python" + fi + done <<< "$py_list" + echo "$python_path" +} + +get_python_version() { + local py_path="$1" py_version= + # Get the python version by piping stderr into stdout (for py2), then grepping the output for + # the word "python", getting the second element, and grepping for an alphanumeric version number + py_version="$($py_path -V 2>&1 | grep -i python | cut -d' ' -f2 | grep -E "[A-Za-z\d\.]+")" + if [ ! -z "$py_version" ]; then + echo "$py_version" + fi +} + +prompt_and_download() { + if [ "$downloaded" != "FALSE" ] || [ "$kernel" != "Darwin" ]; then + # We already tried to download, or we're not on macOS - just bail + print_error + fi + clear + echo " ### ###" + echo " # Python Not Found #" + echo "### ###" + echo + target_py="Python 3" + printed_py="Python 2 or 3" + if [ "$use_py3" == "FORCE" ]; then + printed_py="Python 3" + elif [ "$use_py3" == "FALSE" ]; then + target_py="Python 2" + printed_py="Python 2" + fi + echo "Could not locate $printed_py!" + echo + echo "This script requires $printed_py to run." + echo + while true; do + read -p "Would you like to install the latest $target_py now? (y/n): " yn + case $yn in + [Yy]* ) download_py;break;; + [Nn]* ) print_error;; + esac + done +} + +main() { + local python= version= + # Verify our target exists + if [ ! 
-f "$dir/$target" ]; then + # Doesn't exist + print_target_missing + fi + if [ -z "$use_py3" ]; then + use_py3="TRUE" + fi + if [ "$use_py3" != "FALSE" ]; then + # Check for py3 first + python="$(get_local_python_version python3)" + fi + if [ "$use_py3" != "FORCE" ] && [ -z "$python" ]; then + # We aren't using py3 explicitly, and we don't already have a path + python="$(get_local_python_version python2)" + if [ -z "$python" ]; then + # Try just looking for "python" + python="$(get_local_python_version python)" + fi + fi + if [ -z "$python" ]; then + # Didn't ever find it - prompt + prompt_and_download + return 1 + fi + # Found it - start our script and pass all args + "$python" "$dir/$target" "${args[@]}" +} + +# Keep track of whether or not we're on macOS to determine if +# we can download and install python for the user as needed. +kernel="$(uname -s)" +# Check to see if we need to force based on +# macOS version. 10.15 has a dummy python3 version +# that can trip up some py3 detection in other scripts. +# set_use_py3_if "3" "10.15" "FORCE" +downloaded="FALSE" +# Check for the aforementioned /usr/bin/python3 stub if +# our OS version is 10.15 or greater. 
+check_py3_stub="$(compare_to_version "3" "10.15")" +trap cleanup EXIT +if [ "$1" == "--install-python" ] && [ "$kernel" == "Darwin" ]; then + just_installing="TRUE" + download_py "$2" +else + main +fi diff --git a/ACPI/SSDTTime-master/SSDTTime.py b/ACPI/SSDTTime-master/SSDTTime.py new file mode 100644 index 0000000..756ef7f --- /dev/null +++ b/ACPI/SSDTTime-master/SSDTTime.py @@ -0,0 +1,4138 @@ +from Scripts import dsdt, plist, reveal, run, utils +import getpass, os, tempfile, shutil, plistlib, sys, binascii, zipfile, re, string, json, textwrap + +class SSDT: + def __init__(self, **kwargs): + self.u = utils.Utils("SSDT Time") + self.r = run.Run() + self.re = reveal.Reveal() + try: + self.d = dsdt.DSDT() + except Exception as e: + print("Something went wrong :( - Aborting!\n - {}".format(e)) + exit(1) + self.w = 80 + self.h = 24 + self.red = "\u001b[41;1m" + self.yel = "\u001b[43;1m" + self.grn = "\u001b[42;1m" + self.blu = "\u001b[46;1m" + self.rst = "\u001b[0m" + self.copy_as_path = self.u.check_admin() if os.name=="nt" else False + if 2/3==0: + # ANSI escapes don't seem to work properly with python 2.x + self.red = self.yel = self.grn = self.blu = self.rst = "" + if os.name == "nt": + if 2/3!=0: + os.system("color") # Allow ANSI color escapes. 
+ self.w = 120 + self.h = 30 + self.iasl_legacy = False + self.resize_window = True + # Set up match mode approach: + # 0 = Any table id, any length + # 1 = Any table id, match length + # 2 = Match table id, match length + # 3 = Match NORMALIZED table id, match length + self.match_mode = 0 + self.match_dict = { + 0:"{}Least Strict{}".format(self.red,self.rst), + 1:"{}Length Only{}".format(self.yel,self.rst), + 2:"{}Table Ids and Length{}".format(self.grn,self.rst), + 3:"{}Table Ids and Length (NormalizeHeaders){}".format(self.blu,self.rst) + } + self.dsdt = None + self.settings = os.path.join(os.path.dirname(os.path.realpath(__file__)),"Scripts","settings.json") + if os.path.exists(self.settings): + self.load_settings() + self.output = "Results" + self.target_irqs = [0,2,8,11] + self.illegal_names = ("XHC1","EHC1","EHC2","PXSX") + # _OSI Strings found here: https://learn.microsoft.com/en-us/windows-hardware/drivers/acpi/winacpi-osi + self.osi_strings = { + "Windows 2000": "Windows 2000", + "Windows XP": "Windows 2001", + "Windows XP SP1": "Windows 2001 SP1", + "Windows Server 2003": "Windows 2001.1", + "Windows XP SP2": "Windows 2001 SP2", + "Windows Server 2003 SP1": "Windows 2001.1 SP1", + "Windows Vista": "Windows 2006", + "Windows Vista SP1": "Windows 2006 SP1", + "Windows Server 2008": "Windows 2006.1", + "Windows 7, Win Server 2008 R2": "Windows 2009", + "Windows 8, Win Server 2012": "Windows 2012", + "Windows 8.1": "Windows 2013", + "Windows 10": "Windows 2015", + "Windows 10, version 1607": "Windows 2016", + "Windows 10, version 1703": "Windows 2017", + "Windows 10, version 1709": "Windows 2017.2", + "Windows 10, version 1803": "Windows 2018", + "Windows 10, version 1809": "Windows 2018.2", + "Windows 10, version 1903": "Windows 2019", + "Windows 10, version 2004": "Windows 2020", + "Windows 11": "Windows 2021", + "Windows 11, version 22H2": "Windows 2022" + } + self.pre_patches = ( + { + "PrePatch":"GPP7 duplicate _PRW methods", + "Comment" :"GPP7._PRW to 
XPRW to fix Gigabyte's Mistake", + "Find" :"3708584847500A021406535245470214065350525701085F505257", + "Replace" :"3708584847500A0214065352454702140653505257010858505257" + }, + { + "PrePatch":"GPP7 duplicate UP00 devices", + "Comment" :"GPP7.UP00 to UPXX to fix Gigabyte's Mistake", + "Find" :"1047052F035F53425F50434930475050375B82450455503030", + "Replace" :"1047052F035F53425F50434930475050375B82450455505858" + }, + { + "PrePatch":"GPP6 duplicate _PRW methods", + "Comment" :"GPP6._PRW to XPRW to fix ASRock's Mistake", + "Find" :"47505036085F4144520C04000200140F5F505257", + "Replace" :"47505036085F4144520C04000200140F58505257" + }, + { + "PrePatch":"GPP1 duplicate PTXH devices", + "Comment" :"GPP1.PTXH to XTXH to fix MSI's Mistake", + "Find" :"50545848085F41445200140F", + "Replace" :"58545848085F41445200140F" + } + ) + + def save_settings(self): + settings = { + "legacy_compiler": self.iasl_legacy, + "resize_window": self.resize_window, + "match_mode": self.match_mode + } + try: json.dump(settings,open(self.settings,"w"),indent=2) + except: return + + def load_settings(self): + try: + settings = json.load(open(self.settings)) + if self.d.iasl_legacy: # Only load the legacy compiler setting if we can + self.iasl_legacy = settings.get("legacy_compiler",False) + self.resize_window = settings.get("resize_window",True) + self.match_mode = settings.get("match_mode",0) + except: return + + def get_unique_name(self,name,target_folder,name_append="-Patched"): + # Get a new file name in the Results folder so we don't override the original + name = os.path.basename(name) + ext = "" if not "." 
in name else name.split(".")[-1] + if ext: name = name[:-len(ext)-1] + if name_append: name = name+str(name_append) + check_name = ".".join((name,ext)) if ext else name + if not os.path.exists(os.path.join(target_folder,check_name)): + return check_name + # We need a unique name + num = 1 + while True: + check_name = "{}-{}".format(name,num) + if ext: check_name += "."+ext + if not os.path.exists(os.path.join(target_folder,check_name)): + return check_name + num += 1 # Increment our counter + + def sorted_nicely(self, l): + convert = lambda text: int(text) if text.isdigit() else text + alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key.lower()) ] + return sorted(l, key = alphanum_key) + + def load_dsdt(self, path): + if not path: + return + self.u.head("Loading ACPI Table(s)") + print("") + tables = [] + trouble_dsdt = None + fixed = False + temp = None + prior_tables = self.d.acpi_tables # Retain in case of failure + # Clear any existing tables so we load anew + self.d.acpi_tables = {} + if os.path.isdir(path): + print("Gathering valid tables from {}...\n".format(os.path.basename(path))) + for t in self.sorted_nicely(os.listdir(path)): + if self.d.table_is_valid(path,t): + print(" - {}".format(t)) + tables.append(t) + if not tables: + # Check if there's an ACPI directory within the passed + # directory - this may indicate SysReport was dropped + if os.path.isdir(os.path.join(path,"ACPI")): + # Rerun this function with that updated path + return self.load_dsdt(os.path.join(path,"ACPI")) + print(" - No valid .aml files were found!") + print("") + self.u.grab("Press [enter] to return...") + # Restore any prior tables + self.d.acpi_tables = prior_tables + return + print("") + # We got at least one file - let's look for the DSDT specifically + # and try to load that as-is. 
If it doesn't load, we'll have to + # manage everything with temp folders + dsdt_list = [x for x in tables if self.d._table_signature(path,x) == b"DSDT"] + if len(dsdt_list) > 1: + print("Multiple files with DSDT signature passed:") + for d in self.sorted_nicely(dsdt_list): + print(" - {}".format(d)) + print("\nOnly one is allowed at a time. Please remove all but one of the above and try") + print("again.") + print("") + self.u.grab("Press [enter] to return...") + # Restore any prior tables + self.d.acpi_tables = prior_tables + return + # Get the DSDT, if any + dsdt = dsdt_list[0] if len(dsdt_list) else None + if dsdt: # Try to load it and see if it causes problems + print("Disassembling {} to verify if pre-patches are needed...".format(dsdt)) + if not self.d.load(os.path.join(path,dsdt))[0]: + trouble_dsdt = dsdt + else: + print("\nDisassembled successfully!\n") + elif os.path.isfile(path): + print("Loading {}...".format(os.path.basename(path))) + if self.d.load(path)[0]: + print("\nDone.") + # If it loads fine - just return the path + # to the parent directory + return os.path.dirname(path) + if not self.d._table_signature(path) == b"DSDT": + # Not a DSDT, we aren't applying pre-patches + print("\n{} could not be disassembled!".format(os.path.basename(path))) + print("") + self.u.grab("Press [enter] to return...") + # Restore any prior tables + self.d.acpi_tables = prior_tables + return + # It didn't load - set it as the trouble file + trouble_dsdt = os.path.basename(path) + # Put the table in the tables list, and adjust + # the path to represent the parent dir + tables.append(os.path.basename(path)) + path = os.path.dirname(path) + else: + print("Passed file/folder does not exist!") + print("") + self.u.grab("Press [enter] to return...") + # Restore any prior tables + self.d.acpi_tables = prior_tables + return + # If we got here - check if we have a trouble_dsdt. 
+ if trouble_dsdt: + # We need to move our ACPI files to a temp folder + # then try patching the DSDT there + temp = tempfile.mkdtemp() + for table in tables: + shutil.copy( + os.path.join(path,table), + temp + ) + # Get a reference to the new trouble file + trouble_path = os.path.join(temp,trouble_dsdt) + # Now we try patching it + print("Checking available pre-patches...") + print("Loading {} into memory...".format(trouble_dsdt)) + with open(trouble_path,"rb") as f: + d = f.read() + res = self.d.check_output(self.output) + target_name = self.get_unique_name(trouble_dsdt,res,name_append="-Patched") + patches = [] + print("Iterating patches...\n") + for p in self.pre_patches: + if not all(x in p for x in ("PrePatch","Comment","Find","Replace")): continue + print(" - {}".format(p["PrePatch"])) + find = binascii.unhexlify(p["Find"]) + if d.count(find) == 1: + patches.append(p) # Retain the patch + repl = binascii.unhexlify(p["Replace"]) + print(" --> Located - applying...") + d = d.replace(find,repl) # Replace it in memory + with open(trouble_path,"wb") as f: + f.write(d) # Write the updated file + # Attempt to load again + loaded_table = self.d.load(trouble_path)[0] + if loaded_table: + try: + table = loaded_table[list(loaded_table)[0]] + except: + pass + fixed = True + # We got it to load - let's write the patches + print("\nDisassembled successfully!\n") + self.make_plist(None, None, patches) + # Save to the local file + with open(os.path.join(res,target_name),"wb") as f: + f.write(d) + print("\n!! 
Patches applied to modified file in Results folder:\n {}".format(target_name)) + self.patch_warn() + break + if not fixed: + print("\n{} could not be disassembled!".format(trouble_dsdt)) + print("") + self.u.grab("Press [enter] to return...") + if temp: + shutil.rmtree(temp,ignore_errors=True) + # Restore any prior tables + self.d.acpi_tables = prior_tables + return + # Let's load the rest of the tables + if len(tables) > 1: + print("Loading valid tables in {}...".format(path)) + loaded_tables,failed = self.d.load(temp or path) + if not loaded_tables or failed: + print("\nFailed to load tables in {}{}\n".format( + os.path.dirname(path) if os.path.isfile(path) else path, + ":" if failed else "" + )) + for t in self.sorted_nicely(failed): + print(" - {}".format(t)) + # Restore any prior tables + if not loaded_tables: + self.d.acpi_tables = prior_tables + else: + if len(tables) > 1: + print("") # Newline for readability + print("Done.") + # If we had to patch the DSDT, or if not all tables loaded, + # make sure we get interaction from the user to continue + if trouble_dsdt or not loaded_tables or failed: + print("") + self.u.grab("Press [enter] to continue...") + if temp: + shutil.rmtree(temp,ignore_errors=True) + return path + + def select_dsdt(self, single_table=False): + while True: + self.u.head("Select ACPI Table{}".format("" if single_table else "s")) + print(" ") + if self.copy_as_path: + print("NOTE: Currently running as admin on Windows - drag and drop may not work.") + print(" Shift + right-click in Explorer and select 'Copy as path' then paste here instead.") + print("") + if sys.platform.startswith("linux") or sys.platform == "win32": + print("P. Dump the current system's ACPI tables") + print("M. Main") + print("Q. Quit") + print(" ") + if single_table: + print("NOTE: The function requesting this table expects either a single table, or one") + print(" with the DSDT signature. 
If neither condition is met, you will be") + print(" returned to the main menu.") + print("") + dsdt = self.u.grab("Please drag and drop an ACPI table or folder of tables here: ") + if dsdt.lower() == "p" and (sys.platform.startswith("linux") or sys.platform == "win32"): + output_folder = os.path.join(os.path.dirname(os.path.realpath(__file__)),self.output) + acpi_name = self.get_unique_name("OEM",output_folder,name_append="") + return self.load_dsdt( + self.d.dump_tables(os.path.join(output_folder,acpi_name)) + ) + elif dsdt.lower() == "m": + return self.dsdt + elif dsdt.lower() == "q": + self.u.custom_quit() + out = self.u.check_path(dsdt) + if not out: continue + # Got a DSDT, try to load it + return self.load_dsdt(out) + + def _ensure_dsdt(self, allow_any=False): + # Helper to check conditions for when we have valid tables + return self.dsdt and ((allow_any and self.d.acpi_tables) or (not allow_any and self.d.get_dsdt_or_only())) + + def ensure_dsdt(self, allow_any=False): + if self._ensure_dsdt(allow_any=allow_any): + # Got it already + return True + # Need to prompt + self.dsdt = self.select_dsdt(single_table=not allow_any) + if self._ensure_dsdt(allow_any=allow_any): + return True + return False + + def write_ssdt(self, ssdt_name, ssdt): + res = self.d.check_output(self.output) + dsl_path = os.path.join(res,ssdt_name+".dsl") + aml_path = os.path.join(res,ssdt_name+".aml") + iasl_path = self.d.iasl_legacy if self.iasl_legacy else self.d.iasl + with open(dsl_path,"w") as f: + f.write(ssdt) + print("Compiling...{}".format(" {}!! 
Using Legacy Compiler !!{}".format(self.yel,self.rst) if self.iasl_legacy else "")) + out = self.r.run({"args":[iasl_path, dsl_path]}) + if out[2] != 0: + print(" - {}".format(out[1])) + self.re.reveal(dsl_path,True) + return False + else: + self.re.reveal(aml_path,True) + return True + + def ensure_path(self, plist_data, path_list, final_type = list): + if not path_list: return plist_data + last = plist_data + for index,path in enumerate(path_list): + if not path in last: + if index >= len(path_list)-1: + last[path] = final_type() + else: + last[path] = {} + last = last[path] + return plist_data + + def make_plist(self, oc_acpi, cl_acpi, patches, drops=[], replace=False): + # if not len(patches): return # No patches to add - bail + repeat = False + print("Building patches_OC and patches_Clover plists...") + output = self.d.check_output(self.output) + oc_plist = {} + cl_plist = {} + + # Check for the plists + if os.path.isfile(os.path.join(output,"patches_OC.plist")): + e = os.path.join(output,"patches_OC.plist") + with open(e,"rb") as f: + oc_plist = plist.load(f) + if os.path.isfile(os.path.join(output,"patches_Clover.plist")): + e = os.path.join(output,"patches_Clover.plist") + with open(e,"rb") as f: + cl_plist = plist.load(f) + + # Ensure all the pathing is where it needs to be + if oc_acpi: oc_plist = self.ensure_path(oc_plist,("ACPI","Add")) + if cl_acpi: cl_plist = self.ensure_path(cl_plist,("ACPI","SortedOrder")) + if patches: + oc_plist = self.ensure_path(oc_plist,("ACPI","Patch")) + cl_plist = self.ensure_path(cl_plist,("ACPI","DSDT","Patches")) + if drops: + oc_plist = self.ensure_path(oc_plist,("ACPI","Delete")) + cl_plist = self.ensure_path(cl_plist,("ACPI","DropTables")) + + # Add the .aml references + if replace: # Remove any conflicting entries + if oc_acpi: + oc_plist["ACPI"]["Add"] = [x for x in oc_plist["ACPI"]["Add"] if oc_acpi["Path"] != x["Path"]] + if cl_acpi: + cl_plist["ACPI"]["SortedOrder"] = [x for x in cl_plist["ACPI"]["SortedOrder"] if 
cl_acpi != x] + if oc_acpi: # Make sure we have something + if any(oc_acpi["Path"] == x["Path"] for x in oc_plist["ACPI"]["Add"]): + print(" -> Add \"{}\" already in OC plist!".format(oc_acpi["Path"])) + else: + oc_plist["ACPI"]["Add"].append(oc_acpi) + if cl_acpi: # Make sure we have something + if cl_acpi in cl_plist["ACPI"]["SortedOrder"]: + print(" -> \"{}\" already in Clover plist!".format(cl_acpi)) + else: + cl_plist["ACPI"]["SortedOrder"].append(cl_acpi) + + # Iterate the patches + for p in patches: + ocp = self.get_oc_patch(p) + cp = self.get_clover_patch(p) + if replace: # Remove any conflicting entries + oc_plist["ACPI"]["Patch"] = [x for x in oc_plist["ACPI"]["Patch"] if ocp["Find"] != x["Find"] and ocp["Replace"] != x["Replace"]] + cl_plist["ACPI"]["DSDT"]["Patches"] = [x for x in cl_plist["ACPI"]["DSDT"]["Patches"] if cp["Find"] != x["Find"] and cp["Replace"] != x["Replace"]] + if any(x["Find"] == ocp["Find"] and x["Replace"] == ocp["Replace"] for x in oc_plist["ACPI"]["Patch"]): + print(" -> Patch \"{}\" already in OC plist!".format(p["Comment"])) + else: + print(" -> Adding Patch \"{}\" to OC plist!".format(p["Comment"])) + oc_plist["ACPI"]["Patch"].append(ocp) + if any(x["Find"] == cp["Find"] and x["Replace"] == cp["Replace"] for x in cl_plist["ACPI"]["DSDT"]["Patches"]): + print(" -> Patch \"{}\" already in Clover plist!".format(p["Comment"])) + else: + print(" -> Adding Patch \"{}\" to Clover plist!".format(p["Comment"])) + cl_plist["ACPI"]["DSDT"]["Patches"].append(cp) + + # Iterate any dropped tables + for d in drops: + ocd = self.get_oc_drop(d) + cd = self.get_clover_drop(d) + if replace: + oc_plist["ACPI"]["Delete"] = [x for x in oc_plist["ACPI"]["Delete"] if ocd["TableSignature"] != x["TableSignature"] and ocd["OemTableId"] != x["OemTableId"]] + cl_plist["ACPI"]["DropTables"] = [x for x in cl_plist["ACPI"]["DropTables"] if cd.get("Signature") != x.get("Signature") and cd.get("TableId") != x.get("TableId")] + if any(x["TableSignature"] == 
ocd["TableSignature"] and x["OemTableId"] == ocd["OemTableId"] for x in oc_plist["ACPI"]["Delete"]): + print(" -> \"{}\" already in OC plist!".format(d["Comment"])) + else: + print(" -> Adding \"{}\" to OC plist!".format(d["Comment"])) + oc_plist["ACPI"]["Delete"].append(ocd) + name_parts = [] + for x in ("Signature","TableId"): + if not cd.get(x): continue + n = cd[x] + if 2/3!=0 and not isinstance(n,str): + try: n = n.decode() + except: continue + name_parts.append(n.replace("?"," ").strip()) + name = " - ".join(name_parts) + if any(x.get("Signature") == cd.get("Signature") and x.get("TableId") == cd.get("TableId") for x in cl_plist["ACPI"]["DropTables"]): + print(" -> \"{}\" already in Clover plist!".format(name or "Unknown Dropped Table")) + else: + cl_plist["ACPI"]["DropTables"].append(cd) + print(" -> Adding \"{}\" to Clover plist!".format(name or "Unknown Dropped Table")) + + # Write the plists if we have something to write + if oc_plist: + with open(os.path.join(output,"patches_OC.plist"),"wb") as f: + plist.dump(oc_plist,f) + if cl_plist: + with open(os.path.join(output,"patches_Clover.plist"),"wb") as f: + plist.dump(cl_plist,f) + + def patch_warn(self): + # Warn users to ensure they merge the patches_XX.plist contents with their config.plist + print("\n{}!! 
WARNING !!{} Make sure you merge the contents of patches_[OC/Clover].plist".format(self.red,self.rst)) + print(" with your config.plist!\n") + + def get_lpc_name(self,log=True,skip_ec=False,skip_common_names=False): + # Intel devices appear to use _ADR, 0x001F0000 + # AMD devices appear to use _ADR, 0x00140003 + if log: print("Locating LPC(B)/SBRG...") + for table_name in self.sorted_nicely(list(self.d.acpi_tables)): + table = self.d.acpi_tables[table_name] + # The LPCB device will always be the parent of the PNP0C09 device + # if found + if not skip_ec: + ec_list = self.d.get_device_paths_with_hid("PNP0C09",table=table) + if len(ec_list): + lpc_name = ".".join(ec_list[0][0].split(".")[:-1]) + if log: print(" - Found {} in {}".format(lpc_name,table_name)) + return lpc_name + # Maybe try common names if we haven't found it yet + if not skip_common_names: + for x in ("LPCB", "LPC0", "LPC", "SBRG", "PX40"): + try: + lpc_name = self.d.get_device_paths(x,table=table)[0][0] + if log: print(" - Found {} in {}".format(lpc_name,table_name)) + return lpc_name + except: pass + # Finally check by address - some Intel tables have devices at + # 0x00140003 + paths = self.d.get_path_of_type(obj_type="Name",obj="_ADR",table=table) + for path in paths: + adr = self.get_address_from_line(path[1],table=table) + if adr in (0x001F0000, 0x00140003): + # Get the path minus ._ADR + lpc_name = path[0][:-5] + # Make sure the LPCB device does not have an _HID + lpc_hid = lpc_name+"._HID" + if any(x[0]==lpc_hid for x in table.get("paths",[])): + continue + if log: print(" - Found {} in {}".format(lpc_name,table_name)) + return lpc_name + if log: + print(" - Could not locate LPC(B)! Aborting!") + print("") + return None # Didn't find it + + def sta_needs_patching(self, sta, table): + # A helper method to determine if an _STA + # needs patching to enable based on the + # type and returns. 
+ if not isinstance(sta,dict) or not sta.get("sta"): + return False + # Check if we have an IntObj or MethodObj + # _STA, and scrape for values if possible. + if sta.get("sta_type") == "IntObj": + # We got an int - see if it's force-enabled + try: + sta_scope = table["lines"][sta["sta"][0][1]] + if not "Name (_STA, 0x0F)" in sta_scope: + return True + except Exception as e: + return True + elif sta.get("sta_type") == "MethodObj": + # We got a method - if we have more than one + # "Return (", or not a single "Return (0x0F)", + # then we need to patch this out and replace + try: + sta_scope = "\n".join(self.d.get_scope(sta["sta"][0][1],strip_comments=True,table=table)) + if sta_scope.count("Return (") > 1 or not "Return (0x0F)" in sta_scope: + print("Multiple returns or not Return (0x0F)") + # More than one return, or our return isn't force-enabled + return True + except Exception as e: + return True + # If we got here - it's not a recognized type, or + # it was fullly qualified and doesn't need patching + return False + + def fake_ec(self, laptop = False): + if not self.ensure_dsdt(): + return + self.u.head("Fake EC") + print("") + print("Locating PNP0C09 (EC) devices...") + rename = False + named_ec = False + ec_to_patch = [] + ec_to_enable = [] + ec_sta = {} + ec_enable_sta = {} + patches = [] + lpc_name = None + ec_located = False + for table_name in self.sorted_nicely(list(self.d.acpi_tables)): + table = self.d.acpi_tables[table_name] + ec_list = self.d.get_device_paths_with_hid("PNP0C09",table=table) + if len(ec_list): + lpc_name = ".".join(ec_list[0][0].split(".")[:-1]) + print(" - Got {:,} in {}".format(len(ec_list),table_name)) + print(" - Validating...") + for x in ec_list: + device = orig_device = x[0] + print(" --> {}".format(device)) + if device.split(".")[-1] == "EC": + named_ec = True + if not laptop: + # Only rename if we're trying to replace it + print(" ----> PNP0C09 (EC) called EC. 
Renaming") + device = ".".join(device.split(".")[:-1]+["EC0"]) + rename = True + scope = "\n".join(self.d.get_scope(x[1],strip_comments=True,table=table)) + # We need to check for _HID, _CRS, and _GPE + if all(y in scope for y in ["_HID","_CRS","_GPE"]): + print(" ----> Valid PNP0C09 (EC) Device") + ec_located = True + sta = self.get_sta_var( + var=None, + device=orig_device, + dev_hid="PNP0C09", + dev_name=orig_device.split(".")[-1], + log_locate=False, + table=table + ) + if not laptop: + ec_to_patch.append(device) + # Only unconditionally override _STA methods + # if not building for a laptop + if sta.get("patches"): + patches.extend(sta.get("patches",[])) + ec_sta[device] = sta + elif sta.get("patches"): + if self.sta_needs_patching(sta, table=table): + # Retain the info as we need to override it + ec_to_enable.append(device) + ec_enable_sta[device] = sta + # Disable the patches by default and add to the list + for patch in sta.get("patches",[]): + patch["Enabled"] = False + patch["Disabled"] = True + patches.append(patch) + else: + print(" --> _STA properly enabled - skipping rename") + else: + print(" ----> NOT Valid PNP0C09 (EC) Device") + if not ec_located: + print(" - No valid PNP0C09 (EC) devices found - only needs a Fake EC device") + if laptop and named_ec and not patches: + print(" ----> Named EC device located - no fake needed.") + print("") + self.u.grab("Press [enter] to return to main menu...") + return + if lpc_name is None: + lpc_name = self.get_lpc_name(skip_ec=True,skip_common_names=True) + if lpc_name is None: + self.u.grab("Press [enter] to return to main menu...") + return + comment = "Faked Embedded Controller" + if laptop: + comment += " (Laptop)" + if rename == True: + patches.insert(0,{ + "Comment":"EC to EC0{}".format("" if not ec_sta else " - must come before any EC _STA to XSTA renames!"), + "Find":"45435f5f", + "Replace":"4543305f" + }) + comment += " - Needs EC to EC0 {}".format( + "and EC _STA to XSTA renames" if ec_sta else 
"rename" + ) + elif ec_sta: + comment += " - Needs EC _STA to XSTA renames" + oc = {"Comment":comment,"Enabled":True,"Path":"SSDT-EC.aml"} + self.make_plist(oc, "SSDT-EC.aml", patches, replace=True) + print("Creating SSDT-EC...") + ssdt = """ +DefinitionBlock ("", "SSDT", 2, "CORP ", "SsdtEC", 0x00001000) +{ + External ([[LPCName]], DeviceObj) +""".replace("[[LPCName]]",lpc_name) + for x in ec_to_patch: + ssdt += " External ({}, DeviceObj)\n".format(x) + if x in ec_sta: + ssdt += " External ({}.XSTA, {})\n".format(x,ec_sta[x].get("sta_type","MethodObj")) + # Walk the ECs to enable + for x in ec_to_enable: + ssdt += " External ({}, DeviceObj)\n".format(x) + if x in ec_enable_sta: + # Add the _STA and XSTA refs as the patch may not be enabled + ssdt += " External ({0}._STA, {1})\n External ({0}.XSTA, {1})\n".format(x,ec_enable_sta[x].get("sta_type","MethodObj")) + # Walk them again and add the _STAs + for x in ec_to_patch: + ssdt += """ + Scope ([[ECName]]) + { + Method (_STA, 0, NotSerialized) // _STA: Status + { + If (_OSI ("Darwin")) + { + Return (0) + } + Else + { + Return ([[XSTA]]) + } + } + } +""".replace("[[LPCName]]",lpc_name).replace("[[ECName]]",x) \ + .replace("[[XSTA]]","{}.XSTA{}".format(x," ()" if ec_sta[x].get("sta_type","MethodObj")=="MethodObj" else "") if x in ec_sta else "0x0F") + # Walk them yet again - and force enable as needed + for x in ec_to_enable: + ssdt += """ + If (LAnd (CondRefOf ([[ECName]].XSTA), LNot (CondRefOf ([[ECName]]._STA)))) + { + Scope ([[ECName]]) + { + Method (_STA, 0, NotSerialized) // _STA: Status + { + If (_OSI ("Darwin")) + { + Return (0x0F) + } + Else + { + Return ([[XSTA]]) + } + } + } + } +""".replace("[[LPCName]]",lpc_name).replace("[[ECName]]",x) \ + .replace("[[XSTA]]","{}.XSTA{}".format(x," ()" if ec_enable_sta[x].get("sta_type","MethodObj")=="MethodObj" else "") if x in ec_enable_sta else "Zero") + # Create the faked EC + if not laptop or not named_ec: + ssdt += """ + Scope ([[LPCName]]) + { + Device (EC) + { + 
Name (_HID, "ACID0001") // _HID: Hardware ID + Method (_STA, 0, NotSerialized) // _STA: Status + { + If (_OSI ("Darwin")) + { + Return (0x0F) + } + Else + { + Return (Zero) + } + } + } + }""".replace("[[LPCName]]",lpc_name) + # Close the SSDT scope + ssdt += """ +}""" + self.write_ssdt("SSDT-EC",ssdt) + print("") + print("Done.") + self.patch_warn() + self.u.grab("Press [enter] to return...") + + def plugin_type(self): + if not self.ensure_dsdt(allow_any=True): + return + self.u.head("Plugin Type") + print("") + print("Determining CPU name scheme...") + for table_name in self.sorted_nicely(list(self.d.acpi_tables)): + ssdt_name = "SSDT-PLUG" + table = self.d.acpi_tables[table_name] + if not table.get("signature") in (b"DSDT",b"SSDT"): + continue # We're not checking data tables + print(" Checking {}...".format(table_name)) + try: cpu_name = self.d.get_processor_paths(table=table)[0][0] + except: cpu_name = None + if cpu_name: + print(" - Found Processor: {}".format(cpu_name)) + oc = {"Comment":"Sets plugin-type to 1 on first Processor object","Enabled":True,"Path":ssdt_name+".aml"} + print("Creating SSDT-PLUG...") + ssdt = """// +// Based on the sample found at https://github.com/acidanthera/OpenCorePkg/blob/master/Docs/AcpiSamples/SSDT-PLUG.dsl +// +DefinitionBlock ("", "SSDT", 2, "CORP", "CpuPlug", 0x00003000) +{ + External ([[CPUName]], ProcessorObj) + Scope ([[CPUName]]) + { + Method (_DSM, 4, NotSerialized) // _DSM: Device-Specific Method + { + If (_OSI ("Darwin")) + { + If (LNot (Arg2)) + { + Return (Buffer (One) + { + 0x03 + }) + } + Return (Package (0x02) + { + "plugin-type", + One + }) + } + Else + { + Return (Buffer (One) + { + Zero + }) + } + } + } +}""".replace("[[CPUName]]",cpu_name) + else: + ssdt_name += "-ALT" + print(" - No Processor objects found...") + procs = self.d.get_device_paths_with_hid(hid="ACPI0007",table=table) + if not procs: + print(" - No ACPI0007 devices found...") + continue + print(" - Located {:,} ACPI0007 device{}".format( + 
len(procs), "" if len(procs)==1 else "s" + )) + parent = procs[0][0].split(".")[0] + print(" - Got parent at {}, iterating...".format(parent)) + proc_list = [] + for proc in procs: + print(" - Checking {}...".format(proc[0].split(".")[-1])) + uid = self.d.get_path_of_type(obj_type="Name",obj=proc[0]+"._UID",table=table) + if not uid: + print(" --> Not found! Skipping...") + continue + # Let's get the actual _UID value + try: + _uid = table["lines"][uid[0][1]].split("_UID, ")[1].split(")")[0] + print(" --> _UID: {}".format(_uid)) + proc_list.append((proc[0],_uid)) + except: + print(" --> Not found! Skipping...") + if not proc_list: + continue + print("Iterating {:,} valid processor device{}...".format(len(proc_list),"" if len(proc_list)==1 else "s")) + ssdt = """// +// Based on the sample found at https://github.com/acidanthera/OpenCorePkg/blob/master/Docs/AcpiSamples/Source/SSDT-PLUG-ALT.dsl +// +DefinitionBlock ("", "SSDT", 2, "CORP", "CpuPlugA", 0x00003000) +{ + External ([[parent]], DeviceObj) + + Scope ([[parent]]) + {""".replace("[[parent]]",parent) + # Ensure our name scheme won't conflict + schemes = ("C000","CP00","P000","PR00","CX00","PX00") + # Walk the processor objects, and add them to the SSDT + for i,proc_uid in enumerate(proc_list): + proc,uid = proc_uid + adr = hex(i)[2:].upper() + name = None + for s in schemes: + name_check = s[:-len(adr)]+adr + check_path = "{}.{}".format(parent,name_check) + if self.d.get_path_of_type(obj_type="Device",obj=check_path,table=table): + continue # Already defined - skip + # If we got here - we found an unused name + name = name_check + break + if not name: + print(" - Could not find an available name scheme! 
Aborting.") + print("") + self.u.grab("Press [enter] to return to main menu...") + return + ssdt+=""" + Processor ([[name]], [[uid]], 0x00000510, 0x06) + { + // [[proc]] + Name (_HID, "ACPI0007" /* Processor Device */) // _HID: Hardware ID + Name (_UID, [[uid]]) + Method (_STA, 0, NotSerialized) // _STA: Status + { + If (_OSI ("Darwin")) + { + Return (0x0F) + } + Else + { + Return (Zero) + } + }""".replace("[[name]]",name).replace("[[uid]]",uid).replace("[[proc]]",proc) + if i == 0: # Got the first, add plugin-type as well + ssdt += """ + Method (_DSM, 4, NotSerialized) + { + If (LNot (Arg2)) { + Return (Buffer (One) { 0x03 }) + } + + Return (Package (0x02) + { + "plugin-type", + One + }) + }""" + # Close up the SSDT + ssdt += """ + }""" + ssdt += """ + } +}""" + oc = {"Comment":"Redefines modern CPU Devices as legacy Processor objects and sets plugin-type to 1 on the first","Enabled":True,"Path":ssdt_name+".aml"} + self.make_plist(oc, ssdt_name+".aml", ()) + self.write_ssdt(ssdt_name,ssdt) + print("") + print("Done.") + self.patch_warn() + self.u.grab("Press [enter] to return...") + return + # If we got here - we reached the end + print("No valid processor devices found!") + print("") + self.u.grab("Press [enter] to return...") + return + + def list_irqs(self): + # Walks the DSDT keeping track of the current device and + # saving the IRQNoFlags if found + devices = {} + current_device = None + current_hid = None + irq = False + last_irq = False + irq_index = 0 + for index,line in enumerate(self.d.get_dsdt_or_only()["lines"]): + if self.d.is_hex(line): + # Skip all hex lines + continue + if irq: + # Get the values + num = line.split("{")[1].split("}")[0].replace(" ","") + num = "#" if not len(num) else num + if current_device in devices: + if last_irq: # In a row + devices[current_device]["irq"] += ":"+num + else: # Skipped at least one line + irq_index = self.d.find_next_hex(index)[1] + devices[current_device]["irq"] += "-"+str(irq_index)+"|"+num + else: + 
                    irq_index = self.d.find_next_hex(index)[1]
                    devices[current_device] = {"irq":str(irq_index)+"|"+num}
                irq = False
                last_irq = True
            elif "Device (" in line:
                # Check if we retain the _HID here
                if current_device and current_device in devices and current_hid:
                    # Save it
                    devices[current_device]["hid"] = current_hid
                last_irq = False
                current_hid = None
                try: current_device = line.split("(")[1].split(")")[0]
                except:
                    current_device = None
                    continue
            elif "_HID, " in line and current_device:
                try: current_hid = line.split('"')[1]
                except: pass
            elif "IRQNoFlags" in line and current_device:
                # Next line has our interrupts
                irq = True
            # Check if just a filler line
            elif len(line.replace("{","").replace("}","").replace("(","").replace(")","").replace(" ","").split("//")[0]):
                # Reset last IRQ as it's not in a row
                last_irq = False
        # Retain the final _HID if needed
        if current_device and current_device in devices and current_hid:
            devices[current_device]["hid"] = current_hid
        return devices

    def get_hex_from_irqs(self, irq, rem_irq = None):
        """Build find/replace hex pairs that zero out legacy IRQs.

        irq is the encoded string built by list_irqs() - "-"-joined entries of
        the form "index|nums", where nums is ":"-joined per-line IRQ groups.
        rem_irq, when set, is an iterable of the specific IRQ numbers to remove;
        when None, ALL IRQs in each entry are zeroed.

        Returns a list of dicts with keys irq/find/repl/remd/index/changed.
        NOTE(review): remd is a single list shared by every dict in the return
        value (it accumulates across entries) - presumably intentional, as
        callers only read it per-device; confirm before relying on it per-entry.
        """
        # We need to search for a few different types:
        #
        # 22 XX XX 22 XX XX 22 XX XX (multiples on different lines)
        # 22 XX XX (summed multiples in the same bracket - {0,8,11})
        # 22 XX XX (single IRQNoFlags entry)
        #
        # Can end with 79 [00] (end of method), 86 09 (middle of method) or 47 01 (unknown)
        lines = []
        remd = []
        for a in irq.split("-"):
            index,i = a.split("|") # Get the index
            index = int(index)
            find = self.get_int_for_line(i)
            # Default replacement: zero every IRQ bitmask
            repl = [0]*len(find)
            # Now we need to verify if we're patching *all* IRQs, or just some specifics
            if rem_irq:
                repl = [x for x in find]
                matched = []
                for x in rem_irq:
                    # Get the int
                    rem = self.convert_irq_to_int(x)
                    # Clear only the requested IRQ bit from each mask that has it set
                    repl1 = [y&(rem^0xFFFF) if y >= rem else y for y in repl]
                    if repl1 != repl:
                        # Changes were made
                        remd.append(x)
                        repl = [y for y in repl1]
            # Get the hex - each IRQNoFlags descriptor is tag byte 0x22
            # followed by the little-endian 16-bit IRQ bitmask
            d = {
                "irq":i,
                "find": "".join(["22"+self.d.get_hex_from_int(x) for x in find]),
                "repl": "".join(["22"+self.d.get_hex_from_int(x) for x in repl]),
                "remd": remd,
                "index": index
            }
            # Only entries whose find/replace hex actually differ need patching
            d["changed"] = not (d["find"]==d["repl"])
            lines.append(d)
        return lines

    def get_int_for_line(self, irq):
        """Convert one "index-free" IRQ string (e.g. "0,8:11") into a list of
        bitmasks - one int per ":"-separated IRQNoFlags descriptor."""
        irq_list = []
        for i in irq.split(":"):
            irq_list.append(self.same_line_irq(i))
        return irq_list

    def convert_irq_to_int(self, irq):
        """Return the IRQNoFlags bitmask for a single IRQ number.

        Builds a binary string by hand; for the expected 0-15 range the
        result is equivalent to 1 << irq.
        """
        b = "0"*(16-irq)+"1"+"0"*(irq)
        return int(b,2)

    def same_line_irq(self, irq):
        """OR together the bitmasks of all ","-separated IRQ numbers in one
        IRQNoFlags descriptor, skipping "#" placeholders, non-ints, and
        out-of-range (not 0-15) values."""
        # We sum the IRQ values and return the int
        total = 0
        for i in irq.split(","):
            if i == "#":
                continue # Null value
            try: i=int(i)
            except: continue # Not an int
            if i > 15 or i < 0:
                continue # Out of range
            total = total | self.convert_irq_to_int(i)
        return total

    def get_all_irqs(self, irq):
        """Return a sorted, de-duplicated list of every IRQ number present in
        an encoded "index|nums" IRQ string (as built by list_irqs)."""
        irq_list = set()
        for a in irq.split("-"):
            i = a.split("|")[1]
            for x in i.split(":"):
                for y in x.split(","):
                    if y == "#":
                        continue
                    irq_list.add(int(y))
        return sorted(list(irq_list))

    def get_data(self, data, pad_to=0):
        """Wrap data as plist data, encoding str to bytes on py3 and
        right-padding with NUL bytes up to pad_to."""
        if sys.version_info >= (3, 0) and not isinstance(data,bytes):
            data = data.encode()
        return plist.wrap_data(data+b"\x00"*(max(pad_to-len(data),0)))

    def _get_table_id(self, table, id_name, mode=None):
        """Return the table's OEM id/signature bytes per the match mode, or
        zero bytes when the mode says to match any table (or table is None).
        id_name is "id" (8 bytes) or "signature" (4 bytes)."""
        if mode is None:
            mode = self.match_mode
        if table is None:
            # No table found - return 0s as a failsafe
            mode = 0
        # 0 = Any table id, any length
        # 1 = Any table id, match length
        # 2 = Match table id, match length
        # 3 = Match NORMALIZED table id, match length
        zero = self.d.get_hex_bytes("00" * (8 if id_name == "id" else 4))
        if mode == 2:
            return table.get(id_name,zero)
        elif mode == 3:
            return table.get(id_name+"_ascii",table.get(id_name,zero))
        else: # 0/1 match any table id
            return zero

    def _get_table_length(self, table, mode=None):
        """Return the table length for match modes 1-3, else 0 (match any)."""
        if mode is None:
            mode = self.match_mode
        if table is None or mode not in (1,2,3):
            # No table found, or we're zeroing the
            # length - just return 0
            return 0
        # For modes 1-3 we return the table
        # length
        return table.get("length",0)

    def get_clover_patch(self, patch):
        """Convert an internal patch dict into Clover's ACPI patch format."""
        return {
            "Comment": patch["Comment"],
            "Disabled": patch.get("Disabled",False),
            "Find": self.get_data(self.d.get_hex_bytes(patch["Find"])),
            "Replace": self.get_data(self.d.get_hex_bytes(patch["Replace"]))
        }

    def get_oc_patch(self, patch):
        """Convert an internal patch dict into OpenCore's ACPI -> Patch format,
        filling table id/signature/length from the patch's source table."""
        table = patch.get("Table",self.d.get_dsdt_or_only())
        if not isinstance(table,dict):
            table = {}
        return {
            "Base": patch.get("Base",""),
            "BaseSkip": patch.get("BaseSkip",0),
            "Comment": patch.get("Comment",""),
            "Count": patch.get("Count",0),
            "Enabled": patch.get("Enabled",True),
            "Find": self.get_data(self.d.get_hex_bytes(patch["Find"])),
            "Limit": patch.get("Limit",0),
            "Mask": self.get_data(patch.get("Mask",b"")),
            "OemTableId": self.get_data(patch.get("TableId",self._get_table_id(table,"id")),pad_to=8),
            "Replace": self.get_data(self.d.get_hex_bytes(patch["Replace"])),
            "ReplaceMask": self.get_data(patch.get("ReplaceMask",b"")),
            "Skip": patch.get("Skip",0),
            "TableLength": patch.get("Length",self._get_table_length(table)),
            "TableSignature": self.get_data(patch.get("Signature",self._get_table_id(table,"signature")),pad_to=4)
        }

    def get_oc_drop(self, drop):
        """Convert an internal drop dict into OpenCore's ACPI -> Delete format.

        Raises if length, OEM table id, and signature would all be zero, since
        such an entry would match (and drop) every table.
        """
        table = drop.get("Table")
        # Cannot accept None for a table to drop
        table = table or self.d.get_dsdt_or_only()
        # NOTE(review): assert is stripped under python -O; a raise would be
        # safer if this can legitimately be falsy
        assert table
        oc = {
            "All": drop.get("All",False),
            "Comment": drop.get("Comment",""),
            "Enabled": drop.get("Enabled",True),
            "OemTableId": self.get_data(drop.get("TableId",self._get_table_id(table,"id")),pad_to=8),
            "TableLength": drop.get("Length",self._get_table_length(table)),
            "TableSignature": self.get_data(drop.get("Signature",self._get_table_id(table,"signature")),pad_to=4)
        }
        # Ensure at least one of TableLength, OemTableId, or TableSignature is non-zero
        def _int(val):
            # Ints pass through; byte strings are summed byte-wise
            return val if isinstance(val,int) else sum([int(x) for x in val])
        if sum(_int(oc[x]) for x in
("TableLength","OemTableId","TableSignature")) == 0: + raise Exception("TableLength, OemTableId, and TableSignature cannot all be zeroes.") + return oc + + def get_clover_drop(self, drop): + table = drop.get("Table") + # Cannot accept None for a table to drop + table = table or self.d.get_dsdt_or_only() + leng = self._get_table_length(table) + d = { + # Strip null chars and then decode to strings + "Signature": table["signature"].rstrip(b"\x00").decode(), + "TableId": table["id"].rstrip(b"\x00").decode(), + } + # Only add the length if we have a valid value for it + length = drop.get("Length",leng) + if length: d["Length"] = length + return d + + def get_irq_choice(self, irqs): + if not irqs or not isinstance(irqs,dict): + # No IRQNoFlags entries located - or irqs isn't + # a dict - just return the same value we would if + # the user chose N. None + return {} + hid_pad = max((len(irqs[x].get("hid","")) for x in irqs)) + names_and_hids = ["PIC","IPIC","TMR","TIMR","RTC","RTC0","RTC1","PNPC0000","PNP0100","PNP0B00"] + defaults = [x for x in irqs if x.upper() in names_and_hids or irqs[x].get("hid","").upper() in names_and_hids] + while True: + lines = [""] + lines.append("Current Legacy IRQs:") + lines.append("") + if not len(irqs): + lines.append(" - None Found") + for x in irqs: + if not hid_pad: + lines.append(" {} {}: {}".format( + "*" if x.upper() in names_and_hids else " ", + x.rjust(4," "), + self.get_all_irqs(irqs[x]["irq"]) + )) + else: + lines.append(" {} {} {}: {}".format( + "*" if x.upper() in names_and_hids or irqs[x].get("hid","").upper() in names_and_hids else " ", + x.rjust(4," "), + ("- "+irqs[x].get("hid","").rjust(hid_pad," ")) if irqs[x].get("hid") else "".rjust(hid_pad+2," "), + self.get_all_irqs(irqs[x]["irq"]) + )) + lines.append("") + lines.append("C. Only Conflicting IRQs from Legacy Devices ({} from * devices)".format(",".join([str(x) for x in self.target_irqs]) if len(self.target_irqs) else "None")) + lines.append("O. 
Only Conflicting IRQs ({})".format(",".join([str(x) for x in self.target_irqs]) if len(self.target_irqs) else "None")) + lines.append("L. Legacy IRQs (from * devices)") + lines.append("N. None") + lines.append("") + lines.append("M. Main Menu") + lines.append("Q. Quit") + lines.append("") + lines.append("* Indicates a typically troublesome device") + lines.append("You can also type your own list of Devices and IRQs") + lines.append("The format is DEV1:IRQ1,IRQ2 DEV2:IRQ3,IRQ4") + lines.append("You can omit the IRQ# to remove all from that device (DEV1: DEV2:1,2,3)") + lines.append("For example, to remove IRQ 0 from RTC, all from IPIC, and 8 and 11 from TMR:\n") + lines.append("RTC:0 IPIC: TMR:8,11") + lines.append("") + max_line = max(lines,key=len) + if self.resize_window: + self.u.resize(max(len(max_line),self.w), max(len(lines)+5,self.h)) + self.u.head("Select IRQs To Nullify") + print("\n".join(lines)) + menu = self.u.grab("Please select an option (default is C): ") + if not len(menu): + menu = "c" + if menu.lower() == "m": return None + elif menu.lower() == "q": + if self.resize_window: + self.u.resize(self.w,self.h) + self.u.custom_quit() + d = {} + if menu.lower() == "n": + pass # Don't populate d at all + elif menu.lower() == "o": + for x in irqs: + d[x] = self.target_irqs + elif menu.lower() == "l": + for x in defaults: + d[x] = [] + elif menu.lower() == "c": + for x in defaults: + d[x] = self.target_irqs + else: + # User supplied + for i in menu.split(" "): + if not len(i): + continue + try: + name,val = i.split(":") + val = [int(x) for x in val.split(",") if len(x)] + except Exception as e: + # Incorrectly formatted + print("!! 
Incorrect Custom IRQ List Format !!\n - {}".format(e)) + d = None + break + d[name.upper()] = val + if d is None: + continue + if self.resize_window: + self.u.resize(self.w,self.h) + return d + + def fix_hpet(self): + if not self.ensure_dsdt(): + return + self.u.head("Fix HPET") + print("") + print("Locating PNP0103 (HPET) devices...") + hpets = self.d.get_device_paths_with_hid("PNP0103") + hpet_fake = not hpets + patches = [] + hpet_sta = False + sta = None + if hpets: + name = hpets[0][0] + print(" - Located at {}".format(name)) + # Let's locate any _STA methods + sta = self.get_sta_var(var=None,dev_hid="PNP0103",dev_name="HPET") + if sta.get("patches"): + hpet_sta = True + patches.extend(sta.get("patches",[])) + print("Locating HPET's _CRS Method/Name...") + hpet = self.d.get_method_paths(name+"._CRS") or self.d.get_name_paths(name+"._CRS") + if not hpet: + print(" - Could not locate {}._CRS! Aborting!".format(name)) + # Check for XCRS to see if the rename is already applied + if self.d.get_method_paths(name+".XCRS") or self.d.get_name_paths(name+".XCRS"): + print(" --> Appears to already be named XCRS!") + print("") + self.u.grab("Press [enter] to return to main menu...") + return + print(" - Located at {}._CRS".format(name)) + crs_index = self.d.find_next_hex(hpet[0][1])[1] + print(" - Found at index {}".format(crs_index)) + print(" - Type: {}".format(hpet[0][-1])) + # Let's find the Memory32Fixed portion within HPET's _CRS method + print(" - Checking for Memory32Fixed...") + mem_access = mem_base = mem_length = primed = None + for line in self.d.get_scope(hpets[0][1],strip_comments=True): + if "Memory32Fixed (" in line: + try: + mem_access = line.split("(")[1].split(",")[0] + except: + print(" --> Could not determine memory access type!") + break + primed = True + continue + if not primed: + continue + elif ")" in line: # Reached the end of the scope + break + # We're primed, and not at the end - let's try to get the base and length + try: + val = 
line.strip().split(",")[0].replace("Zero","0x0").replace("One","0x1") + check = int(val,16) + except: + # Couldn't convert to an int - likely using vars, fall back to defaults + print(" --> Could not convert Base or Length to Integer!") + break + # Set them in order + if mem_base is None: + mem_base = val + else: + mem_length = val + break # Leave after we get both values + # Check if we found the values + got_mem = mem_access and mem_base and mem_length + if got_mem: + print(" --> Got {} {} -> {}".format(mem_access,mem_base,mem_length)) + else: + mem_access = "ReadWrite" + mem_base = "0xFED00000" + mem_length = "0x00000400" + print(" --> Not located!") + print(" --> Using defaults {} -> {}".format(mem_base,mem_length)) + crs = "5F435253" + xcrs = "58435253" + padl,padr = self.d.get_shortest_unique_pad(crs, crs_index) + patches.append({ + "Comment":"{} _CRS to XCRS Rename".format(name.split(".")[-1].lstrip("\\")), + "Find":padl+crs+padr, + "Replace":padl+xcrs+padr + }) + else: + print(" - None located!") + name = self.get_lpc_name(skip_ec=True,skip_common_names=True) + if name is None: + self.u.grab("Press [enter] to return to main menu...") + return + devs = self.list_irqs() + target_irqs = self.get_irq_choice(devs) + if target_irqs is None: return # Bailed, going to the main menu + self.u.head("Creating IRQ Patches") + print("") + if sta and sta.get("patches"): + print(" - {} _STA to XSTA Rename:".format(sta["dev_name"])) + print(" Find: {}".format(sta["patches"][0]["Find"])) + print(" Replace: {}".format(sta["patches"][0]["Replace"])) + print("") + if not hpet_fake: + print(" - {} _CRS to XCRS Rename:".format(name.split(".")[-1].lstrip("\\"))) + print(" Find: {}".format(padl+crs+padr)) + print(" Replace: {}".format(padl+xcrs+padr)) + print("") + print("Checking IRQs...") + print("") + if not devs: + print(" - Nothing to patch!") + print("") + # Let's apply patches as we go + saved_dsdt = self.d.get_dsdt_or_only()["raw"] + unique_patches = {} + generic_patches = 
[] + for dev in devs: + if not dev in target_irqs: + continue + irq_patches = self.get_hex_from_irqs(devs[dev]["irq"],target_irqs[dev]) + i = [x for x in irq_patches if x["changed"]] + for a,t in enumerate(i): + if not t["changed"]: + # Nothing patched - skip + continue + # Try our endings here - 7900, 8609, and 4701 - also allow for up to 8 chars of pad (thanks MSI) + matches = re.findall("("+t["find"]+"(.{0,8})(7900|4701|8609))",self.d.get_hex_starting_at(t["index"])[0]) + if not len(matches): + print("Missing IRQ Patch ending for {} ({})! Skipping...".format(dev,t["find"])) + continue + if len(matches) > 1: + # Found too many matches! + # Add them all as find/replace entries + for x in matches: + generic_patches.append({ + "remd":",".join([str(y) for y in set(t["remd"])]), + "orig":t["find"], + "find":t["find"]+"".join(x[1:]), + "repl":t["repl"]+"".join(x[1:]) + }) + continue + ending = "".join(matches[0][1:]) + padl,padr = self.d.get_shortest_unique_pad(t["find"]+ending, t["index"]) + t_patch = padl+t["find"]+ending+padr + r_patch = padl+t["repl"]+ending+padr + if not dev in unique_patches: + unique_patches[dev] = [] + unique_patches[dev].append({ + "dev":dev, + "remd":",".join([str(y) for y in set(t["remd"])]), + "orig":t["find"], + "find":t_patch, + "repl":r_patch + }) + # Walk the unique patches if any + if len(unique_patches): + for x in unique_patches: + for i,p in enumerate(unique_patches[x]): + patch_name = "{} IRQ {} Patch".format(x, p["remd"]) + if len(unique_patches[x]) > 1: + patch_name += " - {} of {}".format(i+1, len(unique_patches[x])) + patches.append({ + "Comment":patch_name, + "Find":p["find"], + "Replace":p["repl"] + }) + print(" - {}".format(patch_name)) + print(" Find: {}".format(p["find"])) + print(" Replace: {}".format(p["repl"])) + print("") + # Walk the generic patches if any + if len(generic_patches): + generic_set = [] # Make sure we don't repeat find values + for x in generic_patches: + if x in generic_set: + continue + 
generic_set.append(x) + print("The following may not be unique and are disabled by default!") + print("") + for i,x in enumerate(generic_set): + patch_name = "Generic IRQ Patch {} of {} - {} - {}".format(i+1,len(generic_set),x["remd"],x["orig"]) + patches.append({ + "Comment":patch_name, + "Find":x["find"], + "Replace":x["repl"], + "Disabled":True, + "Enabled":False }) + print(" - {}".format(patch_name)) + print(" Find: {}".format(x["find"])) + print(" Replace: {}".format(x["repl"])) + print("") + # Restore the original DSDT in memory + self.d.get_dsdt_or_only()["raw"] = saved_dsdt + oc = { + "Comment":"HPET Device Fake" if hpet_fake else "{} _CRS (Needs _CRS to XCRS Rename)".format(name.split(".")[-1].lstrip("\\")), + "Enabled":True, + "Path":"SSDT-HPET.aml" + } + self.make_plist(oc, "SSDT-HPET.aml", patches) + print("Creating SSDT-HPET...") + if hpet_fake: + ssdt = """// Fake HPET device +// +DefinitionBlock ("", "SSDT", 2, "CORP", "HPET", 0x00000000) +{ + External ([[name]], DeviceObj) + + Scope ([[name]]) + { + Device (HPET) + { + Name (_HID, EisaId ("PNP0103") /* HPET System Timer */) // _HID: Hardware ID + Name (_CID, EisaId ("PNP0C01") /* System Board */) // _CID: Compatible ID + Method (_STA, 0, NotSerialized) // _STA: Status + { + If (_OSI ("Darwin")) + { + Return (0x0F) + } + Else + { + Return (Zero) + } + } + Name (_CRS, ResourceTemplate () // _CRS: Current Resource Settings + { + // Only choose 0, and 8 to mimic a real mac's DSDT. + // You may optionally want to add 11 and/or 12 here if + // it does not work as expected - though those may have + // other side effects (broken trackpad or otherwise). 
+ IRQNoFlags () + {0,8} + Memory32Fixed (ReadWrite, // Access Type + 0xFED00000, // Address Base + 0x00000400, // Address Length + ) + }) + } + } +}""".replace("[[name]]",name) + else: + ssdt = """// +// Supplementary HPET _CRS from Goldfish64 +// Requires at least the HPET's _CRS to XCRS rename +// +DefinitionBlock ("", "SSDT", 2, "CORP", "HPET", 0x00000000) +{ + External ([[name]], DeviceObj) + External ([[name]].XCRS, [[type]]) + + Scope ([[name]]) + { + Name (BUFX, ResourceTemplate () + { + // Only choose 0, and 8 to mimic a real mac's DSDT. + // You may optionally want to add 11 and/or 12 here if + // it does not work as expected - though those may have + // other side effects (broken trackpad or otherwise). + IRQNoFlags () + {0,8} + // [[mem]] + Memory32Fixed ([[mem_access]], // Access Type + [[mem_base]], // Address Base + [[mem_length]], // Address Length + ) + }) + Method (_CRS, 0, Serialized) // _CRS: Current Resource Settings + { + // Return our buffer if booting macOS or the XCRS method + // no longer exists for some reason + If (LOr (_OSI ("Darwin"), LNot(CondRefOf ([[name]].XCRS)))) + { + Return (BUFX) + } + // Not macOS and XCRS exists - return its result + Return ([[name]].XCRS[[method]]) + }""" \ + .replace("[[name]]",name) \ + .replace("[[type]]","MethodObj" if hpet[0][-1] == "Method" else "BuffObj") \ + .replace("[[mem]]","AccessType/Base/Length pulled from DSDT" if got_mem else "Default AccessType/Base/Length - verify with your DSDT!") \ + .replace("[[mem_access]]",mem_access) \ + .replace("[[mem_base]]",mem_base) \ + .replace("[[mem_length]]",mem_length) \ + .replace("[[method]]"," ()" if hpet[0][-1]=="Method" else "") + if hpet_sta: + # Inject our external reference to the renamed XSTA method + ssdt_parts = [] + external = False + for line in ssdt.split("\n"): + if "External (" in line: external = True + elif external: + ssdt_parts.append(" External ({}.XSTA, {})".format(name,sta["sta_type"])) + external = False + ssdt_parts.append(line) + 
ssdt = "\n".join(ssdt_parts) + # Add our method + ssdt += """ + Method (_STA, 0, NotSerialized) // _STA: Status + { + // Return 0x0F if booting macOS or the XSTA method + // no longer exists for some reason + If (LOr (_OSI ("Darwin"), LNot (CondRefOf ([[name]].XSTA)))) + { + Return (0x0F) + } + // Not macOS and XSTA exists - return its result + Return ([[name]].XSTA[[called]]) + }""".replace("[[name]]",name).replace("[[called]]"," ()" if sta["sta_type"]=="MethodObj" else "") + ssdt += """ + } +}""" + self.write_ssdt("SSDT-HPET",ssdt) + print("") + print("Done.") + self.patch_warn() + self.u.grab("Press [enter] to return...") + + def ssdt_pmc(self): + if not self.ensure_dsdt(): + return + self.u.head("SSDT PMC") + print("") + lpc_name = self.get_lpc_name() + if lpc_name is None: + self.u.grab("Press [enter] to return to main menu...") + return + oc = {"Comment":"PMCR for native 300-series NVRAM","Enabled":True,"Path":"SSDT-PMC.aml"} + self.make_plist(oc, "SSDT-PMC.aml", ()) + print("Creating SSDT-PMC...") + ssdt = """// +// SSDT-PMC source from Acidanthera +// Original found here: https://github.com/acidanthera/OpenCorePkg/blob/master/Docs/AcpiSamples/SSDT-PMC.dsl +// +// Uses the CORP name to denote where this was created for troubleshooting purposes. 
+// +DefinitionBlock ("", "SSDT", 2, "CORP", "PMCR", 0x00001000) +{ + External ([[LPCName]], DeviceObj) + Scope ([[LPCName]]) + { + Device (PMCR) + { + Name (_HID, EisaId ("APP9876")) // _HID: Hardware ID + Method (_STA, 0, NotSerialized) // _STA: Status + { + If (_OSI ("Darwin")) + { + Return (0x0B) + } + Else + { + Return (Zero) + } + } + Name (_CRS, ResourceTemplate () // _CRS: Current Resource Settings + { + Memory32Fixed (ReadWrite, + 0xFE000000, // Address Base + 0x00010000, // Address Length + ) + }) + } + } +}""".replace("[[LPCName]]",lpc_name) + self.write_ssdt("SSDT-PMC",ssdt) + print("") + print("Done.") + self.patch_warn() + self.u.grab("Press [enter] to return...") + + def get_sta_var(self,var="STAS",device=None,dev_hid="ACPI000E",dev_name="AWAC",log_locate=True,table=None): + # Helper to check for a device, check for (and qualify) an _STA method, + # and look for a specific variable in the _STA scope + # + # Returns a dict with device info - only "valid" parameter is + # guaranteed. 
+ table = table or self.d.get_dsdt_or_only() + has_var = False + patches = [] + root = None + if device: + dev_list = self.d.get_device_paths(device,table=table) + if not len(dev_list): + if log_locate: print(" - Could not locate {}".format(device)) + return {"value":False} + else: + if log_locate: print("Locating {} ({}) devices...".format(dev_hid,dev_name)) + dev_list = self.d.get_device_paths_with_hid(dev_hid,table=table) + if not len(dev_list): + if log_locate: print(" - Could not locate any {} devices".format(dev_hid)) + return {"valid":False} + dev = dev_list[0] + if log_locate: print(" - Found {}".format(dev[0])) + root = dev[0].split(".")[0] + print(" --> Verifying _STA...") + # Check Method first - then Name + sta_type = "MethodObj" + sta = self.d.get_method_paths(dev[0]+"._STA",table=table) + xsta = self.d.get_method_paths(dev[0]+".XSTA",table=table) + if not sta and not xsta: + # Check for names + sta_type = "IntObj" + sta = self.d.get_name_paths(dev[0]+"._STA",table=table) + xsta = self.d.get_name_paths(dev[0]+".XSTA",table=table) + if xsta and not sta: + print(" --> _STA already renamed to XSTA! 
Skipping other checks...") + print(" Please disable _STA to XSTA renames for this device, reboot, and try again.") + print("") + return {"valid":False,"break":True,"device":dev,"dev_name":dev_name,"dev_hid":dev_hid,"sta_type":sta_type} + if sta: + if var: + scope = "\n".join(self.d.get_scope(sta[0][1],strip_comments=True,table=table)) + has_var = var in scope + print(" --> {} {} variable".format("Has" if has_var else "Does NOT have",var)) + else: + print(" --> No _STA method/name found") + # Let's find out of we need a unique patch for _STA -> XSTA + if sta and not has_var: + print(" --> Generating _STA to XSTA rename") + sta_index = self.d.find_next_hex(sta[0][1],table=table)[1] + print(" ----> Found at index {}".format(sta_index)) + sta_hex = "5F535441" # _STA + xsta_hex = "58535441" # XSTA + padl,padr = self.d.get_shortest_unique_pad(sta_hex,sta_index,table=table) + patches.append({ + "Comment":"{} _STA to XSTA Rename".format(dev_name), + "Find":padl+sta_hex+padr, + "Replace":padl+xsta_hex+padr, + "Table":table + }) + return {"valid":True,"has_var":has_var,"sta":sta,"patches":patches,"device":dev,"dev_name":dev_name,"dev_hid":dev_hid,"root":root,"sta_type":sta_type} + + def ssdt_awac(self): + if not self.ensure_dsdt(): + return + self.u.head("SSDT RTCAWAC") + print("") + rtc_range_needed = False + rtc_crs_type = None + crs_lines = [] + lpc_name = None + awac_dict = self.get_sta_var(var="STAS",dev_hid="ACPI000E",dev_name="AWAC") + rtc_dict = self.get_sta_var(var="STAS",dev_hid="PNP0B00",dev_name="RTC") + # At this point - we should have any info about our AWAC and RTC devices + # we need. Let's see if we need an RTC fake - then build the SSDT. 
+ if not rtc_dict.get("valid"): + print(" - Fake needed!") + lpc_name = self.get_lpc_name() + if lpc_name is None: + self.u.grab("Press [enter] to return to main menu...") + return + else: + # Let's check if our RTC device has a _CRS variable - and if so, let's look for any skipped ranges + print(" --> Checking for _CRS...") + rtc_crs = self.d.get_method_paths(rtc_dict["device"][0]+"._CRS") or self.d.get_name_paths(rtc_dict["device"][0]+"._CRS") + if rtc_crs: + print(" ----> {}".format(rtc_crs[0][0])) + rtc_crs_type = "MethodObj" if rtc_crs[0][-1] == "Method" else "BuffObj" + # Only check for the range if it's a buffobj + if not rtc_crs_type.lower() == "buffobj": + print(" --> _CRS is a Method - cannot verify RTC range!") + else: + print(" --> _CRS is a Buffer - checking RTC range...") + last_adr = last_len = last_ind = None + crs_scope = self.d.get_scope(rtc_crs[0][1]) + # Let's try and clean up the scope - it's often a jumbled mess + pad_len = len(crs_scope[0])-len(crs_scope[0].lstrip()) + pad = crs_scope[0][:pad_len] + fixed_scope = [] + for line in crs_scope: + if line.startswith(pad): # Got a full line - strip the pad, and save it + fixed_scope.append(line[pad_len:]) + else: # Likely a part of the prior line + fixed_scope[-1] = fixed_scope[-1]+line + for i,line in enumerate(fixed_scope): + if "Name (_CRS, " in line: + # Rename _CRS to BUFX for later - and strip any comments to avoid confusion + line = line.replace("Name (_CRS, ","Name (BUFX, ").split(" //")[0] + if "IO (Decode16," in line: + # We have our start - get the the next line, and 4th line + try: + curr_adr = int(fixed_scope[i+1].strip().split(",")[0],16) + curr_len = int(fixed_scope[i+4].strip().split(",")[0],16) + curr_ind = i+4 # Save the value we may pad + except: # Bad values? Bail... 
+ print(" ----> Failed to gather values - could not verify RTC range.") + rtc_range_needed = False + break + if last_adr is not None: # Compare our range values + adjust = curr_adr - (last_adr + last_len) + if adjust: # We need to increment the previous length by our adjust value + rtc_range_needed = True + print(" ----> Adjusting IO range {} length to {}".format(self.hexy(last_adr,pad_to=4),self.hexy(last_len+adjust,pad_to=2))) + try: + hex_find,hex_repl = self.hexy(last_len,pad_to=2),self.hexy(last_len+adjust,pad_to=2) + crs_lines[last_ind] = crs_lines[last_ind].replace(hex_find,hex_repl) + except: + print(" ----> Failed to adjust values - could not verify RTC range.") + rtc_range_needed = False + break + # Save our last values + last_adr,last_len,last_ind = curr_adr,curr_len,curr_ind + crs_lines.append(line) + if rtc_range_needed: # We need to generate a rename for _CRS -> XCRS + print(" --> Generating _CRS to XCRS rename...") + crs_index = self.d.find_next_hex(rtc_crs[0][1])[1] + print(" ----> Found at index {}".format(crs_index)) + crs_hex = "5F435253" # _CRS + xcrs_hex = "58435253" # XCRS + padl,padr = self.d.get_shortest_unique_pad(crs_hex, crs_index) + patches = rtc_dict.get("patches",[]) + patches.append({ + "Comment":"{} _CRS to XCRS Rename".format(rtc_dict["dev_name"]), + "Find":padl+crs_hex+padr, + "Replace":padl+xcrs_hex+padr + }) + rtc_dict["patches"] = patches + rtc_dict["crs"] = True + else: + print(" ----> Not found") + # Let's see if we even need an SSDT + # Not required if AWAC is not present; RTC is present, doesn't have an STAS var, and doesn't have an _STA method, and no range fixes are needed + if not awac_dict.get("valid") and rtc_dict.get("valid") and not rtc_dict.get("has_var") and not rtc_dict.get("sta") and not rtc_range_needed: + print("") + print("Valid PNP0B00 (RTC) device located and qualified, and no ACPI000E (AWAC) devices found.") + print("No patching or SSDT needed.") + print("") + self.u.grab("Press [enter] to return to main 
menu...") + return + comment = "Incompatible AWAC Fix" if awac_dict.get("valid") else "RTC Fake" if not rtc_dict.get("valid") else "RTC Range Fix" if rtc_range_needed else "RTC Enable Fix" + suffix = [] + for x in (awac_dict,rtc_dict): + if not x.get("valid"): continue + val = "" + if x.get("sta") and not x.get("has_var"): + val = "{} _STA to XSTA".format(x["dev_name"]) + if x.get("crs"): + val += "{} _CRS to XCRS".format(" and " if val else x["dev_name"]) + if val: suffix.append(val) + if suffix: + comment += " - Requires {} Rename".format(", ".join(suffix)) + # At this point - we need to do the following: + # 1. Change STAS if needed + # 2. Setup _STA with _OSI and call XSTA if needed + # 3. Fake RTC if needed + oc = {"Comment":comment,"Enabled":True,"Path":"SSDT-RTCAWAC.aml"} + self.make_plist(oc, "SSDT-RTCAWAC.aml", awac_dict.get("patches",[])+rtc_dict.get("patches",[]), replace=True) + print("Creating SSDT-RTCAWAC...") + ssdt = """// +// Original sources from Acidanthera: +// - https://github.com/acidanthera/OpenCorePkg/blob/master/Docs/AcpiSamples/SSDT-AWAC.dsl +// - https://github.com/acidanthera/OpenCorePkg/blob/master/Docs/AcpiSamples/SSDT-RTC0.dsl +// +// Uses the CORP name to denote where this was created for troubleshooting purposes. 
+// +DefinitionBlock ("", "SSDT", 2, "CORP", "RTCAWAC", 0x00000000) +{ +""" + if any(x.get("has_var") for x in (awac_dict,rtc_dict)): + ssdt += """ External (STAS, IntObj) + Scope (\\) + { + Method (_INI, 0, NotSerialized) // _INI: Initialize + { + If (_OSI ("Darwin")) + { + Store (One, STAS) + } + } + } +""" + for x in (awac_dict,rtc_dict): + if not x.get("valid") or x.get("has_var") or not x.get("device"): continue + # Device was found, and it doesn't have the STAS var - check if we + # have an _STA (which would be renamed) + macos,original = ("Zero","0x0F") if x.get("dev_hid") == "ACPI000E" else ("0x0F","Zero") + if x.get("sta"): + ssdt += """ External ([[DevPath]], DeviceObj) + External ([[DevPath]].XSTA, [[sta_type]]) + Scope ([[DevPath]]) + { + Name (ZSTA, [[Original]]) + Method (_STA, 0, NotSerialized) // _STA: Status + { + If (_OSI ("Darwin")) + { + Return ([[macOS]]) + } + // Default to [[Original]] - but return the result of the renamed XSTA if possible + If (CondRefOf ([[DevPath]].XSTA)) + { + Store ([[DevPath]].XSTA[[called]], ZSTA) + } + Return (ZSTA) + } + } +""".replace("[[DevPath]]",x["device"][0]).replace("[[Original]]",original).replace("[[macOS]]",macos).replace("[[sta_type]]",x["sta_type"]).replace("[[called]]"," ()" if x["sta_type"]=="MethodObj" else "") + elif x.get("dev_hid") == "ACPI000E": + # AWAC device with no STAS, and no _STA - let's just add one + ssdt += """ External ([[DevPath]], DeviceObj) + Scope ([[DevPath]]) + { + Method (_STA, 0, NotSerialized) // _STA: Status + { + If (_OSI ("Darwin")) + { + Return (Zero) + } + Else + { + Return (0x0F) + } + } + } +""".replace("[[DevPath]]",x["device"][0]) + # Check if we need to setup an RTC range correction + if rtc_range_needed and rtc_crs_type.lower() == "buffobj" and crs_lines and rtc_dict.get("valid"): + ssdt += """ External ([[DevPath]], DeviceObj) + External ([[DevPath]].XCRS, [[type]]) + Scope ([[DevPath]]) + { + // Adjusted and renamed _CRS buffer ripped from DSDT with corrected range 
+[[NewCRS]] + // End of adjusted _CRS and renamed buffer + + // Create a new _CRS method that returns the result of the renamed XCRS + Method (_CRS, 0, Serialized) // _CRS: Current Resource Settings + { + If (LOr (_OSI ("Darwin"), LNot (CondRefOf ([[DevPath]].XCRS)))) + { + // Return our buffer if booting macOS or the XCRS method + // no longer exists for some reason + Return (BUFX) + } + // Not macOS and XCRS exists - return its result + Return ([[DevPath]].XCRS[[method]]) + } + } +""".replace("[[DevPath]]",rtc_dict["device"][0]) \ + .replace("[[type]]",rtc_crs_type) \ + .replace("[[method]]"," ()" if rtc_crs_type == "Method" else "") \ + .replace("[[NewCRS]]","\n".join([(" "*8)+x for x in crs_lines])) + # Check if we do not have an RTC device at all + if not rtc_dict.get("valid") and lpc_name: + ssdt += """ External ([[LPCName]], DeviceObj) // (from opcode) + Scope ([[LPCName]]) + { + Device (RTC0) + { + Name (_HID, EisaId ("PNP0B00")) // _HID: Hardware ID + Name (_CRS, ResourceTemplate () // _CRS: Current Resource Settings + { + IO (Decode16, + 0x0070, // Range Minimum + 0x0070, // Range Maximum + 0x01, // Alignment + 0x08, // Length + ) + IRQNoFlags () + {8} + }) + Method (_STA, 0, NotSerialized) // _STA: Status + { + If (_OSI ("Darwin")) + { + Return (0x0F) + } + Else + { + Return (0) + } + } + } + } +""".replace("[[LPCName]]",lpc_name) + ssdt += "}" + self.write_ssdt("SSDT-RTCAWAC",ssdt) + print("") + print("Done.") + # See if we just generated a failsafe - and encourage manual checking + # Would require only an RTC device (no AWAC) that has an _STA with no STAS var + if rtc_dict.get("valid") and not awac_dict.get("valid") and rtc_dict.get("sta") and not rtc_dict.get("has_var") and not rtc_range_needed: + print("\n {}!! NOTE !!{} Only RTC (no AWAC) detected with an _STA method and no STAS".format(self.yel,self.rst)) + print(" variable! 
Patch(es) and SSDT-RTCAWAC created as a failsafe,") + print(" but verify you need them by checking the RTC._STA conditions!") + self.patch_warn() + self.u.grab("Press [enter] to return...") + + def get_unique_device(self, parent_path, base_name, starting_number=0, used_names=[]): + # Appends a hex number until a unique device is found + while True: + if starting_number < 0: + # Try the original name first + name = base_name + # Ensure the starting number will be 0 next loop + starting_number = -1 + else: + # Append the number to the name + hex_num = hex(starting_number).replace("0x","").upper() + name = base_name[:-1*len(hex_num)]+hex_num + # Check if the name exists + if not len(self.d.get_device_paths(parent_path.rstrip(".")+"."+name)) and not name in used_names: + return (name,starting_number) + # Increment the starting number + starting_number += 1 + + def ssdt_rhub(self): + if not self.ensure_dsdt(): + return + self.u.head("USB Reset") + print("") + print("Gathering RHUB/HUBN/URTH devices...") + rhubs = self.d.get_device_paths("RHUB") + rhubs.extend(self.d.get_device_paths("HUBN")) + rhubs.extend(self.d.get_device_paths("URTH")) + if not len(rhubs): + print(" - None found! 
Aborting...") + print("") + self.u.grab("Press [enter] to return to main menu...") + return + print(" - Found {:,}".format(len(rhubs))) + # Gather some info + patches = [] + tasks = [] + used_names = [] + xhc_num = 2 + ehc_num = 1 + for x in rhubs: + task = {"device":x[0]} + print(" --> {}".format(".".join(x[0].split(".")[:-1]))) + name = x[0].split(".")[-2] + if name in self.illegal_names or name in used_names: + print(" ----> Needs rename!") + # Get the new name, and the path to the device and its parent + task["device"] = ".".join(task["device"].split(".")[:-1]) + task["parent"] = ".".join(task["device"].split(".")[:-1]) + if name.startswith("EHC"): + task["rename"],ehc_num = self.get_unique_device( + task["parent"], + "EH01", + starting_number=ehc_num, + used_names=used_names + ) + ehc_num += 1 # Increment the name number + else: + task["rename"],xhc_num = self.get_unique_device( + task["parent"], + "XHCI", + starting_number=xhc_num, + used_names=used_names + ) + xhc_num += 1 # Increment the name number + used_names.append(task["rename"]) + else: + used_names.append(name) + sta_method = self.d.get_method_paths(task["device"]+"._STA") + # Let's find out of we need a unique patch for _STA -> XSTA + if len(sta_method): + print(" ----> Generating _STA to XSTA patch") + sta_index = self.d.find_next_hex(sta_method[0][1])[1] + print(" ------> Found at index {}".format(sta_index)) + sta_hex = "5F535441" + xsta_hex = "58535441" + padl,padr = self.d.get_shortest_unique_pad(sta_hex, sta_index) + patches.append({ + "Comment":"{} _STA to XSTA Rename".format(task["device"].split(".")[-1]), + "Find":padl+sta_hex+padr, + "Replace":padl+xsta_hex+padr + }) + # Let's try to get the _ADR + scope_adr = self.d.get_name_paths(task["device"]+"._ADR") + task["address"] = self.d.get_dsdt_or_only()["lines"][scope_adr[0][1]].strip() if len(scope_adr) else "Name (_ADR, Zero) // _ADR: Address" + tasks.append(task) + oc = {"Comment":"SSDT to disable USB RHUB/HUBN/URTH and rename 
devices","Enabled":True,"Path":"SSDT-USB-Reset.aml"} + self.make_plist(oc, "SSDT-USB-Reset.aml", patches) + ssdt = """// +// SSDT to disable RHUB/HUBN/URTH devices and rename PXSX, XHC1, EHC1, and EHC2 devices +// +DefinitionBlock ("", "SSDT", 2, "CORP", "UsbReset", 0x00001000) +{ +""" + # Iterate the USB controllers and add external references + # Gather the parents first - ensure they're unique, and put them in order + parents = sorted(list(set([x["parent"] for x in tasks if x.get("parent",None)]))) + for x in parents: + ssdt += " External ({}, DeviceObj)\n".format(x) + for x in tasks: + ssdt += " External ({}, DeviceObj)\n".format(x["device"]) + # Let's walk them again and disable RHUBs and rename + for x in tasks: + if x.get("rename",None): + # Disable the old controller + ssdt += """ + Scope ([[device]]) + { + Method (_STA, 0, NotSerialized) // _STA: Status + { + If (_OSI ("Darwin")) + { + Return (Zero) + } + Else + { + Return (0x0F) + } + } + } + + Scope ([[parent]]) + { + Device ([[new_device]]) + { + [[address]] + Method (_STA, 0, NotSerialized) // _STA: Status + { + If (_OSI ("Darwin")) + { + Return (0x0F) + } + Else + { + Return (Zero) + } + } + } + } +""".replace("[[device]]",x["device"]).replace("[[parent]]",x["parent"]).replace("[[address]]",x.get("address","Name (_ADR, Zero) // _ADR: Address")).replace("[[new_device]]",x["rename"]) + else: + # Only disabling the RHUB + ssdt += """ + Scope ([[device]]) + { + Method (_STA, 0, NotSerialized) // _STA: Status + { + If (_OSI ("Darwin")) + { + Return (Zero) + } + Else + { + Return (0x0F) + } + } + } + """.replace("[[device]]",x["device"]) + ssdt += "\n}" + self.write_ssdt("SSDT-USB-Reset",ssdt) + print("") + print("Done.") + self.patch_warn() + self.u.grab("Press [enter] to return...") + return + + def ssdt_usbx(self): + usbx_props = { + "kUSBSleepPowerSupply":"0x13EC", + "kUSBSleepPortCurrentLimit":"0x0834", + "kUSBWakePowerSupply":"0x13EC", + "kUSBWakePortCurrentLimit":"0x0834" + } + while True: + 
self.u.head("USBX Device") + print("") + print("Current USBX Device Properties To Use:") + print("") + if usbx_props: + for i,x in enumerate(usbx_props,start=1): + print("{}. {} -> {}".format(i,x,usbx_props[x])) + else: + print(" - No properties set") + print("") + print("B. Build SSDT-USBX") + print("A. Remove All") + print("M. Return to Menu") + print("Q. Quit") + print("") + print("Remove a property by typing its key or number (ie kUSBSleepPowerSupply)") + print("Add/Edit a property using this format key:value (ie kUSBWakePowerSupply:0x13EC)") + print("Values must be a 16-bit hexadecimal integer") + print("") + menu = self.u.grab("Please enter your selection (default is B): ") + if not menu: menu = "b" + if menu.lower() == "m": return + elif menu.lower() == "q": self.u.custom_quit() + elif menu.lower() == "a": usbx_props = {} + elif menu.lower() == "b" and usbx_props: break + elif ":" in menu: + try: + key,value = menu.split(":") + if key.isnumeric(): # Assume they want to update a number + key = list(usbx_props)[int(key)-1] + else: # Assume we're adding a new one - make sure it's just alpha chars + key = "".join([x for x in key if x.isalpha()]) + value = self.hexy(int(value,16),pad_to=4) + assert len(value) == 6 # Ensure it's no larger than 16-bits + usbx_props[key] = value + except: pass + elif menu.isnumeric(): # Assume it's a value to remove + try: + usbx_props.pop(list(usbx_props)[int(menu)-1],None) + except: pass + else: # Assume it's a value we're trying to remove + usbx_props.pop(menu,None) + # Now build! 
+ self.u.head("USBX Device") + print("") + print("Creating generic SSDT-USBX...") + oc = {"Comment":"Generic USBX device for USB power properties","Enabled":True,"Path":"SSDT-USBX.aml"} + self.make_plist(oc, "SSDT-USBX.aml", []) + ssdt = """// Generic USBX Device with power properties injected +// Edited from: +// https://github.com/dortania/OpenCore-Post-Install/blob/master/extra-files/SSDT-USBX.aml +DefinitionBlock ("", "SSDT", 2, "CORP", "SsdtUsbx", 0x00001000) +{ + Scope (\\_SB) + { + Device (USBX) + { + Name (_ADR, Zero) // _ADR: Address + Method (_DSM, 4, NotSerialized) // _DSM: Device-Specific Method + { + If (LNot (Arg2)) + { + Return (Buffer () + { + 0x03 + }) + } + Return (Package () + {""" + for i,key in enumerate(usbx_props,start=1): + ssdt += "\n \"{}\",".format(key) + ssdt += "\n {}".format(usbx_props[key]) + if i < len(usbx_props): ssdt += "," + ssdt += """ + }) + } + Method (_STA, 0, NotSerialized) // _STA: Status + { + If (_OSI ("Darwin")) + { + Return (0x0F) + } + Else + { + Return (Zero) + } + } + } + } +}""" + self.write_ssdt("SSDT-USBX",ssdt) + print("") + print("Done.") + self.patch_warn() + self.u.grab("Press [enter] to return...") + return + + def ssdt_xosi(self): + if not self.ensure_dsdt(): + return + # Let's see what, if any, the highest version contained in the DSDT is + highest_osi = None + for x in self.osi_strings: + if self.osi_strings[x] in self.d.get_dsdt_or_only()["table"]: + highest_osi = x + while True: + lines = [""] + pad = len(str(len(self.osi_strings))) + for i,x in enumerate(self.osi_strings,start=1): + lines.append("{}. {} ({})".format(str(i).rjust(pad),x,self.osi_strings[x])) + if highest_osi: + lines.append("") + lines.append("A. Auto-Detected ({} - {})".format(highest_osi,self.osi_strings[highest_osi])) + lines.append("") + lines.append("M. Main") + lines.append("Q. 
Quit") + lines.append("") + if self.resize_window: + self.u.resize(self.w, max(len(lines)+4,self.h)) + self.u.head("XOSI") + print("\n".join(lines)) + menu = self.u.grab("Please select the latest Windows version for SSDT-XOSI{}: ".format( + " (default is A)" if highest_osi else "" + )) + if not len(menu): menu = "a" # Use the default if we passed nothing + if menu.lower() == "m": return + if menu.lower() == "q": + if self.resize_window: + self.u.resize(self.w,self.h) + self.u.custom_quit() + if menu.lower() == "a" and highest_osi: + target_string = highest_osi + break + # Make sure we got a number - and it's within our range + try: + target_string = list(self.osi_strings)[int(menu)-1] + except: + continue + # Got a valid option - break out and create the SSDT + break + if self.resize_window: + self.u.resize(self.w,self.h) + self.u.head("XOSI") + print("") + print("Creating SSDT-XOSI with support through {}...".format(target_string)) + ssdt = """DefinitionBlock ("", "SSDT", 2, "CORP", "XOSI", 0x00001000) +{ + Method (XOSI, 1, NotSerialized) + { + // Edited from: + // https://github.com/dortania/Getting-Started-With-ACPI/blob/master/extra-files/decompiled/SSDT-XOSI.dsl + // Based off of: + // https://docs.microsoft.com/en-us/windows-hardware/drivers/acpi/winacpi-osi#_osi-strings-for-windows-operating-systems + // Add OSes from the below list as needed, most only check up to Windows 2015 + // but check what your DSDT looks for + Store (Package () + { +""" + # Iterate our OS versions, and stop once we've added the last supported + for i,x in enumerate(self.osi_strings,start=1): + osi_string = self.osi_strings[x] + ssdt += " \"{}\"".format(osi_string) + if x == target_string or i==len(self.osi_strings): # Last one - bail + ssdt += " // "+x + break + ssdt += ", // "+x+"\n" # Add a comma and newline for the next value + ssdt +=""" + }, Local0) + If (_OSI ("Darwin")) + { + Return (LNotEqual (Match (Local0, MEQ, Arg0, MTR, Zero, Zero), Ones)) + } + Else + { + Return (_OSI 
(Arg0)) + } + } +}""" + patches = [] + print("Checking for OSID Method...") + osid = self.d.get_method_paths("OSID") + if osid: + print(" - Located {} Method at offset {}".format(osid[0][0],osid[0][1])) + print(" - Creating OSID to XSID rename...") + patches.append({ + "Comment":"OSID to XSID rename - must come before _OSI to XOSI rename!", + "Find":"4F534944", + "Replace":"58534944", + "Table":None # Apply to all tables + }) + else: + print(" - Not found, no OSID to XSID rename needed") + print("Creating _OSI to XOSI rename...") + patches.append({ + "Comment":"_OSI to XOSI rename - requires SSDT-XOSI.aml", + "Find":"5F4F5349", + "Replace":"584F5349", + "Table":None # Apply to all tables + }) + self.write_ssdt("SSDT-XOSI",ssdt) + oc = {"Comment":"_OSI override to return true through {} - requires _OSI to XOSI rename".format(target_string),"Enabled":True,"Path":"SSDT-XOSI.aml"} + self.make_plist(oc, "SSDT-XOSI.aml", patches, replace=True) + print("") + print("Done.") + self.patch_warn() + self.u.grab("Press [enter] to return...") + return + + def get_address_from_line(self, line, split_by="_ADR, ", table=None): + if table is None: + table = self.d.get_dsdt_or_only() + try: + return int(table["lines"][line].split(split_by)[1].split(")")[0].replace("Zero","0x0").replace("One","0x1"),16) + except: + return None + + def hexy(self,integer,pad_to=0): + return "0x"+hex(integer)[2:].upper().rjust(pad_to,"0") + + def get_bridge_devices(self, path): + # Takes a Pci(x,x)/Pci(x,x) style path, and returns named bridges and addresses + adrs = re.split(r"#|\/",path.lower().replace("pciroot(","").replace("pci(","").replace(")","")) + # Walk the addresses and create our bridge objects + bridges = [] + for bridge in adrs: + if not len(bridge): continue # Skip empty entries + if not "," in bridge: return # Uh... we don't want to bridge the PciRoot - something's wrong. 
+ try: + adr1,adr2 = [int(x,16) for x in bridge.split(",")] + # Join the addresses as a 32-bit int + adr_int = (adr1 << 16) + adr2 + bridges.append(adr_int) + except: + return [] # Failed :( + return bridges + + def sanitize_device_path(self, device_path): + # Walk the device_path, gather the addresses, and rebuild it + if not device_path.lower().startswith("pciroot("): + # Not a device path - bail + return + # Strip out PciRoot() and Pci() - then split by separators + adrs = re.split(r"#|\/",device_path.lower().replace("pciroot(","").replace("pci(","").replace(")","")) + new_path = [] + for i,adr in enumerate(adrs): + if i == 0: + # Check for roots + if "," in adr: return # Broken + try: new_path.append("PciRoot({})".format(self.hexy(int(adr,16)))) + except: return # Broken again :( + else: + if "," in adr: # Not Windows formatted + try: adr1,adr2 = [int(x,16) for x in adr.split(",")] + except: return # REEEEEEEEEE + else: + try: + adr = int(adr,16) + adr2,adr1 = adr & 0xFF, adr >> 8 & 0xFF + except: return # AAAUUUGGGHHHHHHHH + # Should have adr1 and adr2 - let's add them + new_path.append("Pci({},{})".format(self.hexy(adr1),self.hexy(adr2))) + return "/".join(new_path) + + def get_longest_match(self, device_dict, match_path, adj=False, exclusions_list=None): + matches = self.get_all_matches(device_dict,match_path,adj=adj,exclusions_list=exclusions_list) + if not matches: return + return sorted(matches,key=lambda x:x[-1],reverse=True)[0] + + def get_all_matches(self, device_dict, match_path, adj=False, exclusions_list=None): + matched = None + exact = False + key = "adj_path" if adj else "path" + matches = [] + for d in device_dict: + try: + if any(d.startswith(x) for x in exclusions_list): + # Skip excluded paths, and all child elements under + # those paths. 
+ continue + except: + pass + device = device_dict[d].get(key) + if not device: continue + if match_path.lower().startswith(device.lower()): + matches.append((d,device_dict[d],device.lower()==match_path.lower(),len(device))) + return matches + + def get_device_path(self): + paths = {} + acpi_exclusions = [] + while True: + self.u.head("Input Device Path") + print("") + print("Current Paths:") + # Retain order to prevent any odd drifting + sorted_paths = self.sorted_nicely(paths) + paths_length = len(sorted_paths) + if not paths: + print(" - None") + else: + for i,x in enumerate(sorted_paths,start=1): + if paths[x]: + print("{}. {} {}".format( + str(i).rjust(2),x,paths[x] + )) + else: + print("{}. {}".format(str(i).rjust(2),x)) + if acpi_exclusions: + print("") + print("ACPI Devices Excluded:") + for i,x in enumerate(self.sorted_nicely(acpi_exclusions),start=1): + print("{}. {}".format(str(i+paths_length).rjust(2),x)) + print("") + print("A valid device path will have one of the following formats,") + print("optionally followed by a 4-digit device name:") + print("") + print("macOS: PciRoot(0x0)/Pci(0x0,0x0)/Pci(0x0,0x0)") + print("Windows: PCIROOT(0)#PCI(0000)#PCI(0000)") + print("") + if paths: + print("A. Accept Paths and Continue") + print("C. Clear All Device Paths") + if acpi_exclusions: + print("X. Clear All ACPI Exclusions") + print("M. Main") + print("Q. 
Quit") + print("") + print("Enter the number next to a device/ACPI path above to remove it.") + print("Enter an ACPI path to exclude it from the checks.") + print("Drag and drop a config.plist to extract device paths from within.") + print("") + if self.copy_as_path: + print("NOTE: Currently running as admin on Windows - drag and drop may not work.") + print(" Shift + right-click in Explorer and select 'Copy as path' then paste here instead.") + print("") + path = self.u.grab("Please enter the device path needing bridges:\n\n") + if path.lower() == "m": + return + elif path.lower() == "q": + self.u.custom_quit() + elif path.lower() == "a" and paths: + return (paths, acpi_exclusions) + elif path.lower() == "c": + paths = {} + continue + elif path.lower() == "x" and acpi_exclusions: + acpi_exclusions = [] + continue + # Check if it's a number first + try: + path_int = int(path) + if path_int <= paths_length: + # Removing device path + del paths[sorted_paths[path_int-1]] + else: + # Removing ACPI path + del acpi_exclusions[path_int-paths_length-1] + continue + except: + pass + # Check if it's an ACPI path + acpi_dev = None + if len(path) >= 3 and not " " in path: + # Make sure we send at least 3 chars before + # looking for ACPI paths + acpi_dev = self.sanitize_acpi_path(path) + if acpi_dev: + # Got an ACPI path, make sure it exists + acpi_path = ".".join(acpi_dev) + # Search the tables for any matches - only look for actual + # Device () definitions though, not Scope () sets as well. 
+ matched_devices = [] + for table_name in self.sorted_nicely(list(self.d.acpi_tables)): + table = self.d.acpi_tables[table_name] + matched_devices += [(x,table_name) for x in self.d.get_device_paths(obj=acpi_path,table=table)] + if matched_devices: + # We have at least one matched device + if len(matched_devices) > 1: + # We have multiples - prompt for which to add + bail = False + while True: + self.u.head("Multiple Matches") + print("") + print("There are {:,} matches for {}:".format(len(matched_devices),acpi_path)) + print("") + for i,m in enumerate(matched_devices,start=1): + print("{}. {} ({})".format(str(i).rjust(2),m[0][0],m[1])) + print("") + print("M. Device Path Menu") + print("Q. Quit") + print("") + d = self.u.grab("Please select the ACPI path to exclude: ") + if d.lower() == "m": + bail = True + break + elif d.lower() == "q": + self.u.custom_quit() + try: + d = int(d) + assert 0 < d <= len(matched_devices) + except: + continue + # Got one - select it + matched_devices = [matched_devices[d-1]] + break + if bail: + # We wanted to return to the menu + continue + # We got a single matched device - add only its path + # to the list. 
+ if not matched_devices[0][0][0] in acpi_exclusions: + acpi_exclusions.append(matched_devices[0][0][0]) + continue + # Check if we got a file path + file_path = self.u.check_path(path) + if file_path and file_path.lower().endswith(".plist") and os.path.isfile(file_path): + # Try loading it + file_name = os.path.basename(file_path) + self.u.head("Processing {}".format(file_name)) + print("") + print("Loading {}...".format(file_name)) + try: + with open(file_path,"rb") as f: + passed_plist = plist.load(f) + except Exception as e: + print(" - Failed to open: {}".format(e)) + print("") + self.u.grab("Press [enter] to return...") + continue + print("Verifying root node type...") + if not isinstance(passed_plist,dict): + print(" - Invalid type - must be dictionary") + print("") + self.u.grab("Press [enter] to return...") + continue + print("Gathering device paths...") + dp_keys = None + try: + # OpenCore pathing + dp_keys = passed_plist.get("DeviceProperties",{}).get("Add",{}) + except: + pass + if not dp_keys: + try: + # Clover pathing + dp_keys = passed_plist.get("Devices",{}).get("Properties") + except: + pass + if not dp_keys or not isinstance(dp_keys,dict): + print(" - No device paths located.") + print("") + self.u.grab("Press [enter] to return...") + continue + print("Iterating {:,} device paths...".format(len(dp_keys))) + any_failed = False + for d in dp_keys: + print(" - {}".format(d)) + d_path = self.sanitize_device_path(d) + if not d_path: + print(" --> Invalid device path - skipping") + any_failed = True + continue + if d_path in paths: + print(" --> Already exists in device path list - skipping") + continue + # Add it + paths[d_path] = None + if any_failed: + print("") + self.u.grab("Press [enter] to return...") + continue + # Extract the path and device + # if specified + path_dev = path.split() + dev = None + if len(path_dev) == 1: + path = path_dev[0] + elif len(path_dev) == 2: + path,dev = path_dev + else: + continue # Incorrect formatting + # Make sure 
the device is valid + if dev: + dev = dev.replace("_","")[:4] + if not dev.isalnum(): + continue + dev = dev.upper().ljust(4,"0") + path = self.sanitize_device_path(path) + if not path: + continue + # Add our path at the end + paths[path] = dev + + def get_device_paths(self): + print("Gathering ACPI devices...") + device_dict = {} + pci_root_paths = [] + orphaned_devices = [] + sanitized_paths = [] + for table_name in self.sorted_nicely(list(self.d.acpi_tables)): + table = self.d.acpi_tables[table_name] + # Let's gather our roots - and any other paths that and in _ADR + pci_roots = self.d.get_device_paths_with_id(_id="PNP0A08",table=table) + pci_roots += self.d.get_device_paths_with_id(_id="PNP0A03",table=table) + pci_roots += self.d.get_device_paths_with_id(_id="ACPI0016",table=table) + paths = self.d.get_path_of_type(obj_type="Name",obj="_ADR",table=table) + # Let's create our dictionary device paths - starting with the roots + for path in pci_roots: + if path[0] in device_dict: continue # Already have it + device_uid = self.d.get_name_paths(obj=path[0]+"._UID",table=table) + if device_uid and len(device_uid)==1: + adr = self.get_address_from_line(device_uid[0][1],split_by="_UID, ",table=table) + else: # Assume 0 + adr = 0 + device_dict[path[0]] = {"path":"PciRoot({})".format(self.hexy(adr))} + pci_root_paths.append(device_dict[path[0]]) + # First - let's create a new list of tuples with the ._ADR stripped + # The goal here is to ensure pathing is listed in the proper order. 
+ sanitized_paths.extend([( + x[0][0:-5], # The path minus the ._ADR/._UID + x[1], # The line number + x[2], # The type of the match (Name, Device, Method, etc) + self.get_address_from_line(x[1],table=table) # Address + ) for x in paths]) + print("Generating device paths...") + def check_path(path,device_dict): + # Returns a bool depending on the checks + # True = added, already added, ignore + # False = orphaned + adr = path[3] # Retain the address + adr_overflow = False + # Let's bitshift to get both addresses + try: + adr1,adr2 = adr >> 16 & 0xFFFF, adr & 0xFFFF + radr1,radr2 = adr1,adr2 # Save placeholders in case we overflow + if adr1 > 0xFF: # Overflowed + adr_overflow = True + radr1 = 0 + if adr2 > 0xFF: # Overflowed + adr_overflow = True + radr2 = 0 + except: + return True # Bad address? + # Let's check if our path already exists + if path[0] in device_dict: + return True # Skip + # Doesn't exist - let's see if the parent path does? + parent = ".".join(path[0].split(".")[:-1]) + parent_device = device_dict.get(parent) + if not parent_device or not parent_device.get("path"): + # No parent either - let's keep track of the device + # as an orphan - and check it at the end + return False + # Our parent path exists - let's copy its device_path, and append our addressing + device_path = parent_device["path"] + device_path += "/Pci({},{})".format(self.hexy(adr1),self.hexy(adr2)) + device_dict[path[0]] = {"path":device_path} + # Check if either we, or our parent has an adr overflow + if adr_overflow or parent_device.get("adr_overflow"): + device_dict[path[0]]["adr_overflow"] = True + parent_path = parent_device.get("adj_path",parent_device["path"]) + device_dict[path[0]]["adj_path"] = parent_path + "/Pci({},{})".format(self.hexy(radr1),self.hexy(radr2)) + if adr_overflow: # It was us, not a parent + dev_overflow = device_dict[path[0]].get("dev_overflow",[]) + dev_overflow.append(path[0]) + device_dict[path[0]]["dev_overflow"] = dev_overflow + return True + for path 
in sorted(sanitized_paths): + if not check_path(path,device_dict): + orphaned_devices.append(path) + if orphaned_devices: + print("Rechecking orphaned devices...") + while True: + removed = [] + for path in orphaned_devices: + if check_path(path,device_dict): + removed.append(path) + if not removed: break + for r in removed: + try: orphaned_devices.remove(r) + except ValueError: pass + return (device_dict,pci_root_paths) + + def print_unmatched(self, unmatched=None, pci_root_paths=None): + print("") + if unmatched: + print("{}!! WARNING !!{} No matches were found for the following paths:".format(self.yel,self.rst)) + print("\n".join([" {}".format(x) for x in sorted(unmatched)])) + else: + print("{}!! WARNING !!{} No matches found!".format(self.yel,self.rst)) + if pci_root_paths: + print("\n{}!! WARNING !!{} Device paths must start with one of the following PciRoot()".format(self.yel,self.rst)) + print(" options to match the current ACPI tables:") + print("\n".join([" {}".format(x.get("path",x)) for x in sorted(pci_root_paths)])) + + def print_address_overflow(self, addr_overflow): + print("") + print("{}!! WARNING !!{} There are _ADR overflows in the device path!".format(self.red,self.rst)) + print(" The following devices may need adjustments for bridges to work:") + # Ensure they're all unique, and we sort them as we print + for d in sorted(list(set(addr_overflow))): + print(" {}".format(d)) + + def print_failed_bridges(self, failed_bridges): + print("\n{}!! 
WARNING !!{} The following bridges failed to resolve:".format(self.yel,self.rst)) + print("\n".join([" {}".format(x) for x in sorted(failed_bridges)])) + + def pci_bridge(self): + if not self.ensure_dsdt(): return + path_dict = self.get_device_path() + if not path_dict: return + # Break out the paths from any acpi exclusions + path_dict,acpi_exclusions = path_dict + self.u.head("Building Bridges") + print("") + device_dict,pci_root_paths = self.get_device_paths() + matches = [] + unmatched = [] + print("Matching device paths...") + for p in sorted(path_dict): + print(" - {}".format(p)) + match = self.get_longest_match(device_dict,p,exclusions_list=acpi_exclusions) + if not match: + print(" --> No match found!") + unmatched.append(p) + else: + # We got a match - check if we need to list bridges needed + if match[2]: + print(" --> Matched {} - no bridge needed".format(match[0])) + else: + b = p[match[-1]+1:].count("/")+1 + print(" --> Matched {} - {:,} bridge{} needed".format( + match[0], + b, + "" if b==1 else "s" + )) + matches.append((p,match)) + # See if we have any addresses that overflow + over_match = self.get_longest_match(device_dict,p,adj=True) + if over_match: + print("\n{}!! 
WARNING !!{} Also matched the following devices whose addresses overflow which".format(self.yel,self.rst)) + print(" may prevent bridges and DeviceProperties from working correctly:") + print("\n".join([" {}".format(x) for x in sorted(over_match[1].get("dev_overflow",[over_match[0]]))])) + if not matches: + self.print_unmatched(unmatched=unmatched,pci_root_paths=pci_root_paths) + print("") + print("No matches found!") + print("") + self.u.grab("Press [enter] to return...") + return + # Check for, and warn about address overflows + addr_overflow = [] + for test_path,match in matches: + if match[1].get("adr_overflow"): + # Get all matches and list the devices whose addresses overflow + over_flow = self.get_all_matches(device_dict,match[1]["path"]) + for d in over_flow: + if d[1].get("dev_overflow"): + addr_overflow.extend(d[1]["dev_overflow"]) + # Make sure we have something to display - at least + if all(match[1][2] for match in matches): + if unmatched: + self.print_unmatched(unmatched=unmatched,pci_root_paths=pci_root_paths) + if addr_overflow: + self.print_address_overflow(addr_overflow) + print("") + print("No bridges needed!") + print("") + self.u.grab("Press [enter] to return...") + return + starting_at = 0 + print("") + print("Resolving bridges...") + bridge_match = {} + bridge_list = [] + failed_bridges = [] + external_refs = [] + for test_path,match in matches: + if match[2]: + continue # Skip full matches + remain = test_path[match[-1]+1:] + print(" - {}".format(remain)) + bridges = self.get_bridge_devices(remain) + if not bridges: + print(" --> Could not resolve!") + failed_bridges.append(test_path) + else: + # Join the elements separated by a space to + # make parsing easier + path = match[0] + for i,b in enumerate(bridges,start=1): + path += " " + str(b) + if not path in bridge_list: + bridge_list.append(path) + # Retain the final path for comments later + if i == len(bridges): + bridge_match[path] = test_path + # Retain the ACPI path for the SSDT + if 
not match[0] in external_refs: + external_refs.append(match[0]) + # Make sure we have something in order to continue + if not bridge_list: + if failed_bridges: + self.print_failed_bridges(failed_bridges) + if unmatched: + self.print_unmatched(unmatched=unmatched,pci_root_paths=pci_root_paths) + if addr_overflow: + self.print_address_overflow(addr_overflow) + print("") + print("Something went wrong resolving bridges!") + print("") + self.u.grab("Press [enter] to return...") + return + print("") + print("Creating SSDT-Bridge...") + # First - we need to define our header and external references + ssdt = """// Source and info from: +// https://github.com/acidanthera/OpenCorePkg/blob/master/Docs/AcpiSamples/Source/SSDT-BRG0.dsl +DefinitionBlock ("", "SSDT", 2, "CORP", "PCIBRG", 0x00000000) +{ + /* + * Start copying here if you're adding this info to an existing SSDT-Bridge! + */ + +""" + for acpi in external_refs: + # Walk our external refs and define them at the top + ssdt += " External ({}, DeviceObj)\n".format(acpi) + ssdt += "\n" + + def close_brackets(ssdt,depth,iterations=1,pad=" "): + # Helper to close brackets based on depth and + # iteration count + while iterations > 0: + ssdt += (pad*depth)+"}\n" + iterations -= 1 + depth -= 1 + return ssdt + + # Walk the bridge list and define elements as we go + last_path = [] + pad = " " + acpi = None + bridge_names = {} + acpi_paths = {} + # Sorting ensures our hierarchy should remain intact + # which vastly simplifies the work we have to do + for element in sorted(bridge_list): + # Split our element into path components + comp = element.split() + _acpi = comp[0] + # Find our longest match with the last path checked + _match = 0 + for i in range(min(len(comp),len(last_path))): + if comp[i] != last_path[i]: + break + _match += 1 + # Close any open brackets that weren't matched + if last_path: + ssdt = close_brackets(ssdt,len(last_path),len(last_path)-_match) + # Retain the last path + last_path = comp + if _acpi != acpi: + 
# Set a new scope if we found a different + # ACPI path + acpi = _acpi + ssdt += pad+"Scope ({})\n".format(acpi) + ssdt += pad+"{\n" + curr_depth = len(comp) + if curr_depth == 0: + continue # top level.. somehow? Skip. + # Got a new device - pad and define + parent_path = " ".join(comp[:-1]) + # Get our bridge name by keeping track of + # the number of bridges per parent path + if not parent_path in bridge_names: + bridge_names[parent_path] = [] + # Generate a name from the bridge number + parent_acpi = acpi_paths.get(parent_path,_acpi) + brg_basename = path_dict.get(bridge_match.get(element)) + name,num = self.get_unique_device( + parent_acpi, + brg_basename or "BRG0", + starting_number=-1, # Try the original name first + used_names=bridge_names[parent_path] + ) + # Add our path to the dict to increment the next count + bridge_names[parent_path].append(name) + # Set our acpi path for any child elements + acpi_paths[element] = parent_acpi+"."+name + # Get our padding + p = pad*(curr_depth) + # If this is a final bridge - add note about customization + if element in bridge_match: + if brg_basename: + # We got a custom bridge name + if brg_basename != name: + # It was overridden - make a note + ssdt += p+"// User-provided name '{}' supplied, incremented for uniqueness\n".format(brg_basename) + else: + # It was used as provided + ssdt += p+"// User-provided name '{}' supplied\n".format(brg_basename) + else: + # Just leave a note about potentially customizing the name + ssdt += p+"// Customize the following device name if needed, eg. 
GFX0\n" + # Set up our device definition + ssdt += p+"Device ({})\n".format(name) + ssdt += p+"{\n" + # Increase our padding + p += pad + if element in bridge_match: + # Add our comment + ssdt += "{0}// Target Device Path:\n{0}// {1}\n".format( + p, + bridge_match[element] + ) + # Format the address: 0 = Zero, 1 = One, + # others are padded to 8 hex digits if + # > 0xFFFF + adr_int = int(comp[-1]) + adr = { + 0:"Zero", + 1:"One" + }.get( + adr_int, + "0x"+hex(adr_int).upper()[2:].rjust( + 8 if adr_int > 0xFFFF else 0, + "0" + ) + ) + ssdt += "{}Name (_ADR, {})\n".format(p,adr) + # We finished parsing - clean up after ourselves + if last_path: + last_depth = len(last_path) + # Close any missing elements + ssdt = close_brackets(ssdt,last_depth,last_depth) + # Close the final bracket + ssdt += """ + /* + * End copying here if you're adding this info to an existing SSDT-Bridge! + */ +} +""" + self.write_ssdt("SSDT-Bridge",ssdt) + oc = {"Comment":"Defines missing PCI bridges for property injection","Enabled":True,"Path":"SSDT-Bridge.aml"} + self.make_plist(oc, "SSDT-Bridge.aml", ()) + print("") + print("Done.") + if failed_bridges: + self.print_failed_bridges(failed_bridges) + if unmatched: + self.print_unmatched(unmatched=unmatched,pci_root_paths=pci_root_paths) + if addr_overflow: + self.print_address_overflow(addr_overflow) + self.patch_warn() + self.u.grab("Press [enter] to return...") + return + + def sanitize_acpi_path(self, path): + # Takes an ACPI path either using ACPI()#ACPI() or + # PATH.PATH notation and breaks it into a list of + # elements without the leading backslash, and without + # the trailing underscore pads + path = path.replace("ACPI(","").replace(")","").replace("#",".").replace("\\","") + new_path = [] + valid = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_" + for element in path.split("."): + element = element.rstrip("_").upper() + if len(element) > 4 or not all(x in valid for x in element): + # Invalid element, return None + return None + 
new_path.append(element) + return new_path + + def compare_acpi_paths(self, path, path_list): + path_check = self.sanitize_acpi_path(path) + if not path_check: + return False # Invalid path + if not len(path_list) == len(path_check): + return False + # Same length - check the elements + return all((path_list[i] == path_check[i] for i in range(len(path_list)))) + + def get_acpi_path(self): + while True: + self.u.head("Input ACPI Path") + print("") + print("A valid ACPI path will have one of the following formats:") + print("") + print("macOS: \\_SB.PCI0.XHC.RHUB") + print("Windows: ACPI(_SB_)#ACPI(PCI0)#ACPI(XHC_)#ACPI(RHUB)") + print("") + print("M. Main") + print("Q. Quit") + print(" ") + path = self.u.grab("Please enter the ACPI path:\n\n") + if path.lower() == "m": + return + if path.lower() == "q": + self.u.custom_quit() + path = self.sanitize_acpi_path(path) + if not path: continue + return path + + def print_acpi_path(self, path): + # Takes a list of ACPI path elements, and formats them + # into a single path. 
Will ensure the first element starts + # with \ + return ".".join([("\\" if i==0 else"")+x.lstrip("\\").rstrip("_") for i,x in enumerate(path)]) + + def acpi_device_path(self): + if not self.ensure_dsdt(): return + test_path = self.get_acpi_path() + if not test_path: return + print_path = self.print_acpi_path(test_path) + self.u.head("ACPI -> Device Path") + print("") + device_dict,_ = self.get_device_paths() + print("Matching against {}".format(print_path)) + p = next( + (x for x in device_dict if self.compare_acpi_paths(x,test_path)), + None + ) + if not p: + print(" - Not found!") + print("") + self.u.grab("Press [enter] to return...") + return + print(" - Matched: {}".format(device_dict[p]["path"])) + if device_dict[p].get("adr_overflow"): + # Get all matches and list the devices whose addresses overflow + over_flow = self.get_all_matches(device_dict,device_dict[p]["path"]) + devs = [] + for d in over_flow: + if d[1].get("dev_overflow"): + devs.extend(d[1]["dev_overflow"]) + # Make sure we have something to display - at least + if devs: + print("\n{}!! 
WARNING !!{} There are _ADR overflows in the device path!".format(self.red,self.rst)) + print(" - The following devices may affect property injection:") + # Ensure they're all unique, and we sort them as we print + for d in sorted(list(set(devs))): + print(" --> {}".format(d)) + print("") + self.u.grab("Press [enter] to return...") + return + + def ssdt_pnlf(self): + if not self.ensure_dsdt(allow_any=True): return + # Let's get our _UID + while True: + self.u.head("Select _UID for PNLF") + print("") + print("_UID | Supported Platform(s) | PWMMax") + print("-----------------------------------------------") + print(" 14 | Arrandale, Sandy/Ivy Bridge | 0x0710") + print(" 15 | Haswell/Broadwell | 0x0AD9") + print(" 16 | Skylake/Kaby Lake, some Haswell | 0x056C") + print(" 17 | Custom LMAX | 0x07A1") + print(" 18 | Custom LMAX | 0x1499") + print(" 19 | CoffeeLake and newer (or AMD) | 0xFFFF") + print(" 99 | Other (requires custom applbkl-name/applbkl-data dev props)") + print("") + print("The _UID tells WhateverGreen what backlight data to use.") + print("More info can be found in WEG's kern_weg.cpp here under appleBacklightData") + print("") + print("M. Main Menu") + print("Q. Quit") + print("") + menu = self.u.grab("Please select the target _UID value: ") + if menu.lower() == "m": return + elif menu.lower() == "q": self.u.custom_quit() + try: uid = int(menu) + except: continue + if not uid in (14,15,16,17,18,19): + while True: + self.u.head("Custom _UID for PNLF") + print("") + print("{} is a custom _UID which may require customization to setup,".format(uid)) + print("or not have support at all.") + print("") + print("M. Return to Main Menu") + print("Q. Quit") + print("") + menu = self.u.grab("Are you sure you want to use it? 
(y/n): ") + if menu.lower() == "q": + self.u.custom_quit() + elif menu.lower() == "m": + return + if not menu.lower() in ("y","n"): continue + break + if menu.lower() == "n": continue + break + get_igpu = False + igpu = "" + guessed = manual = False + if uid == 14: + while True: + self.u.head("Arrandale/SNB/IVB _UID") + print("") + print("Some machines using _UID 14 have problems with max brightness or") + print("other issues. In order to fix these - the iGPU device path must") + print("be discovered and some GPU registers need to be set.") + print("") + print("{}!! WARNING !!{} It is recommended to try WITHOUT this first!!".format(self.yel,self.rst)) + print("") + print("M. Return to Main Menu") + print("Q. Quit") + print("") + gpu_reg = self.u.grab("Would you like to include GPU register code? (y/n): ") + if gpu_reg.lower() == "q": + self.u.custom_quit() + elif gpu_reg.lower() == "m": + return + elif gpu_reg.lower() == "y": + get_igpu = True + break + elif gpu_reg.lower() == "n": + break # Leave the loop + self.u.head("Generating PNLF") + print("") + print("Creating SSDT-PNLF...") + print(" - _UID: {}".format(uid)) + # Check if we are building the SSDT with a _UID of 14 + if get_igpu: + print(" - Setting PWMMax calculations") + print("Looking for iGPU device at 0x00020000...") + for table_name in self.sorted_nicely(list(self.d.acpi_tables)): + table = self.d.acpi_tables[table_name] + print(" Checking {}...".format(table_name)) + # Try to gather our iGPU device + paths = self.d.get_path_of_type(obj_type="Name",obj="_ADR",table=table) + for path in paths: + adr = self.get_address_from_line(path[1],table=table) + if adr == 0x00020000: + igpu = path[0][:-5] + print(" - Found at {}".format(igpu)) + break + if igpu: + break # Leave the table search loop + if not igpu: # Try matching by name + print("Not found by address!") + print("Searching common iGPU names...") + for table_name in self.sorted_nicely(list(self.d.acpi_tables)): + table = self.d.acpi_tables[table_name] 
+ print(" Checking {}...".format(table_name)) + pci_roots = self.d.get_device_paths_with_id(_id="PNP0A08",table=table) + pci_roots += self.d.get_device_paths_with_id(_id="PNP0A03",table=table) + pci_roots += self.d.get_device_paths_with_id(_id="ACPI0016",table=table) + external = [] + for line in table["lines"]: + if not line.strip().startswith("External ("): continue # We don't need it + try: + path = line.split("(")[1].split(", ")[0] + # Prepend the backslash and ensure trailing underscores are stripped. + path = "\\"+".".join([x.rstrip("_").replace("\\","") for x in path.split(".")]) + external.append(path) + except: pass + for root in pci_roots: + for name in ("IGPU","_VID","VID0","VID1","GFX0","VGA","_VGA"): + test_path = "{}.{}".format(root[0],name) + device = self.d.get_device_paths(test_path,table=table) + if device: device = device[0][0] # Unpack to the path + else: + # Walk the external paths and see if it's declared elsewhere? + # We're not patching anything directly - just getting a pathing + # reference, so it's fine to not have the surrounding code. + device = next((x for x in external if test_path == x),None) + if not device: continue # Not found :( + # Got a device - see if it has an _ADR, and skip if so - as it was wrong in the prior loop + if self.d.get_path_of_type(obj_type="Name",obj=device+"._ADR",table=table): continue + # At this point - we got a hit + igpu = device + guessed = True + print(" - Found likely iGPU device at {}".format(igpu)) + if igpu: + break # Leave the table search loop + if get_igpu and (not igpu or guessed): + # We need to prompt the user based on what we have + if igpu: + while True: + self.u.head("iGPU Path") + print("") + print("Found likely iGPU at {}".format(igpu)) + print("") + print("M. Return to Main Menu") + print("Q. Quit") + print("") + manual_igpu = self.u.grab("Would you like to use this path? 
(y/n): ") + if manual_igpu.lower() == "q": + self.u.custom_quit() + elif manual_igpu.lower() == "m": + return + elif manual_igpu.lower() == "y": + break + elif manual_igpu.lower() == "n": + igpu = "" + break # Leave the loop + if not igpu: + while True: + self.u.head("Custom iGPU Path") + print("") + if not guessed: + print("No valid iGPU path was found in the passed ACPI table(s).\n") + print("Please type the iGPU ACPI path to use. Each path element is limited") + print("to 4 alphanumeric characters (starting with a letter or underscore),") + print("and separated by spaces.") + print("") + print("e.g. _SB_.PCI0.GFX0") + print("") + print("M. Return to Main Menu") + print("Q. Quit") + print("") + manual_igpu = self.u.grab("Please type the iGPU path to use: ") + if manual_igpu.lower() == "q": + self.u.custom_quit() + elif manual_igpu.lower() == "m": + return + else: # Maybe got a path - qualify it + parts = manual_igpu.lstrip("\\").upper().split(".") + # Make sure it's between 1 and 4 chars long, and doesn't start with a number + valid = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_" + nostart = "0123456789" + if any(not 0> 16, Local1) + If (LNot (Local1)) + { + Store (Local2, Local1) + } + If (LNotEqual (Local2, Local1)) + { + // set new backlight PWMMax but retain current backlight level by scaling + Store ((LEVL * Local2) / Local1, Local0) + Store (Local2 << 16, Local3) + If (LGreater (Local2, Local1)) + { + // PWMMax is getting larger... 
store new PWMMax first + Store (Local3, LEVX) + Store (Local0, LEVL) + } + Else + { + // otherwise, store new brightness level, followed by new PWMMax + Store (Local0, LEVL) + Store (Local3, LEVX) + } + } + } + } + }""" + ssdt += """ + } +}""" + # Perform the replacements + ssdt = ssdt.replace("[[uid_value]]",self.hexy(uid)).replace("[[uid_dec]]",str(uid)).replace("[[igpu_path]]",igpu) + self.write_ssdt("SSDT-PNLF",ssdt) + oc = { + "Comment":"Defines PNLF device with a _UID of {} for backlight control{}".format( + uid, + " - requires PNLF to XNLF rename" if any("XNLF" in p["Comment"] for p in patches) else "" + ), + "Enabled":True, + "Path":"SSDT-PNLF.aml" + } + self.make_plist(oc, "SSDT-PNLF.aml", patches, replace=True) + if igpu: + if guessed: + print("\n{}!! WARNING !!{} iGPU path was guessed to be {}\n !!VERIFY BEFORE USING!!".format(self.red,self.rst,igpu)) + if manual: + print("\n{}!! WARNING !!{} iGPU path was manually set to {}\n !!VERIFY BEFORE USING!!".format(self.red,self.rst,igpu)) + if has_nbcf_old or has_nbcf_new: + print("\n{}!! WARNING !!{} NBCF patch was generated - VERIFY BEFORE ENABLING!!".format(self.red,self.rst)) + print("") + print("Done.") + self.patch_warn() + self.u.grab("Press [enter] to return...") + return + + def fix_dmar(self): + dmar = next((table for table in self.d.acpi_tables.values() if table.get("signature") == b"DMAR"),None) + if not dmar: + d = None + while True: + self.u.head("Select DMAR Table") + print(" ") + if self.copy_as_path: + print("NOTE: Currently running as admin on Windows - drag and drop may not work.") + print(" Shift + right-click in Explorer and select 'Copy as path' then paste here instead.") + print("") + print("M. Main") + print("Q. 
Quit") + print(" ") + dmar = self.u.grab("Please drag and drop a DMAR table here: ") + if dmar.lower() == "m": + return + if dmar.lower() == "q": + self.u.custom_quit() + out = self.u.check_path(dmar) + if not out: continue + self.u.head("Loading DMAR Table") + print("") + print("Loading {}...".format(os.path.basename(out))) + if d is None: + d = dsdt.DSDT() # Initialize a new instance just for this + # Got a DMAR table, try to load it + d.load(out) + dmar = d.get_table_with_signature("DMAR") + if not dmar: continue + break + self.u.head("Patching DMAR") + print("") + print("Verifying signature...") + reserved = got_sig = False + new_dmar = ["// DMAR table with Reserved Memory Regions stripped\n"] + region_count = 0 + for line in dmar.get("lines",[]): + if 'Signature : "DMAR"' in line: + got_sig = True + print("Checking for Reserved Memory Regions...") + if not got_sig: continue # Skip until we find the signature + # If we find a reserved memory region, toggle our indicator + if "Subtable Type : 0001 [Reserved Memory Region]" in line: + region_count += 1 + reserved = True + # Check for a non-reserved memory region subtable type + elif "Subtable Type : " in line: + reserved = False + elif 'Oem ID : "' in line: + # Got the OEM - replace with CORP + line = line.split('"')[0] + '"CORP"' + elif 'Oem Table ID : "' in line: + # Got the OEM Table ID - replace with DMAR + line = line.split('"')[0] + '"DMAR"' + # Only append if we're not in a reserved memory region + if not reserved: + # Ensure any digits in Reserved : XX fields are 0s + if "Reserved : " in line: + res,value = line.split(" : ") + new_val = "" + for i,char in enumerate(value): + if not char in " 0123456789ABCDEF": + # Hit something else - dump the rest as-is into the val + new_val += value[i:] + break + elif char not in ("0"," "): + # Ensure we 0 out all non-0, non-space values + char = "0" + # Append the character + new_val += char + line = "{} : {}".format(res,new_val) + new_dmar.append(line) + if not 
got_sig: + print(" - Not found, does not appear to be a valid DMAR table.") + print("") + self.u.grab("Press [enter] to return...") + return + # Give the user some feedback + if not region_count: + # None found + print("No Reserved Memory Regions found - DMAR does not need patching.") + print("") + self.u.grab("Press [enter] to return to main menu...") + return + # We removed some regions + print("Located {:,} Reserved Memory Region{} - generating new table...".format(region_count,"" if region_count==1 else "s")) + self.write_ssdt("DMAR","\n".join(new_dmar).strip()) + oc = { + "Comment":"Replacement DMAR table with Reserved Memory Regions stripped - requires DMAR table be dropped", + "Enabled":True, + "Path":"DMAR.aml" + } + drop = ({ + "Comment":"Drop DMAR Table", + "Table":dmar, + "Signature":dmar.get("signature",b"DMAR") + },) + self.make_plist(oc, "DMAR.aml", (), drops=drop) + print("") + print("Done.") + self.patch_warn() + self.u.grab("Press [enter] to return...") + return + + def get_dev_at_adr(self,target_adr=0x001F0004,exclude_names=("XHC",)): + # Helper to walk tables looking for device + parent at a + # provided address + for table_name in self.sorted_nicely(list(self.d.acpi_tables)): + table = self.d.acpi_tables[table_name] + paths = self.d.get_path_of_type(obj_type="Name",obj="_ADR",table=table) + for path in paths: + adr = self.get_address_from_line(path[1],table=table) + # Check by address + # - Intel tables seem to have it at 0x001F0004 + # - AMD tables seem to have it at 0x00140000 + # Though this matches Intel chipset USB 3 controllers + # so we'll need to also check names and such. 
+ if adr == target_adr: + # Ensure our path minus ._ADR is not top level, that we + # didn't match any devices with "XHC" in their name, and + # then return the path + parent path + table name + path_parts = path[0].split(".")[:-1] + if len(path_parts) > 1: + # Make sure we account for any excluded names + if exclude_names is None or not \ + any(x.lower() in path_parts[-1].lower() for x in exclude_names): + _path = ".".join(path_parts) + _parent = ".".join(path_parts[:-1]) + return (_path,_parent,table_name) + + def smbus(self): + if not self.ensure_dsdt(): + return + self.u.head("SMBus") + print("") + print("Gathering potential bus devices...") + bus_path = bus_parent = None + # It seems modern Intel uses 0x001F0004 for the SBus + # Legacy Intel uses 0x001F0003 though - which lines up + # with modern Intel HDEF/HDAS devices. + # AMD uses 0x00140000 - which lines up with Intel XHCI + # controllers. We'll have to try to narrow down which + # is valid. + # + # Get our devices at the potential addresses + dev_1F4 = self.get_dev_at_adr(0x001F0004) + dev_1F3 = self.get_dev_at_adr(0x001F0003,exclude_names=("AZAL","HDEF","HDAS")) + dev_1B = self.get_dev_at_adr(0x001B0000) + dev_14 = self.get_dev_at_adr(0x00140000) + # Initialize our bus_check var + bus_check = adr = None + # Iterate our checks in order + if dev_1F4 and dev_1F3: + # We got the newer Intel approach + bus_check = dev_1F4 + adr = 0x001F0004 + elif dev_1F3 and dev_1B: + # We got the older Intel approach + bus_check = dev_1F3 + adr = 0x001F0003 + elif dev_1F4: + # *Likely* newer Intel approach + bus_check = dev_1F4 + adr = 0x001F0004 + elif dev_1F3: + # *Likely* older Intel approach + bus_check = dev_1F3 + adr = 0x001F0003 + elif dev_14: + # Neither of the Intel approaches, + # *likely* AMD + bus_check = dev_14 + adr = 0x00140000 + if not bus_check: + # Never found it - report the error and bail + print(" - Could not locate a valid bus device! 
Aborting.") + print("") + self.u.grab("Press [enter] to return to main menu...") + return + # Break out our vars + bus_path,bus_parent,table_name = bus_check + print(" - Located {} (0x{}) in {}".format( + bus_path, + hex(adr)[2:].upper().rjust(8,"0"), + table_name) + ) + print("Creating SSDT-SBUS-MCHC...") + # At this point - we have both paths, let's build our table + ssdt = """/* + * SMBus compatibility table. + * Original from: https://github.com/acidanthera/OpenCorePkg/blob/master/Docs/AcpiSamples/Source/SSDT-SBUS-MCHC.dsl + */ +DefinitionBlock ("", "SSDT", 2, "CORP", "SBUSMCHC", 0x00000000) +{ + External ([[bus_parent]], DeviceObj) + External ([[bus_parent]].MCHC, DeviceObj) + External ([[bus_path]], DeviceObj) + + // Only create MCHC if it doesn't already exist + If (LNot (CondRefOf ([[bus_parent]].MCHC))) + { + Scope ([[bus_parent]]) + { + Device (MCHC) + { + Name (_ADR, Zero) // _ADR: Address + Method (_STA, 0, NotSerialized) // _STA: Status + { + If (_OSI ("Darwin")) + { + Return (0x0F) + } + Else + { + Return (Zero) + } + } + } + } + } + + Device ([[bus_path]].BUS0) + { + Name (_CID, "smbus") // _CID: Compatible ID + Name (_ADR, Zero) // _ADR: Address + + /* + * Uncomment replacing 0x57 with your own value which might be found + * in SMBus section of Intel datasheet for your motherboard. + * + * The "diagsvault" is the diagnostic vault where messages are stored. + * It's located at address 87 (0x57) on the SMBus controller. + * While "diagsvault" may refer to diags, a hardware diagnosis program via EFI for Macs + * that communicates with the SMBus controller, the effect is really unknown for hacks. + * Uncomment this with caution. 
+ */ + + /** + Device (DVL0) + { + Name (_ADR, 0x57) // _ADR: Address + Name (_CID, "diagsvault") // _CID: Compatible ID + Method (_DSM, 4, NotSerialized) // _DSM: Device-Specific Method + { + If (!Arg2) + { + Return (Buffer (One) + { + 0x57 // W + }) + } + + Return (Package (0x02) + { + "address", + 0x57 + }) + } + } + **/ + + Method (_STA, 0, NotSerialized) // _STA: Status + { + If (_OSI ("Darwin")) + { + Return (0x0F) + } + Else + { + Return (Zero) + } + } + } +}""".replace("[[bus_parent]]",bus_parent).replace("[[bus_path]]",bus_path) + oc = { + "Comment":"Defines an MCHC and BUS0 device for SMBus compatibility", + "Enabled":True, + "Path":"SSDT-SBUS-MCHC.aml" + } + self.write_ssdt("SSDT-SBUS-MCHC",ssdt) + self.make_plist(oc, "SSDT-SBUS-MCHC.aml", ()) + print("") + print("Done.") + self.patch_warn() + self.u.grab("Press [enter] to return...") + return + + def ambient_light_sensor(self): + if not self.ensure_dsdt(): + return + self.u.head("Ambient Light Sensor") + print("") + print("Locating ACPI0008 (ALS) devices...") + for table_name in self.sorted_nicely(list(self.d.acpi_tables)): + table = self.d.acpi_tables[table_name] + print(" Checking {}...".format(table_name)) + # Try to find any ambient light sensor devices in the + # current table + als = self.d.get_device_paths_with_hid("ACPI0008",table=table) + if als: + print(" - Found at {}".format(als[0][0])) + print(" --> No fake needed!") + # Check for an _STA var, and override it if need be + sta = self.get_sta_var( + var=None, + device=als[0][0], + dev_hid="ACPI0008", + dev_name=als[0][0].split(".")[-1], + log_locate=False, + table=table + ) + if sta.get("patches"): + if self.sta_needs_patching(sta, table=table): + # We need to write a quick SSDT to force enable our + # light sensor + print("Creating SSDT-ALS0...") + ssdt = """ +DefinitionBlock ("", "SSDT", 2, "CORP", "ALS0", 0x00000000) +{ + External ([[als0_path]], DeviceObj) + External ([[als0_path]]._STA, [[sta_type]]) + External ([[als0_path]].XSTA, 
[[sta_type]]) + + If (LAnd (CondRefOf ([[als0_path]].XSTA), LNot (CondRefOf ([[als0_path]]._STA)))) + { + Scope ([[als0_path]]) + { + // Override our original Light Sensor _STA method + Method (_STA, 0, NotSerialized) // _STA: Status + { + If (_OSI ("Darwin")) + { + // Explicitly enable the Light Sensor in macOS + Return (0x0F) + } + Else + { + // Call the original, now renamed _STA method + // if we're not booting macOS + Return ([[XSTA]]) + } + } + } + } +}""".replace("[[als0_path]]",als[0][0]) \ +.replace("[[sta_type]]",sta.get("sta_type","MethodObj")) \ +.replace("[[XSTA]]","{}.XSTA{}".format(als[0][0]," ()" if sta.get("sta_type","MethodObj")=="MethodObj" else "")) + oc = { + "Comment":"Enables {} in macOS - requires _STA to XSTA rename".format(sta["dev_name"]), + "Enabled":True, + "Path":"SSDT-ALS0.aml" + } + self.write_ssdt("SSDT-ALS0",ssdt) + self.make_plist(oc,"SSDT-ALS0.aml",sta.get("patches",[])) + print("") + print("Done.") + self.patch_warn() + self.u.grab("Press [enter] to return...") + return + else: + print(" --> _STA properly enabled - no patching needed!") + else: + print(" --> Not found - no patching needed!") + print("") + self.u.grab("Press [enter] to return to main menu...") + return + # If we got here - we didn't find any + print("No ACPI0008 (ALS) devices found - fake needed...") + print("Creating SSDT-ALS0...") + ssdt = """// +// Original source from: +// https://github.com/acidanthera/OpenCorePkg/blob/master/Docs/AcpiSamples/Source/SSDT-ALS0.dsl +// +DefinitionBlock ("", "SSDT", 2, "CORP", "ALS0", 0x00000000) +{ + Scope (_SB) + { + Device (ALS0) + { + Name (_HID, "ACPI0008" /* Ambient Light Sensor Device */) // _HID: Hardware ID + Name (_CID, "smc-als") // _CID: Compatible ID + Name (_ALI, 0x012C) // _ALI: Ambient Light Illuminance + Name (_ALR, Package (0x01) // _ALR: Ambient Light Response + { + Package (0x02) + { + 0x64, + 0x012C + } + }) + Method (_STA, 0, NotSerialized) // _STA: Status + { + If (_OSI ("Darwin")) + { + Return (0x0F) + } 
+ Else + { + Return (Zero) + } + } + } + } +}""" + oc = { + "Comment":"Faked Ambient Light Sensor", + "Enabled":True, + "Path":"SSDT-ALS0.aml" + } + self.write_ssdt("SSDT-ALS0",ssdt) + self.make_plist(oc,"SSDT-ALS0.aml",()) + print("") + print("Done.") + self.patch_warn() + self.u.grab("Press [enter] to return...") + return + + def imei_bridge(self): + if not self.ensure_dsdt(): + return + def fake_note(): + print("\n{}NOTE:{} Ensure you fake the IMEI device-id as applicable in DeviceProperties:".format(self.yel,self.rst)) + print("") + print(" If you have a Sandy Bridge CPU with a 7-series chipset:") + print(" - device-id | Data | <3A1C0000>") + print("") + print(" If you have an Ivy Bridge CPU with a 6-series chipset:") + print(" - device-id | Data | <3A1E0000>") + print("") + def print_line(line,lines=[]): + print(line) + lines.append(line) + return lines + self.u.head("IMEI Bridge") + lines = print_line("") + lines = print_line("Locating IMEI devices at address 0x00160000...",lines) + imei = self.get_dev_at_adr(0x00160000) + if imei: + print(" - Located at {}".format( + imei[0] + )) + print(" --> No bridge needed!") + fake_note() + self.u.grab("Press [enter] to return to main menu...") + return + # We didn't find it + lines = print_line(" - Not located - bridge needed",lines) + lines = print_line("Checking for parent device...") + lines = print_line(" - Locating iGPU device at address 0x00020000...",lines) + parent = None + igpu = self.get_dev_at_adr(0x00020000) + if not igpu: + lines = print_line(" --> Not located!",lines) + lines = print_line(" - Attempting to locate PCI Roots...",lines) + pci_roots = [] + for table_name in self.sorted_nicely(list(self.d.acpi_tables)): + table = self.d.acpi_tables[table_name] + pci_roots = self.d.get_device_paths_with_id(_id="PNP0A08",table=table) + pci_roots += self.d.get_device_paths_with_id(_id="PNP0A03",table=table) + pci_roots += self.d.get_device_paths_with_id(_id="ACPI0016",table=table) + if pci_roots: + break # Bail 
on the first match + if not pci_roots: + print(" --> None found! Cannot continue.") + print("") + self.u.grab("Press [enter] to reeturn to main menu...") + return + parent = pci_roots[0][0] + lines = print_line(" --> Located at {}".format(parent),lines) + else: + lines = print_line(" --> Located at {}".format(igpu[0]),lines) + parent = ".".join(igpu[0].split(".")[:-1]) + lines = print_line(" --> Using parent: {}".format(parent),lines) + lines = print_line("Gathering device-id approach...") + # Ask the user what approach they're using + approach = None + while True: + self.u.head("Fake Device-ID") + print("") + print("Select your current CPU and chipset configuration:") + print("") + print("1. Sandy Bridge CPU with 7-series chipset") + print("2. Ivy Bridge CPU with 6-series chipset") + print("3. Do not fake device-id in SSDT (requires DeviceProperties)") + print("") + print("M. Main Menu") + print("Q. Quit") + print("") + m = self.u.grab("Please select an option: ") + if not m: continue + if m.lower() == "m": + return + if m.lower() == "q": + self.u.custom_quit() + if m not in ("1","2","3"): + continue + approach = {"1":1,"2":2}.get(m) + break + # Restore the lines up to this point + self.u.head("IMEI Bridge") + print("\n".join(lines)) + if approach is None: + print(" - Only building bridge, must fake using DeviceProperties!") + elif approach == 1: + print(" - Faking IMEI as 6-series to match Sandy Bridge CPU") + else: + print(" - Faking IMEI as 7-series to match Ivy Bridge CPU") + print("Creating SSDT-IMEI...") + ssdt = """// +// Original source from: +// https://github.com/acidanthera/OpenCorePkg/blob/master/Docs/AcpiSamples/Source/SSDT-IMEI.dsl +// +DefinitionBlock ("", "SSDT", 2, "CORP", "IMEI", 0x00000000) +{ + External ([[parent]], DeviceObj) + + Scope ([[parent]]) + { + Device (IMEI) + { + Name (_ADR, 0x00160000) // _ADR: Address""".replace("[[parent]]",parent) + if approach is not None: + # We're providing the fake in-line + ssdt += """ + Method (_DSM, 4, 
NotSerialized) + { + If (LEqual (Arg2, Zero)) { + Return (Buffer (One) { 0x03 }) + } + Return (Package (0x02) + { + "device-id", + Buffer (0x04) { 0x3A, 0x1[[fake]], 0x00, 0x00 } + }) + }""".replace("[[fake]]","C" if approach == 1 else "E") + ssdt += """ + } + } +} +""" + oc = { + "Comment":"IMEI Bridge to Allow Faking for Exotic CPU+Mobo Configurations", + "Enabled":True, + "Path":"SSDT-IMEI.aml" + } + self.write_ssdt("SSDT-IMEI",ssdt) + self.make_plist(oc,"SSDT-IMEI.aml",()) + if approach is None: + fake_note() + else: + print("") + print("Done.") + self.patch_warn() + self.u.grab("Press [enter] to return...") + return + + def pick_match_mode(self): + while True: + self.u.head("Select OpenCore Match Mode") + print("") + print("1. {}:".format(self.match_dict[0])) + print(" - Signature/Table ID Matching: {} ANY {}".format(self.red,self.rst)) + print(" - Table Length Matching: {} ANY {}".format(self.red,self.rst)) + print("2. {}:".format(self.match_dict[1])) + print(" - Signature/Table ID Matching: {} ANY {}".format(self.red,self.rst)) + print(" - Table Length Matching: {}STRICT{}".format(self.grn,self.rst)) + print("3. {}:".format(self.match_dict[2])) + print(" !! Requires NormalizeHeaders quirk is {}DISABLED{} in config.plist !!".format(self.red,self.rst)) + print(" - Signature/Table ID Matching: {}STRICT{}".format(self.grn,self.rst)) + print(" - Table Length Matching: {}STRICT{}".format(self.grn,self.rst)) + print("4. {}:".format(self.match_dict[3])) + print(" !! Requires NormalizeHeaders quirk is {}ENABLED{} in config.plist !!".format(self.grn,self.rst)) + print(" - Signature/Table ID Matching: {}STRICT{}".format(self.grn,self.rst)) + print(" - Table Length Matching: {}STRICT{}".format(self.grn,self.rst)) + print("") + print("Current Match Mode: {}".format(self.match_dict.get(self.match_mode,list(self.match_dict)[0]))) + print("M. Return to Menu") + print("Q. 
Quit") + print("") + menu = self.u.grab("Please select an option: ") + if not len(menu): + continue + elif menu.lower() == "m": + return + elif menu.lower() == "q": + self.u.custom_quit() + elif menu in ("1","2","3","4"): + self.match_mode = int(menu)-1 + return + + def main(self): + cwd = os.getcwd() + lines=[""] + if self.dsdt and self.d.acpi_tables: + lines.append("Currently Loaded Tables ({:,}):".format(len(self.d.acpi_tables))) + lines.append("") + lines.extend([" "+x for x in textwrap.wrap( + " ".join(self.sorted_nicely(list(self.d.acpi_tables))), + width=70, # Limit the width to 80 for aesthetics + break_on_hyphens=False + )]) + lines.extend([ + "", + "Loaded From: {}".format(self.dsdt) + ]) + else: + lines.append("Currently Loaded Tables: None") + lines.append("") + lines.append("1. FixHPET - Patch Out IRQ Conflicts") + lines.append("2. FakeEC - OS-Aware Fake EC") + lines.append("3. FakeEC Laptop - Only Builds Fake EC - Leaves Existing Untouched") + lines.append("4. USBX - Power properties for USB on SKL and newer SMBIOS") + lines.append("5. PluginType - Redefines CPU Objects as Processor and sets plugin-type = 1") + lines.append("6. PMC - Enables Native NVRAM on True 300-Series Boards") + lines.append("7. RTCAWAC - Context-Aware AWAC Disable and RTC Enable/Fake/Range Fix") + lines.append("8. USB Reset - Reset USB controllers to allow hardware mapping") + lines.append("9. PCI Bridge - Create missing PCI bridges for passed device path") + lines.append("0. PNLF - Sets up a PNLF device for laptop backlight control") + lines.append("A. XOSI - _OSI rename and patch to return true for a range of Windows") + lines.append(" versions - also checks for OSID") + lines.append("B. Fix DMAR - Remove Reserved Memory Regions from the DMAR table") + lines.append("C. SMBus - Defines an MCHC and BUS0 device for SMBus compatibility") + lines.append("E. 
ACPI > Device - Searches the loaded tables for the passed ACPI path and") + lines.append(" prints the corresponding Device Path") + lines.append("F. ALS0 - Defines a fake Ambient Light Sensor") + lines.append("G. IMEI Bridge - Defines IMEI - only needed on SNB+7-series or IVB+6-series") + lines.append("") + if sys.platform.startswith("linux") or sys.platform == "win32": + lines.append("P. Dump the current system's ACPI tables") + if self.d.iasl_legacy: + lines.append("L. Use Legacy Compiler for macOS 10.6 and prior: {}".format("{}!! Enabled !!{}".format(self.yel,self.rst) if self.iasl_legacy else "Disabled")) + lines.append("D. Select ACPI table or folder containing tables") + lines.append("M. OpenCore Match Mode: {}".format( + self.match_dict.get(self.match_mode,list(self.match_dict)[0]) + )) + lines.append("R. {} Window Resizing".format("Enable" if not self.resize_window else "Disable")) + lines.append("Q. Quit") + lines.append("") + if self.resize_window: + self.u.resize(self.w,max(self.h,len(lines)+4)) + self.u.head() + print("\n".join(lines)) + menu = self.u.grab("Please make a selection: ") + if not len(menu): + return + if self.resize_window: + self.u.resize(self.w,self.h) + if menu.lower() == "q": + self.u.custom_quit() + if menu.lower() == "d": + self.dsdt = self.select_dsdt() + return + if menu == "1": + self.fix_hpet() + elif menu == "2": + self.fake_ec() + elif menu == "3": + self.fake_ec(True) + elif menu == "4": + self.ssdt_usbx() + elif menu == "5": + self.plugin_type() + elif menu == "6": + self.ssdt_pmc() + elif menu == "7": + self.ssdt_awac() + elif menu == "8": + self.ssdt_rhub() + elif menu == "9": + self.pci_bridge() + elif menu == "0": + self.ssdt_pnlf() + elif menu.lower() == "a": + self.ssdt_xosi() + elif menu.lower() == "b": + self.fix_dmar() + elif menu.lower() == "c": + self.smbus() + elif menu.lower() == "e": + self.acpi_device_path() + elif menu.lower() == "f": + self.ambient_light_sensor() + elif menu.lower() == "g": + 
def get_size(size, suffix=None, use_1024=False, round_to=2, strip_zeroes=False):
    """Return a human-readable string for *size* (a byte count).

    suffix       - optional target unit (B, KB, MB, ...) to force, matched
                   case-insensitively; ignored if not recognized
    use_1024     - display binary units (KiB/MiB/...) instead of decimal (KB/MB/...)
    round_to     - number of decimal places to keep, clamped to 0-15 (bad
                   values fall back to 2)
    strip_zeroes - drop trailing zeroes from the fractional part
    """
    # -1 is used by callers as an "unknown size" sentinel
    if size == -1:
        return "Unknown"
    # Pick our unit table and divisor based on use_1024
    if use_1024:
        units, divisor = ["B","KiB","MiB","GiB","TiB","PiB"], 1024
    else:
        units, divisor = ["B","KB","MB","GB","TB","PB"], 1000
    # Express the size in every unit via successive division
    # (kept sequential to preserve the original float behavior)
    value = float(size)
    scaled = {}
    for unit_name in units:
        scaled[unit_name] = value
        value /= divisor
    # Normalize a requested suffix to a known unit, case-insensitively;
    # unknown suffixes become None and we fall through to auto-selection
    if suffix:
        suffix = next((u for u in units if u.lower() == suffix.lower()), None)
    if suffix:
        unit = suffix
    else:
        # Largest unit whose value is still >= 1, defaulting to bytes
        unit = "B"
        for candidate in reversed(units):
            if scaled[candidate] >= 1:
                unit = candidate
                break
    # Clamp round_to to an int in [0,15]; default to 2 on conversion error
    try:
        round_to = int(round_to)
    except:
        round_to = 2
    round_to = min(max(round_to, 0), 15)
    rounded = round(scaled[unit], round_to)
    # Split into whole and fractional text parts
    whole, frac = str(rounded).split(".")
    # Strip or pad the fractional part as requested
    if strip_zeroes:
        frac = frac.rstrip("0")
    elif round_to > 0:
        frac = frac.ljust(round_to, "0")
    else:
        frac = ""
    return "{:,}{} {}".format(int(whole), "."+frac if frac else "", unit)
t_s.split(" ")[1]) + except: + b_s = get_size(bytes_so_far) + perc_str = " {:.2f}%".format(percent) + bar_width = (TERMINAL_WIDTH // 3)-len(perc_str) + progress = "=" * int(bar_width * (percent/100)) + sys.stdout.write("\r\033[K{}/{} | {}{}{}{}{}".format( + b_s, + t_s, + progress, + " " * (bar_width-len(progress)), + perc_str, + speed, + remaining + )) + else: + b_s = get_size(bytes_so_far) + sys.stdout.write("\r\033[K{}{}".format(b_s, speed)) + sys.stdout.flush() + # Now we gather the next packet + try: + packet = queue.get(timeout=update_interval) + # Packets should be formatted as a tuple of + # (timestamp, len(bytes_downloaded)) + # If "DONE" is passed, we assume the download + # finished - and bail + if packet == "DONE": + print("") # Jump to the next line + return + # Append our packet to the list and ensure we're not + # beyond our max. + # Only check max if it's > 0 + packets.append(packet) + if max_packets > 0: + packets = packets[-max_packets:] + # Increment our bytes so far as well + bytes_so_far += packet[1] + except q.Empty: + # Didn't get anything - reset the speed + # and packets + packets = [] + speed = " | 0 B/s" + remaining = " | ?? left" if total_size > 0 else "" + except KeyboardInterrupt: + print("") # Jump to the next line + return + # If we have packets and it's time for an update, process + # the info. + update_check = time.time() + if packets and update_check - last_update >= update_interval: + last_update = update_check # Refresh our update timestamp + speed = " | ?? B/s" + if len(packets) > 1: + # Let's calculate the amount downloaded over how long + try: + first,last = packets[0][0],packets[-1][0] + chunks = sum([float(x[1]) for x in packets]) + t = last-first + assert t >= 0 + bytes_speed = 1. 
/ t * chunks + speed = " | {}/s".format(get_size(bytes_speed,round_to=1)) + # Get our remaining time + if total_size > 0: + seconds_left = (total_size-bytes_so_far) / bytes_speed + days = seconds_left // 86400 + hours = (seconds_left - (days*86400)) // 3600 + mins = (seconds_left - (days*86400) - (hours*3600)) // 60 + secs = seconds_left - (days*86400) - (hours*3600) - (mins*60) + if days > 99 or bytes_speed == 0: + remaining = " | ?? left" + else: + remaining = " | {}{:02d}:{:02d}:{:02d} left".format( + "{}:".format(int(days)) if days else "", + int(hours), + int(mins), + int(round(secs)) + ) + except: + pass + # Clear the packets so we don't reuse the same ones + packets = [] + +class Downloader: + + def __init__(self,**kwargs): + self.ua = kwargs.get("useragent",{"User-Agent":"Mozilla"}) + self.chunk = 1048576 # 1024 x 1024 i.e. 1MiB + if os.name=="nt": os.system("color") # Initialize cmd for ANSI escapes + # Provide reasonable default logic to workaround macOS CA file handling + cafile = ssl.get_default_verify_paths().openssl_cafile + try: + # If default OpenSSL CA file does not exist, use that from certifi + if not os.path.exists(cafile): + import certifi + cafile = certifi.where() + self.ssl_context = ssl.create_default_context(cafile=cafile) + except: + # None of the above worked, disable certificate verification for now + self.ssl_context = ssl._create_unverified_context() + return + + def _decode(self, value, encoding="utf-8", errors="ignore"): + # Helper method to only decode if bytes type + if sys.version_info >= (3,0) and isinstance(value, bytes): + return value.decode(encoding,errors) + return value + + def _update_main_name(self): + # Windows running python 2 seems to have issues with multiprocessing + # if the case of the main script's name is incorrect: + # e.g. Downloader.py vs downloader.py + # + # To work around this, we try to scrape for the correct case if + # possible. 
+ try: + path = os.path.abspath(sys.modules["__main__"].__file__) + except AttributeError as e: + # This likely means we're running from the interpreter + # directly + return None + if not os.path.isfile(path): + return None + # Get the file name and folder path + name = os.path.basename(path).lower() + fldr = os.path.dirname(path) + # Walk the files in the folder until we find our + # name - then steal its case and update that path + for f in os.listdir(fldr): + if f.lower() == name: + # Got it + new_path = os.path.join(fldr,f) + sys.modules["__main__"].__file__ = new_path + return new_path + # If we got here, it wasn't found + return None + + def _get_headers(self, headers = None): + # Fall back on the default ua if none provided + target = headers if isinstance(headers,dict) else self.ua + new_headers = {} + # Shallow copy to prevent changes to the headers + # overriding the original + for k in target: + new_headers[k] = target[k] + return new_headers + + def open_url(self, url, headers = None): + headers = self._get_headers(headers) + # Wrap up the try/except block so we don't have to do this for each function + try: + response = urlopen(Request(url, headers=headers), context=self.ssl_context) + except Exception as e: + # No fixing this - bail + return None + return response + + def get_size(self, *args, **kwargs): + return get_size(*args,**kwargs) + + def get_string(self, url, progress = True, headers = None, expand_gzip = True): + response = self.get_bytes(url,progress,headers,expand_gzip) + if response is None: return None + return self._decode(response) + + def get_bytes(self, url, progress = True, headers = None, expand_gzip = True): + response = self.open_url(url, headers) + if response is None: return None + try: total_size = int(response.headers['Content-Length']) + except: total_size = -1 + chunk_so_far = b"" + packets = queue = process = None + if progress: + # Make sure our vars are initialized + packets = [] if progress else None + queue = 
multiprocessing.Queue() + # Create the multiprocess and start it + process = multiprocessing.Process( + target=_process_hook, + args=(queue,total_size) + ) + process.daemon = True + # Filthy hack for earlier python versions on Windows + if os.name == "nt" and hasattr(multiprocessing,"forking"): + self._update_main_name() + process.start() + try: + while True: + chunk = response.read(self.chunk) + if progress: + # Add our items to the queue + queue.put((time.time(),len(chunk))) + if not chunk: break + chunk_so_far += chunk + finally: + # Close the response whenever we're done + response.close() + if expand_gzip and response.headers.get("Content-Encoding","unknown").lower() == "gzip": + fileobj = BytesIO(chunk_so_far) + gfile = gzip.GzipFile(fileobj=fileobj) + return gfile.read() + if progress: + # Finalize the queue and wait + queue.put("DONE") + process.join() + return chunk_so_far + + def stream_to_file(self, url, file_path, progress = True, headers = None, ensure_size_if_present = True, allow_resume = False): + response = self.open_url(url, headers) + if response is None: return None + bytes_so_far = 0 + try: total_size = int(response.headers['Content-Length']) + except: total_size = -1 + packets = queue = process = None + mode = "wb" + if allow_resume and os.path.isfile(file_path) and total_size != -1: + # File exists, we're resuming and have a target size. Check the + # local file size. 
+ current_size = os.stat(file_path).st_size + if current_size == total_size: + # File is already complete - return the path + return file_path + elif current_size < total_size: + response.close() + # File is not complete - seek to our current size + bytes_so_far = current_size + mode = "ab" # Append + # We also need to try creating a new request + # in order to pass our range header + new_headers = self._get_headers(headers) + # Get the start byte, 0-indexed + byte_string = "bytes={}-".format(current_size) + new_headers["Range"] = byte_string + response = self.open_url(url, new_headers) + if response is None: return None + if progress: + # Make sure our vars are initialized + packets = [] if progress else None + queue = multiprocessing.Queue() + # Create the multiprocess and start it + process = multiprocessing.Process( + target=_process_hook, + args=(queue,total_size,bytes_so_far) + ) + process.daemon = True + # Filthy hack for earlier python versions on Windows + if os.name == "nt" and hasattr(multiprocessing,"forking"): + self._update_main_name() + process.start() + with open(file_path,mode) as f: + try: + while True: + chunk = response.read(self.chunk) + bytes_so_far += len(chunk) + if progress: + # Add our items to the queue + queue.put((time.time(),len(chunk))) + if not chunk: break + f.write(chunk) + finally: + # Close the response whenever we're done + response.close() + if progress: + # Finalize the queue and wait + queue.put("DONE") + process.join() + if ensure_size_if_present and total_size != -1: + # We're verifying size - make sure we got what we asked for + if bytes_so_far != total_size: + return None # We didn't - imply it failed + return file_path if os.path.exists(file_path) else None diff --git a/ACPI/SSDTTime-master/Scripts/dsdt.py b/ACPI/SSDTTime-master/Scripts/dsdt.py new file mode 100644 index 0000000..1debe51 --- /dev/null +++ b/ACPI/SSDTTime-master/Scripts/dsdt.py @@ -0,0 +1,907 @@ +import os, errno, tempfile, shutil, plistlib, sys, 
binascii, zipfile, getpass, re +from . import run, downloader, utils + +try: + FileNotFoundError +except NameError: + FileNotFoundError = IOError + +class DSDT: + def __init__(self, **kwargs): + self.dl = downloader.Downloader() + self.r = run.Run() + self.u = utils.Utils("SSDT Time") + self.iasl_url_macOS = "https://raw.githubusercontent.com/acidanthera/MaciASL/master/Dist/iasl-stable" + self.iasl_url_macOS_legacy = "https://raw.githubusercontent.com/acidanthera/MaciASL/master/Dist/iasl-legacy" + self.iasl_url_linux = "https://raw.githubusercontent.com/corpnewt/linux_iasl/main/iasl.zip" + self.iasl_url_linux_legacy = "https://raw.githubusercontent.com/corpnewt/iasl-legacy/main/iasl-legacy-linux.zip" + self.acpi_github_windows = "https://github.com/acpica/acpica/releases/latest" + self.acpi_binary_tools = "https://www.intel.com/content/www/us/en/developer/topic-technology/open/acpica/download.html" + self.iasl_url_windows_legacy = "https://raw.githubusercontent.com/corpnewt/iasl-legacy/main/iasl-legacy-windows.zip" + self.h = {} # {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"} + self.iasl = self.check_iasl() + self.iasl_legacy = self.check_iasl(legacy=True) + if not self.iasl: + url = (self.acpi_github_windows,self.acpi_binary_tools) if os.name=="nt" else \ + self.iasl_url_macOS if sys.platform=="darwin" else \ + self.iasl_url_linux if sys.platform.startswith("linux") else None + exception = "Could not locate or download iasl!" 
+ if url: + exception += "\n\nPlease manually download {} from:\n - {}\n\nAnd place in:\n - {}\n".format( + "and extract iasl.exe and acpidump.exe" if os.name=="nt" else "iasl", + "\n - ".join(url) if isinstance(url,(list,tuple)) else url, + os.path.dirname(os.path.realpath(__file__)) + ) + raise Exception(exception) + self.allowed_signatures = (b"APIC",b"DMAR",b"DSDT",b"SSDT") + self.mixed_listing = (b"DSDT",b"SSDT") + self.acpi_tables = {} + # Setup regex matches + self.hex_match = re.compile(r"^\s*[0-9A-F]{4,}:(\s[0-9A-F]{2})+(\s+\/\/.*)?$") + self.type_match = re.compile(r".*(?PProcessor|Scope|Device|Method|Name) \((?P[^,\)]+).*") + + def _table_signature(self, table_path, table_name = None, data = None): + path = os.path.join(table_path,table_name) if table_name else table_path + if not os.path.isfile(path): + return None + if data: + # Got data - make sure there's enough for a signature + if len(data) >= 4: + return data[:4] + else: + return None + # Try to load it and read the first 4 bytes to verify the + # signature + with open(path,"rb") as f: + try: + return f.read(4) + except: + pass + return None + + def non_ascii_count(self, data): + # Helper to emulate the ACPI_IS_ASCII macro from ACPICA's code + # It just appears to check if the passed byte is < 0x80 + # We'll check all available data though - and return the number + # of non-ascii bytes + non_ascii = 0 + for b in data: + if not isinstance(b,int): + try: b = ord(b) + except: b = -1 + if not b < 0x80: + non_ascii += 1 + return non_ascii + + def table_is_valid(self, table_path, table_name = None, ensure_binary = True, check_signature = True): + # Ensure we have a valid file + path = os.path.join(table_path,table_name) if table_name else table_path + if not os.path.isfile(path): + return False + # Set up a data placeholder + data = None + if ensure_binary is not None: + # Make sure the table is the right type - load it + # and read the data + with open(path,"rb") as f: + data = f.read() + # Make sure 
we actually got some data + if not data: + return False + # Gather the non-ASCII char count + non_ascii_count = self.non_ascii_count(data) + if ensure_binary and not non_ascii_count: + # We want a binary, but it's all ascii + return False + elif not ensure_binary and non_ascii_count: + # We want ascii, and got a binary + return False + if check_signature: + if not self._table_signature(path,data=data) in self.allowed_signatures: + # Check with the function - we didn't load the table + # already + return False + # If we got here - the table passed our checks + return True + + def get_ascii_print(self, data): + # Helper to sanitize unprintable characters by replacing them with + # ? where needed + unprintables = False + ascii_string = "" + for b in data: + if not isinstance(b,int): + try: b = ord(b) + except: b = -1 + if ord(" ") <= b < ord("~"): + ascii_string += chr(b) + else: + ascii_string += "?" + unprintables = True + return (unprintables,ascii_string) + + def load(self, table_path): + # Attempt to load the passed file - or if a directory + # was passed, load all .aml and .dat files within + cwd = os.getcwd() + temp = None + target_files = {} + failed = [] + try: + if os.path.isdir(table_path): + # Got a directory - gather all valid + # files in the directory + valid_files = [ + x for x in os.listdir(table_path) if self.table_is_valid(table_path,x) + ] + elif os.path.isfile(table_path): + # Just loading the one table - don't check + # the signature - but make sure it's binary + if self.table_is_valid(table_path,check_signature=False): + valid_files = [table_path] + else: + # Not valid - raise an error + raise FileNotFoundError( + errno.ENOENT, + os.strerror(errno.ENOENT), + "{} is not a valid .aml/.dat file.".format(table_path) + ) + else: + # Not a valid path + raise FileNotFoundError( + errno.ENOENT, + os.strerror(errno.ENOENT), + table_path + ) + if not valid_files: + # No valid files were found + raise FileNotFoundError( + errno.ENOENT, + 
os.strerror(errno.ENOENT), + "No valid .aml/.dat files found at {}".format(table_path) + ) + # Create a temp dir and copy all files there + temp = tempfile.mkdtemp() + for file in valid_files: + shutil.copy( + os.path.join(table_path,file), + temp + ) + # Build a list of all target files in the temp folder - and save + # the disassembled_name for each to verify after + list_dir = os.listdir(temp) + for x in list_dir: + if len(list_dir) > 1 and not self.table_is_valid(temp,x): + continue # Skip invalid files when multiple are passed + name_ext = [y for y in os.path.basename(x).split(".") if y] + if name_ext and name_ext[-1].lower() in ("asl","dsl"): + continue # Skip any already disassembled files + target_files[x] = { + "assembled_name": os.path.basename(x), + "disassembled_name": ".".join(x.split(".")[:-1]) + ".dsl", + } + if not target_files: + # Somehow we ended up with none? + raise FileNotFoundError( + errno.ENOENT, + os.strerror(errno.ENOENT), + "No valid .aml/.dat files found at {}".format(table_path) + ) + os.chdir(temp) + # Generate and run a command + dsdt_or_ssdt = [x for x in list(target_files) if self._table_signature(temp,x) in self.mixed_listing] + other_tables = [x for x in list(target_files) if not x in dsdt_or_ssdt] + out_d = ("","",0) + out_t = ("","",0) + + def exists(folder_path,file_name): + # Helper to make sure the file exists and has a non-Zero size + check_path = os.path.join(folder_path,file_name) + if os.path.isfile(check_path) and os.stat(check_path).st_size > 0: + return True + return False + + # Check our DSDT and SSDTs first + if dsdt_or_ssdt: + args = [self.iasl,"-da","-dl","-l"]+list(dsdt_or_ssdt) + out_d = self.r.run({"args":args}) + if out_d[2] != 0: + # Attempt to run without `-da` if the above failed + args = [self.iasl,"-dl","-l"]+list(dsdt_or_ssdt) + out_d = self.r.run({"args":args}) + # Get a list of disassembled names that failed + fail_temp = [] + for x in dsdt_or_ssdt: + if not 
exists(temp,target_files[x]["disassembled_name"]): + fail_temp.append(x) + # Let's try to disassemble any that failed individually + for x in fail_temp: + args = [self.iasl,"-dl","-l",x] + self.r.run({"args":args}) + if not exists(temp,target_files[x]["disassembled_name"]): + failed.append(x) + # Check for other tables (DMAR, APIC, etc) + if other_tables: + args = [self.iasl]+list(other_tables) + out_t = self.r.run({"args":args}) + # Get a list of disassembled names that failed + for x in other_tables: + if not exists(temp,target_files[x]["disassembled_name"]): + failed.append(x) + if len(failed) == len(target_files): + raise Exception("Failed to disassemble - {}".format(", ".join(failed))) + # Actually process the tables now + to_remove = [] + for file in target_files: + # We need to load the .aml and .dsl into memory + # and get the paths and scopes + if not exists(temp,target_files[file]["disassembled_name"]): + to_remove.append(file) + continue + with open(os.path.join(temp,target_files[file]["disassembled_name"]),"r") as f: + target_files[file]["table"] = f.read() + # Remove the compiler info at the start + if target_files[file]["table"].startswith("/*"): + target_files[file]["table"] = "*/".join(target_files[file]["table"].split("*/")[1:]).strip() + # Check for "Table Header:" or "Raw Table Data: Length" and strip everything + # after the last occurrence + for h in ("\nTable Header:","\nRaw Table Data: Length"): + if h in target_files[file]["table"]: + target_files[file]["table"] = h.join(target_files[file]["table"].split(h)[:-1]).rstrip() + break # Bail on the first match + target_files[file]["lines"] = target_files[file]["table"].split("\n") + target_files[file]["scopes"] = self.get_scopes(table=target_files[file]) + target_files[file]["paths"] = self.get_paths(table=target_files[file]) + with open(os.path.join(temp,file),"rb") as f: + table_bytes = f.read() + target_files[file]["raw"] = table_bytes + # Let's read the table header and get the info we need + 
# + # [0:4] = Table Signature + # [4:8] = Length (little endian) + # [8] = Compliance Revision + # [9] = Checksum + # [10:16] = OEM ID (6 chars, padded to the right with \x00) + # [16:24] = Table ID (8 chars, padded to the right with \x00) + # [24:28] = OEM Revision (little endian) + # + target_files[file]["signature"] = table_bytes[0:4] + target_files[file]["revision"] = table_bytes[8] + target_files[file]["oem"] = table_bytes[10:16] + target_files[file]["id"] = table_bytes[16:24] + target_files[file]["oem_revision"] = int(binascii.hexlify(table_bytes[24:28][::-1]),16) + target_files[file]["length"] = len(table_bytes) + # Get the printable versions of the sig, oem, and id as needed + for key in ("signature","oem","id"): + unprintable,ascii_string = self.get_ascii_print(target_files[file][key]) + if unprintable: + target_files[file][key+"_ascii"] = ascii_string + # Cast as int on py2, and try to decode bytes to strings on py3 + if 2/3==0: + target_files[file]["revision"] = int(binascii.hexlify(target_files[file]["revision"]),16) + if target_files[file]["signature"] in self.mixed_listing: + # The disassembler omits the last line of hex data in a mixed listing + # file... convenient. However - we should be able to reconstruct this + # manually. 
+ last_hex = next((l for l in target_files[file]["lines"][::-1] if self.is_hex(l)),None) + if last_hex: + # Get the address left of the colon + addr = int(last_hex.split(":")[0].strip(),16) + # Get the hex bytes right of the colon + hexs = last_hex.split(":")[1].split("//")[0].strip() + # Increment the address by the number of hex bytes + next_addr = addr+len(hexs.split()) + # Now we need to get the bytes at the end + hexb = self.get_hex_bytes(hexs.replace(" ","")) + # Get the last occurrence after the split + remaining = target_files[file]["raw"].split(hexb)[-1] + else: + # If we didn't get a last hex val - then we likely don't have any + # This can happen if the file passed is small enough, or has all + # the data in a single block. + next_addr = 0 + remaining = target_files[file]["raw"] + # Iterate in chunks of 16 + for chunk in [remaining[i:i+16] for i in range(0,len(remaining),16)]: + # Build a new byte string + hex_string = binascii.hexlify(chunk) + # Decode the bytes if we're on python 3 + if 2/3!=0: hex_string = hex_string.decode() + # Ensure the bytes are all upper case + hex_string = hex_string.upper() + l = " {}: {}".format( + hex(next_addr)[2:].upper().rjust(4,"0"), + " ".join([hex_string[i:i+2] for i in range(0,len(hex_string),2)]) + ) + # Increment our address + next_addr += len(chunk) + # Append our line + target_files[file]["lines"].append(l) + target_files[file]["table"] += "\n"+l + # Remove any that didn't disassemble + for file in to_remove: + target_files.pop(file,None) + except Exception as e: + print(e) + return ({},failed) + finally: + os.chdir(cwd) + if temp: shutil.rmtree(temp,ignore_errors=True) + # Add/update any tables we loaded + for table in target_files: + self.acpi_tables[table] = target_files[table] + # Only return the newly loaded results + return (target_files, failed,) + + def get_latest_iasl(self): + # First try getting from github - if that fails, fall back to intel.com + try: + source = 
self.dl.get_string(self.acpi_github_windows, progress=False, headers=self.h) + assets_url = None + # Check for attachments first + for line in source.split("\n"): + if 'iasl compiler and windows acpi tools" in line.lower(): + # Check if we have a direct download link - i.e. ends with .zip - or if we're + # redirected to a different download page - i.e. ends with .html + dl_link = line.split('iASL Compiler and Windows ACPI Tools + # Only a suffix - prepend to it + dl_page_url = "https://www.intel.com" + line.split(' {} failed: {}".format(f,new_name,e)) + print("Dump successful!") + if disassemble: + return self.load(res) + return res + else: + print("Failed to locate acpidump.exe") + return + elif sys.platform.startswith("linux"): + table_dir = "/sys/firmware/acpi/tables" + if not os.path.isdir(table_dir): + print("Could not locate {}!".format(table_dir)) + return + print("Copying tables to {}...".format(res)) + copied_files = [] + for table in os.listdir(table_dir): + if not os.path.isfile(os.path.join(table_dir,table)): + continue # We only want files + target_path = os.path.join(res,table.upper()+".aml") + comms = ( + # Copy the file + ["sudo","cp",os.path.join(table_dir,table),target_path], + # Ensure it's owned by the user account + ["sudo","chown",getpass.getuser(),target_path], + # Enable read and write permissions + ["sudo","chmod","a+rw",target_path] + ) + # Iterate our commands and bail if any error + for comm in comms: + out = self.r.run({"args":comm}) + if check_command_output(out): + return + print("Dump successful!") + if disassemble: + return self.load(res) + return res + + def check_output(self, output): + t_folder = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), output) + if not os.path.isdir(t_folder): + os.makedirs(t_folder) + return t_folder + + def get_hex_from_int(self, total, pad_to = 4): + hex_str = hex(total)[2:].upper().rjust(pad_to,"0") + return "".join([hex_str[i:i + 2] for i in range(0, len(hex_str), 
2)][::-1]) + + def get_hex(self, line): + # strip the header and commented end + return line.split(":")[1].split("//")[0].replace(" ","") + + def get_line(self, line): + # Strip the header and commented end - no space replacing though + line = line.split("//")[0] + if ":" in line: + return line.split(":")[1] + return line + + def get_hex_bytes(self, line): + return binascii.unhexlify(line) + + def get_str_bytes(self, value): + if 2/3!=0 and isinstance(value,str): + value = value.encode() + return value + + def get_table_with_id(self, table_id): + table_id = self.get_str_bytes(table_id) + return next((v for k,v in self.acpi_tables.items() if table_id == v.get("id")),None) + + def get_table_with_signature(self, table_sig): + table_sig = self.get_str_bytes(table_sig) + return next((v for k,v in self.acpi_tables.items() if table_sig == v.get("signature")),None) + + def get_table(self, table_id_or_sig): + table_id_or_sig = self.get_str_bytes(table_id_or_sig) + return next((v for k,v in self.acpi_tables.items() if table_id_or_sig in (v.get("signature"),v.get("id"))),None) + + def get_dsdt(self): + return self.get_table_with_signature("DSDT") + + def get_dsdt_or_only(self): + dsdt = self.get_dsdt() + if dsdt: return dsdt + # Make sure we have only one table + if len(self.acpi_tables) != 1: + return None + return list(self.acpi_tables.values())[0] + + def find_previous_hex(self, index=0, table=None): + if not table: table = self.get_dsdt_or_only() + if not table: return ("",-1,-1) + # Returns the index of the previous set of hex digits before the passed index + start_index = -1 + end_index = -1 + old_hex = True + for i,line in enumerate(table.get("lines","")[index::-1]): + if old_hex: + if not self.is_hex(line): + # Broke out of the old hex + old_hex = False + continue + # Not old_hex territory - check if we got new hex + if self.is_hex(line): # Checks for a :, but not in comments + end_index = index-i + hex_text,start_index = self.get_hex_ending_at(end_index,table=table) 
+ return (hex_text, start_index, end_index) + return ("",start_index,end_index) + + def find_next_hex(self, index=0, table=None): + if not table: table = self.get_dsdt_or_only() + if not table: return ("",-1,-1) + # Returns the index of the next set of hex digits after the passed index + start_index = -1 + end_index = -1 + old_hex = True + for i,line in enumerate(table.get("lines","")[index:]): + if old_hex: + if not self.is_hex(line): + # Broke out of the old hex + old_hex = False + continue + # Not old_hex territory - check if we got new hex + if self.is_hex(line): # Checks for a :, but not in comments + start_index = i+index + hex_text,end_index = self.get_hex_starting_at(start_index,table=table) + return (hex_text, start_index, end_index) + return ("",start_index,end_index) + + def is_hex(self, line): + return self.hex_match.match(line) is not None + + def get_hex_starting_at(self, start_index, table=None): + if not table: table = self.get_dsdt_or_only() + if not table: return ("",-1) + # Returns a tuple of the hex, and the ending index + hex_text = "" + index = -1 + for i,x in enumerate(table.get("lines","")[start_index:]): + if not self.is_hex(x): + break + hex_text += self.get_hex(x) + index = i+start_index + return (hex_text, index) + + def get_hex_ending_at(self, start_index, table=None): + if not table: table = self.get_dsdt_or_only() + if not table: return ("",-1) + # Returns a tuple of the hex, and the ending index + hex_text = "" + index = -1 + for i,x in enumerate(table.get("lines","")[start_index::-1]): + if not self.is_hex(x): + break + hex_text = self.get_hex(x)+hex_text + index = start_index-i + return (hex_text, index) + + def get_shortest_unique_pad(self, current_hex, index, instance=0, table=None): + if not table: table = self.get_dsdt_or_only() + if not table: return None + try: left_pad = self.get_unique_pad(current_hex, index, False, instance, table=table) + except: left_pad = None + try: right_pad = self.get_unique_pad(current_hex, index, 
True, instance, table=table) + except: right_pad = None + try: mid_pad = self.get_unique_pad(current_hex, index, None, instance, table=table) + except: mid_pad = None + if left_pad == right_pad == mid_pad is None: raise Exception("No unique pad found!") + # We got at least one unique pad + min_pad = None + for x in (left_pad,right_pad,mid_pad): + if x is None: continue # Skip + if min_pad is None or len(x[0]+x[1]) < len(min_pad[0]+min_pad[1]): + min_pad = x + return min_pad + + def get_unique_pad(self, current_hex, index, direction=None, instance=0, table=None): + if not table: table = self.get_dsdt_or_only() + if not table: raise Exception("No valid table passed!") + # Returns any pad needed to make the passed patch unique + # direction can be True = forward, False = backward, None = both + start_index = index + line,last_index = self.get_hex_starting_at(index,table=table) + if last_index == -1: + raise Exception("Could not find hex starting at index {}!".format(index)) + first_line = line + # Assume at least 1 byte of our current_hex exists at index, so we need to at + # least load in len(current_hex)-2 worth of data if we haven't found it. 
+ while True: + if current_hex in line or len(line) >= len(first_line)+len(current_hex): + break # Assume we've hit our cap + new_line,_index,last_index = self.find_next_hex(last_index, table=table) + if last_index == -1: + raise Exception("Hit end of file before passed hex was located!") + # Append the new info + line += new_line + if not current_hex in line: + raise Exception("{} not found in table at index {}-{}!".format(current_hex,start_index,last_index)) + padl = padr = "" + parts = line.split(current_hex) + if instance >= len(parts)-1: + raise Exception("Instance out of range!") + linel = current_hex.join(parts[0:instance+1]) + liner = current_hex.join(parts[instance+1:]) + last_check = True # Default to forward + while True: + # Check if our hex string is unique + check_bytes = self.get_hex_bytes(padl+current_hex+padr) + if table["raw"].count(check_bytes) == 1: # Got it! + break + if direction == True or (direction is None and len(padr)<=len(padl)): + # Let's check a forward byte + if not len(liner): + # Need to grab more + liner, _index, last_index = self.find_next_hex(last_index, table=table) + if last_index == -1: raise Exception("Hit end of file before unique hex was found!") + padr = padr+liner[0:2] + liner = liner[2:] + continue + if direction == False or (direction is None and len(padl)<=len(padr)): + # Let's check a backward byte + if not len(linel): + # Need to grab more + linel, start_index, _index = self.find_previous_hex(start_index, table=table) + if _index == -1: raise Exception("Hit end of file before unique hex was found!") + padl = linel[-2:]+padl + linel = linel[:-2] + continue + break + return (padl,padr) + + def get_devices(self,search=None,types=("Device (","Scope ("),strip_comments=False,table=None): + if not table: table = self.get_dsdt_or_only() + if not table: return [] + # Returns a list of tuples organized as (Device/Scope,d_s_index,matched_index) + if search is None: + return [] + last_device = None + device_index = 0 + devices = 
[] + for index,line in enumerate(table.get("lines","")): + if self.is_hex(line): + continue + line = self.get_line(line) if strip_comments else line + if any ((x for x in types if x in line)): + # Got a last_device match + last_device = line + device_index = index + if search in line: + # Got a search hit - add it + devices.append((last_device,device_index,index)) + return devices + + def get_scope(self,starting_index=0,add_hex=False,strip_comments=False,table=None): + if not table: table = self.get_dsdt_or_only() + if not table: return [] + # Walks the scope starting at starting_index, and returns when + # we've exited + brackets = None + scope = [] + for line in table.get("lines","")[starting_index:]: + if self.is_hex(line): + if add_hex: + scope.append(line) + continue + line = self.get_line(line) if strip_comments else line + scope.append(line) + if brackets is None: + if line.count("{"): + brackets = line.count("{") + continue + brackets = brackets + line.count("{") - line.count("}") + if brackets <= 0: + # We've exited the scope + return scope + return scope + + def get_scopes(self, table=None): + if not table: table = self.get_dsdt_or_only() + if not table: return [] + scopes = [] + for index,line in enumerate(table.get("lines","")): + if self.is_hex(line): continue + if any(x in line for x in ("Processor (","Scope (","Device (","Method (","Name (")): + scopes.append((line,index)) + return scopes + + def get_paths(self, table=None): + if not table: table = self.get_dsdt_or_only() + if not table: return [] + # Set up lists for complete paths, as well + # as our current path reference + path_list = [] + _path = [] + brackets = 0 + for i,line in enumerate(table.get("lines",[])): + if self.is_hex(line): + # Skip hex + continue + line = self.get_line(line) + brackets += line.count("{")-line.count("}") + while len(_path): + # Remove any path entries that are nested + # equal to or further than our current set + if _path[-1][-1] >= brackets: + del _path[-1] + else: 
+ break + type_match = self.type_match.match(line) + if type_match: + # Add our path entry and save the full path + # to the path list as needed + _path.append((type_match.group("name"),brackets)) + if type_match.group("type") == "Scope": + continue + # Ensure that we only consider non-Scope paths that aren't + # already fully qualified with a \ prefix + path = [] + for p in _path[::-1]: + path.append(p[0]) + p_check = p[0].split(".")[0].rstrip("_") + if p_check.startswith("\\") or p_check in ("_SB","_PR"): + # Fully qualified - bail here + break + path = ".".join(path[::-1]).split(".") + # Properly qualify the path + if len(path) and path[0] == "\\": path.pop(0) + if any("^" in x for x in path): # Accommodate caret notation + new_path = [] + for x in path: + if x.count("^"): + # Remove the last Y paths to account for going up a level + del new_path[-1*x.count("^"):] + new_path.append(x.replace("^","")) # Add the original, removing any ^ chars + path = new_path + if not path: + continue + # Ensure we strip trailing underscores for consistency + padded_path = [("\\" if j==0 else"")+x.lstrip("\\").rstrip("_") for j,x in enumerate(path)] + path_str = ".".join(padded_path) + path_list.append((path_str,i,type_match.group("type"))) + return sorted(path_list) + + def get_path_of_type(self, obj_type="Device", obj="HPET", table=None): + if not table: table = self.get_dsdt_or_only() + if not table: return [] + paths = [] + # Remove trailing underscores and normalize case for all path + # elements passed + obj = ".".join([x.rstrip("_").upper() for x in obj.split(".")]) + obj_type = obj_type.lower() if obj_type else obj_type + for path in table.get("paths",[]): + path_check = ".".join([x.rstrip("_").upper() for x in path[0].split(".")]) + if (obj_type and obj_type != path[2].lower()) or not path_check.endswith(obj): + # Type or object mismatch - skip + continue + paths.append(path) + return sorted(paths) + + def get_device_paths(self, obj="HPET",table=None): + return 
self.get_path_of_type(obj_type="Device",obj=obj,table=table) + + def get_method_paths(self, obj="_STA",table=None): + return self.get_path_of_type(obj_type="Method",obj=obj,table=table) + + def get_name_paths(self, obj="CPU0",table=None): + return self.get_path_of_type(obj_type="Name",obj=obj,table=table) + + def get_processor_paths(self, obj_type="Processor",table=None): + return self.get_path_of_type(obj_type=obj_type,obj="",table=table) + + def get_device_paths_with_id(self,_id="PNP0A03",id_types=("_HID","_CID"),table=None): + if not table: table = self.get_dsdt_or_only() + if not table: return [] + if not isinstance(id_types,(list,tuple)): return [] + # Strip non-strings from the list + id_types = [x.upper() for x in id_types if isinstance(x,str)] + if not id_types: return [] + _id = _id.upper() # Ensure case + devs = [] + for p in table.get("paths",[]): + try: + for type_check in id_types: + if p[0].endswith(type_check) and _id in table.get("lines")[p[1]]: + # Save the path, strip the suffix and trailing periods + devs.append(p[0][:-len(type_check)].rstrip(".")) + # Leave this loop to avoid adding the same device + # multiple times + break + except Exception as e: + print(e) + continue + devices = [] + # Walk the paths again - and save any devices + # that match our prior list + for p in table.get("paths",[]): + if p[0] in devs and p[-1] == "Device": + devices.append(p) + return devices + + def get_device_paths_with_cid(self,cid="PNP0A03",table=None): + return self.get_device_paths_with_id(_id=cid,id_types=("_CID",),table=table) + + def get_device_paths_with_hid(self,hid="ACPI000E",table=None): + return self.get_device_paths_with_id(_id=hid,id_types=("_HID",),table=table) diff --git a/ACPI/SSDTTime-master/Scripts/plist.py b/ACPI/SSDTTime-master/Scripts/plist.py new file mode 100644 index 0000000..c6274dd --- /dev/null +++ b/ACPI/SSDTTime-master/Scripts/plist.py @@ -0,0 +1,688 @@ +### ### +# Imports # +### ### + +import datetime, os, plistlib, struct, sys, 
itertools, binascii +from io import BytesIO + +if sys.version_info < (3,0): + # Force use of StringIO instead of cStringIO as the latter + # has issues with Unicode strings + from StringIO import StringIO +else: + from io import StringIO + +try: + basestring # Python 2 + unicode +except NameError: + basestring = str # Python 3 + unicode = str + +try: + FMT_XML = plistlib.FMT_XML + FMT_BINARY = plistlib.FMT_BINARY +except AttributeError: + FMT_XML = "FMT_XML" + FMT_BINARY = "FMT_BINARY" + +### ### +# Helper Methods # +### ### + +def wrap_data(value): + if not _check_py3(): return plistlib.Data(value) + return value + +def extract_data(value): + if not _check_py3() and isinstance(value,plistlib.Data): return value.data + return value + +def _check_py3(): + return sys.version_info >= (3, 0) + +def _is_binary(fp): + if isinstance(fp, basestring): + return fp.startswith(b"bplist00") + header = fp.read(32) + fp.seek(0) + return header[:8] == b'bplist00' + +def _seek_past_whitespace(fp): + offset = 0 + while True: + byte = fp.read(1) + if not byte: + # End of file, reset offset and bail + offset = 0 + break + if not byte.isspace(): + # Found our first non-whitespace character + break + offset += 1 + # Seek to the first non-whitespace char + fp.seek(offset) + return offset + +### ### +# Deprecated Functions - Remapped # +### ### + +def readPlist(pathOrFile): + if not isinstance(pathOrFile, basestring): + return load(pathOrFile) + with open(pathOrFile, "rb") as f: + return load(f) + +def writePlist(value, pathOrFile): + if not isinstance(pathOrFile, basestring): + return dump(value, pathOrFile, fmt=FMT_XML, sort_keys=True, skipkeys=False) + with open(pathOrFile, "wb") as f: + return dump(value, f, fmt=FMT_XML, sort_keys=True, skipkeys=False) + +### ### +# Remapped Functions # +### ### + +def load(fp, fmt=None, use_builtin_types=None, dict_type=dict): + if _is_binary(fp): + use_builtin_types = False if use_builtin_types is None else use_builtin_types + try: + p = 
_BinaryPlistParser(use_builtin_types=use_builtin_types, dict_type=dict_type) + except: + # Python 3.9 removed use_builtin_types + p = _BinaryPlistParser(dict_type=dict_type) + return p.parse(fp) + elif _check_py3(): + offset = _seek_past_whitespace(fp) + use_builtin_types = True if use_builtin_types is None else use_builtin_types + # We need to monkey patch this to allow for hex integers - code taken/modified from + # https://github.com/python/cpython/blob/3.8/Lib/plistlib.py + if fmt is None: + header = fp.read(32) + fp.seek(offset) + for info in plistlib._FORMATS.values(): + if info['detect'](header): + P = info['parser'] + break + else: + raise plistlib.InvalidFileException() + else: + P = plistlib._FORMATS[fmt]['parser'] + try: + p = P(use_builtin_types=use_builtin_types, dict_type=dict_type) + except: + # Python 3.9 removed use_builtin_types + p = P(dict_type=dict_type) + if isinstance(p,plistlib._PlistParser): + # Monkey patch! + def end_integer(): + d = p.get_data() + value = int(d,16) if d.lower().startswith("0x") else int(d) + if -1 << 63 <= value < 1 << 64: + p.add_object(value) + else: + raise OverflowError("Integer overflow at line {}".format(p.parser.CurrentLineNumber)) + def end_data(): + try: + p.add_object(plistlib._decode_base64(p.get_data())) + except Exception as e: + raise Exception("Data error at line {}: {}".format(p.parser.CurrentLineNumber,e)) + p.end_integer = end_integer + p.end_data = end_data + return p.parse(fp) + else: + offset = _seek_past_whitespace(fp) + # Is not binary - assume a string - and try to load + # We avoid using readPlistFromString() as that uses + # cStringIO and fails when Unicode strings are detected + # Don't subclass - keep the parser local + from xml.parsers.expat import ParserCreate + # Create a new PlistParser object - then we need to set up + # the values and parse. 
+ p = plistlib.PlistParser() + parser = ParserCreate() + parser.StartElementHandler = p.handleBeginElement + parser.EndElementHandler = p.handleEndElement + parser.CharacterDataHandler = p.handleData + # We also need to monkey patch this to allow for other dict_types, hex int support + # proper line output for data errors, and for unicode string decoding + def begin_dict(attrs): + d = dict_type() + p.addObject(d) + p.stack.append(d) + def end_integer(): + d = p.getData() + value = int(d,16) if d.lower().startswith("0x") else int(d) + if -1 << 63 <= value < 1 << 64: + p.addObject(value) + else: + raise OverflowError("Integer overflow at line {}".format(parser.CurrentLineNumber)) + def end_data(): + try: + p.addObject(plistlib.Data.fromBase64(p.getData())) + except Exception as e: + raise Exception("Data error at line {}: {}".format(parser.CurrentLineNumber,e)) + def end_string(): + d = p.getData() + if isinstance(d,unicode): + d = d.encode("utf-8") + p.addObject(d) + p.begin_dict = begin_dict + p.end_integer = end_integer + p.end_data = end_data + p.end_string = end_string + if isinstance(fp, unicode): + # Encode unicode -> string; use utf-8 for safety + fp = fp.encode("utf-8") + if isinstance(fp, basestring): + # It's a string - let's wrap it up + fp = StringIO(fp) + # Parse it + parser.ParseFile(fp) + return p.root + +def loads(value, fmt=None, use_builtin_types=None, dict_type=dict): + if _check_py3() and isinstance(value, basestring): + # If it's a string - encode it + value = value.encode() + try: + return load(BytesIO(value),fmt=fmt,use_builtin_types=use_builtin_types,dict_type=dict_type) + except: + # Python 3.9 removed use_builtin_types + return load(BytesIO(value),fmt=fmt,dict_type=dict_type) + +def dump(value, fp, fmt=FMT_XML, sort_keys=True, skipkeys=False): + if fmt == FMT_BINARY: + # Assume binary at this point + writer = _BinaryPlistWriter(fp, sort_keys=sort_keys, skipkeys=skipkeys) + writer.write(value) + elif fmt == FMT_XML: + if _check_py3(): + 
plistlib.dump(value, fp, fmt=fmt, sort_keys=sort_keys, skipkeys=skipkeys) + else: + # We need to monkey patch a bunch here too in order to avoid auto-sorting + # of keys + writer = plistlib.PlistWriter(fp) + def writeDict(d): + if d: + writer.beginElement("dict") + items = sorted(d.items()) if sort_keys else d.items() + for key, value in items: + if not isinstance(key, basestring): + if skipkeys: + continue + raise TypeError("keys must be strings") + writer.simpleElement("key", key) + writer.writeValue(value) + writer.endElement("dict") + else: + writer.simpleElement("dict") + writer.writeDict = writeDict + writer.writeln("") + writer.writeValue(value) + writer.writeln("") + else: + # Not a proper format + raise ValueError("Unsupported format: {}".format(fmt)) + +def dumps(value, fmt=FMT_XML, skipkeys=False, sort_keys=True): + # We avoid using writePlistToString() as that uses + # cStringIO and fails when Unicode strings are detected + f = BytesIO() if _check_py3() else StringIO() + dump(value, f, fmt=fmt, skipkeys=skipkeys, sort_keys=sort_keys) + value = f.getvalue() + if _check_py3(): + value = value.decode("utf-8") + return value + +### ### +# Binary Plist Stuff For Py2 # +### ### + +# From the python 3 plistlib.py source: https://github.com/python/cpython/blob/3.11/Lib/plistlib.py +# Tweaked to function on both Python 2 and 3 + +class UID: + def __init__(self, data): + if not isinstance(data, int): + raise TypeError("data must be an int") + # It seems Apple only uses 32-bit unsigned ints for UIDs. Although the comment in + # CoreFoundation's CFBinaryPList.c detailing the binary plist format theoretically + # allows for 64-bit UIDs, most functions in the same file use 32-bit unsigned ints, + # with the sole function hinting at 64-bits appearing to be a leftover from copying + # and pasting integer handling code internally, and this code has not changed since + # it was added. 
(In addition, code in CFPropertyList.c to handle CF$UID also uses a + # 32-bit unsigned int.) + # + # if data >= 1 << 64: + # raise ValueError("UIDs cannot be >= 2**64") + if data >= 1 << 32: + raise ValueError("UIDs cannot be >= 2**32 (4294967296)") + if data < 0: + raise ValueError("UIDs must be positive") + self.data = data + + def __index__(self): + return self.data + + def __repr__(self): + return "%s(%s)" % (self.__class__.__name__, repr(self.data)) + + def __reduce__(self): + return self.__class__, (self.data,) + + def __eq__(self, other): + if not isinstance(other, UID): + return NotImplemented + return self.data == other.data + + def __hash__(self): + return hash(self.data) + +class InvalidFileException (ValueError): + def __init__(self, message="Invalid file"): + ValueError.__init__(self, message) + +_BINARY_FORMAT = {1: 'B', 2: 'H', 4: 'L', 8: 'Q'} + +_undefined = object() + +class _BinaryPlistParser: + """ + Read or write a binary plist file, following the description of the binary + format. Raise InvalidFileException in case of error, otherwise return the + root object. + see also: http://opensource.apple.com/source/CF/CF-744.18/CFBinaryPList.c + """ + def __init__(self, use_builtin_types, dict_type): + self._use_builtin_types = use_builtin_types + self._dict_type = dict_type + + def parse(self, fp): + try: + # The basic file format: + # HEADER + # object... + # refid->offset... 
+ # TRAILER + self._fp = fp + self._fp.seek(-32, os.SEEK_END) + trailer = self._fp.read(32) + if len(trailer) != 32: + raise InvalidFileException() + ( + offset_size, self._ref_size, num_objects, top_object, + offset_table_offset + ) = struct.unpack('>6xBBQQQ', trailer) + self._fp.seek(offset_table_offset) + self._object_offsets = self._read_ints(num_objects, offset_size) + self._objects = [_undefined] * num_objects + return self._read_object(top_object) + + except (OSError, IndexError, struct.error, OverflowError, + UnicodeDecodeError): + raise InvalidFileException() + + def _get_size(self, tokenL): + """ return the size of the next object.""" + if tokenL == 0xF: + m = self._fp.read(1)[0] + if not _check_py3(): + m = ord(m) + m = m & 0x3 + s = 1 << m + f = '>' + _BINARY_FORMAT[s] + return struct.unpack(f, self._fp.read(s))[0] + + return tokenL + + def _read_ints(self, n, size): + data = self._fp.read(size * n) + if size in _BINARY_FORMAT: + return struct.unpack('>' + _BINARY_FORMAT[size] * n, data) + else: + if not size or len(data) != size * n: + raise InvalidFileException() + return tuple(int(binascii.hexlify(data[i: i + size]),16) + for i in range(0, size * n, size)) + '''return tuple(int.from_bytes(data[i: i + size], 'big') + for i in range(0, size * n, size))''' + + def _read_refs(self, n): + return self._read_ints(n, self._ref_size) + + def _read_object(self, ref): + """ + read the object by reference. 
+ May recursively read sub-objects (content of an array/dict/set) + """ + result = self._objects[ref] + if result is not _undefined: + return result + + offset = self._object_offsets[ref] + self._fp.seek(offset) + token = self._fp.read(1)[0] + if not _check_py3(): + token = ord(token) + tokenH, tokenL = token & 0xF0, token & 0x0F + + if token == 0x00: # \x00 or 0x00 + result = None + + elif token == 0x08: # \x08 or 0x08 + result = False + + elif token == 0x09: # \x09 or 0x09 + result = True + + # The referenced source code also mentions URL (0x0c, 0x0d) and + # UUID (0x0e), but neither can be generated using the Cocoa libraries. + + elif token == 0x0f: # \x0f or 0x0f + result = b'' + + elif tokenH == 0x10: # int + result = int(binascii.hexlify(self._fp.read(1 << tokenL)),16) + if tokenL >= 3: # Signed - adjust + result = result-((result & 0x8000000000000000) << 1) + + elif token == 0x22: # real + result = struct.unpack('>f', self._fp.read(4))[0] + + elif token == 0x23: # real + result = struct.unpack('>d', self._fp.read(8))[0] + + elif token == 0x33: # date + f = struct.unpack('>d', self._fp.read(8))[0] + # timestamp 0 of binary plists corresponds to 1/1/2001 + # (year of Mac OS X 10.0), instead of 1/1/1970. 
+ result = (datetime.datetime(2001, 1, 1) + + datetime.timedelta(seconds=f)) + + elif tokenH == 0x40: # data + s = self._get_size(tokenL) + if self._use_builtin_types or not hasattr(plistlib, "Data"): + result = self._fp.read(s) + else: + result = plistlib.Data(self._fp.read(s)) + + elif tokenH == 0x50: # ascii string + s = self._get_size(tokenL) + result = self._fp.read(s).decode('ascii') + result = result + + elif tokenH == 0x60: # unicode string + s = self._get_size(tokenL) + result = self._fp.read(s * 2).decode('utf-16be') + + elif tokenH == 0x80: # UID + # used by Key-Archiver plist files + result = UID(int(binascii.hexlify(self._fp.read(1 + tokenL)),16)) + + elif tokenH == 0xA0: # array + s = self._get_size(tokenL) + obj_refs = self._read_refs(s) + result = [] + self._objects[ref] = result + result.extend(self._read_object(x) for x in obj_refs) + + # tokenH == 0xB0 is documented as 'ordset', but is not actually + # implemented in the Apple reference code. + + # tokenH == 0xC0 is documented as 'set', but sets cannot be used in + # plists. 
+ + elif tokenH == 0xD0: # dict + s = self._get_size(tokenL) + key_refs = self._read_refs(s) + obj_refs = self._read_refs(s) + result = self._dict_type() + self._objects[ref] = result + for k, o in zip(key_refs, obj_refs): + key = self._read_object(k) + if hasattr(plistlib, "Data") and isinstance(key, plistlib.Data): + key = key.data + result[key] = self._read_object(o) + + else: + raise InvalidFileException() + + self._objects[ref] = result + return result + +def _count_to_size(count): + if count < 1 << 8: + return 1 + + elif count < 1 << 16: + return 2 + + elif count < 1 << 32: + return 4 + + else: + return 8 + +_scalars = (str, int, float, datetime.datetime, bytes) + +class _BinaryPlistWriter (object): + def __init__(self, fp, sort_keys, skipkeys): + self._fp = fp + self._sort_keys = sort_keys + self._skipkeys = skipkeys + + def write(self, value): + + # Flattened object list: + self._objlist = [] + + # Mappings from object->objectid + # First dict has (type(object), object) as the key, + # second dict is used when object is not hashable and + # has id(object) as the key. + self._objtable = {} + self._objidtable = {} + + # Create list of all objects in the plist + self._flatten(value) + + # Size of object references in serialized containers + # depends on the number of objects in the plist. 
+ num_objects = len(self._objlist) + self._object_offsets = [0]*num_objects + self._ref_size = _count_to_size(num_objects) + + self._ref_format = _BINARY_FORMAT[self._ref_size] + + # Write file header + self._fp.write(b'bplist00') + + # Write object list + for obj in self._objlist: + self._write_object(obj) + + # Write refnum->object offset table + top_object = self._getrefnum(value) + offset_table_offset = self._fp.tell() + offset_size = _count_to_size(offset_table_offset) + offset_format = '>' + _BINARY_FORMAT[offset_size] * num_objects + self._fp.write(struct.pack(offset_format, *self._object_offsets)) + + # Write trailer + sort_version = 0 + trailer = ( + sort_version, offset_size, self._ref_size, num_objects, + top_object, offset_table_offset + ) + self._fp.write(struct.pack('>5xBBBQQQ', *trailer)) + + def _flatten(self, value): + # First check if the object is in the object table, not used for + # containers to ensure that two subcontainers with the same contents + # will be serialized as distinct values. 
+ if isinstance(value, _scalars): + if (type(value), value) in self._objtable: + return + + elif hasattr(plistlib, "Data") and isinstance(value, plistlib.Data): + if (type(value.data), value.data) in self._objtable: + return + + elif id(value) in self._objidtable: + return + + # Add to objectreference map + refnum = len(self._objlist) + self._objlist.append(value) + if isinstance(value, _scalars): + self._objtable[(type(value), value)] = refnum + elif hasattr(plistlib, "Data") and isinstance(value, plistlib.Data): + self._objtable[(type(value.data), value.data)] = refnum + else: + self._objidtable[id(value)] = refnum + + # And finally recurse into containers + if isinstance(value, dict): + keys = [] + values = [] + items = value.items() + if self._sort_keys: + items = sorted(items) + + for k, v in items: + if not isinstance(k, basestring): + if self._skipkeys: + continue + raise TypeError("keys must be strings") + keys.append(k) + values.append(v) + + for o in itertools.chain(keys, values): + self._flatten(o) + + elif isinstance(value, (list, tuple)): + for o in value: + self._flatten(o) + + def _getrefnum(self, value): + if isinstance(value, _scalars): + return self._objtable[(type(value), value)] + elif hasattr(plistlib, "Data") and isinstance(value, plistlib.Data): + return self._objtable[(type(value.data), value.data)] + else: + return self._objidtable[id(value)] + + def _write_size(self, token, size): + if size < 15: + self._fp.write(struct.pack('>B', token | size)) + + elif size < 1 << 8: + self._fp.write(struct.pack('>BBB', token | 0xF, 0x10, size)) + + elif size < 1 << 16: + self._fp.write(struct.pack('>BBH', token | 0xF, 0x11, size)) + + elif size < 1 << 32: + self._fp.write(struct.pack('>BBL', token | 0xF, 0x12, size)) + + else: + self._fp.write(struct.pack('>BBQ', token | 0xF, 0x13, size)) + + def _write_object(self, value): + ref = self._getrefnum(value) + self._object_offsets[ref] = self._fp.tell() + if value is None: + self._fp.write(b'\x00') + + elif 
value is False: + self._fp.write(b'\x08') + + elif value is True: + self._fp.write(b'\x09') + + elif isinstance(value, int): + if value < 0: + try: + self._fp.write(struct.pack('>Bq', 0x13, value)) + except struct.error: + raise OverflowError(value) # from None + elif value < 1 << 8: + self._fp.write(struct.pack('>BB', 0x10, value)) + elif value < 1 << 16: + self._fp.write(struct.pack('>BH', 0x11, value)) + elif value < 1 << 32: + self._fp.write(struct.pack('>BL', 0x12, value)) + elif value < 1 << 63: + self._fp.write(struct.pack('>BQ', 0x13, value)) + elif value < 1 << 64: + self._fp.write(b'\x14' + value.to_bytes(16, 'big', signed=True)) + else: + raise OverflowError(value) + + elif isinstance(value, float): + self._fp.write(struct.pack('>Bd', 0x23, value)) + + elif isinstance(value, datetime.datetime): + f = (value - datetime.datetime(2001, 1, 1)).total_seconds() + self._fp.write(struct.pack('>Bd', 0x33, f)) + + elif (_check_py3() and isinstance(value, (bytes, bytearray))) or (hasattr(plistlib, "Data") and isinstance(value, plistlib.Data)): + if not isinstance(value, (bytes, bytearray)): + value = value.data # Unpack it + self._write_size(0x40, len(value)) + self._fp.write(value) + + elif isinstance(value, basestring): + try: + t = value.encode('ascii') + self._write_size(0x50, len(value)) + except UnicodeEncodeError: + t = value.encode('utf-16be') + self._write_size(0x60, len(t) // 2) + self._fp.write(t) + + elif isinstance(value, UID) or (hasattr(plistlib,"UID") and isinstance(value, plistlib.UID)): + if value.data < 0: + raise ValueError("UIDs must be positive") + elif value.data < 1 << 8: + self._fp.write(struct.pack('>BB', 0x80, value)) + elif value.data < 1 << 16: + self._fp.write(struct.pack('>BH', 0x81, value)) + elif value.data < 1 << 32: + self._fp.write(struct.pack('>BL', 0x83, value)) + # elif value.data < 1 << 64: + # self._fp.write(struct.pack('>BQ', 0x87, value)) + else: + raise OverflowError(value) + + elif isinstance(value, (list, tuple)): + 
refs = [self._getrefnum(o) for o in value] + s = len(refs) + self._write_size(0xA0, s) + self._fp.write(struct.pack('>' + self._ref_format * s, *refs)) + + elif isinstance(value, dict): + keyRefs, valRefs = [], [] + + if self._sort_keys: + rootItems = sorted(value.items()) + else: + rootItems = value.items() + + for k, v in rootItems: + if not isinstance(k, basestring): + if self._skipkeys: + continue + raise TypeError("keys must be strings") + keyRefs.append(self._getrefnum(k)) + valRefs.append(self._getrefnum(v)) + + s = len(keyRefs) + self._write_size(0xD0, s) + self._fp.write(struct.pack('>' + self._ref_format * s, *keyRefs)) + self._fp.write(struct.pack('>' + self._ref_format * s, *valRefs)) + + else: + raise TypeError(value) diff --git a/ACPI/SSDTTime-master/Scripts/reveal.py b/ACPI/SSDTTime-master/Scripts/reveal.py new file mode 100644 index 0000000..169e869 --- /dev/null +++ b/ACPI/SSDTTime-master/Scripts/reveal.py @@ -0,0 +1,69 @@ +import sys, os +from . import run + +class Reveal: + + def __init__(self): + self.r = run.Run() + return + + def get_parent(self, path): + return os.path.normpath(os.path.join(path, os.pardir)) + + def reveal(self, path, new_window = False): + # Reveals the passed path in Finder - only works on macOS + if not sys.platform == "darwin": + return ("", "macOS Only", 1) + if not path: + # No path sent - nothing to reveal + return ("", "No path specified", 1) + # Build our script - then convert it to a single line task + if not os.path.exists(path): + # Not real - bail + return ("", "{} - doesn't exist".format(path), 1) + # Get the absolute path + path = os.path.abspath(path) + command = ["osascript"] + if new_window: + command.extend([ + "-e", "set p to \"{}\"".format(path.replace("\"", "\\\"")), + "-e", "tell application \"Finder\"", + "-e", "reveal POSIX file p as text", + "-e", "activate", + "-e", "end tell" + ]) + else: + if path == self.get_parent(path): + command.extend([ + "-e", "set p to \"{}\"".format(path.replace("\"", 
import sys, subprocess, time, threading, shlex
try:
    # Python 2
    from Queue import Queue, Empty
except:
    # Python 3
    from queue import Queue, Empty

# Whether we can safely pass close_fds to Popen (POSIX platforms)
ON_POSIX = 'posix' in sys.builtin_module_names

class Run:
    """Small subprocess helper: runs commands either buffered
    (_run_command) or with live-streamed output (_stream_output), driven
    by dicts passed to run().  All paths return (stdout, stderr, code)."""

    def __init__(self):
        return

    def _read_output(self, pipe, q):
        """Reader-thread target: pump *pipe* one char at a time into *q*.

        A ValueError is raised if the pipe is closed underneath us - treat
        that as end-of-stream."""
        try:
            for line in iter(lambda: pipe.read(1), b''):
                q.put(line)
        except ValueError:
            pass
        pipe.close()

    def _create_thread(self, output):
        # Creates a new queue and thread object to watch based on the output pipe sent
        q = Queue()
        t = threading.Thread(target=self._read_output, args=(output, q))
        # Daemon so a stuck pipe reader can't keep the interpreter alive
        t.daemon = True
        return (q,t)

    def _stream_output(self, comm, shell = False):
        """Run *comm*, echoing stdout/stderr live to the console while also
        capturing them.  Returns (stdout, stderr, returncode).

        NOTE(review): shlex.quote is Python 3.3+ only, although the Queue
        import above suggests py2 support was intended - confirm the
        supported interpreter range."""
        output = error = ""
        p = None
        try:
            if shell and type(comm) is list:
                comm = " ".join(shlex.quote(x) for x in comm)
            if not shell and type(comm) is str:
                comm = shlex.split(comm)
            # bufsize=0 + text mode so single-char reads stream promptly
            p = subprocess.Popen(comm, shell=shell, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=0, universal_newlines=True, close_fds=ON_POSIX)
            # Setup the stdout thread/queue
            q,t = self._create_thread(p.stdout)
            qe,te = self._create_thread(p.stderr)
            # Start both threads
            t.start()
            te.start()

            while True:
                c = z = ""
                try: c = q.get_nowait()
                except Empty: pass
                else:
                    sys.stdout.write(c)
                    output += c
                    sys.stdout.flush()
                try: z = qe.get_nowait()
                except Empty: pass
                else:
                    sys.stderr.write(z)
                    error += z
                    sys.stderr.flush()
                if not c==z=="": continue # Keep going until empty
                # No output - see if still running
                p.poll()
                if p.returncode != None:
                    # Subprocess ended
                    break
                # No output, but subprocess still running - stall for 20ms
                time.sleep(0.02)

            # Flush whatever the reader threads didn't drain yet
            o, e = p.communicate()
            return (output+o, error+e, p.returncode)
        except:
            # NOTE(review): if Popen succeeded but a later call raised before
            # the process exited, p.returncode here may still be None.
            if p:
                try: o, e = p.communicate()
                except: o = e = ""
                return (output+o, error+e, p.returncode)
            return ("", "Command not found!", 1)

    def _decode(self, value, encoding="utf-8", errors="ignore"):
        # Helper method to only decode if bytes type
        if sys.version_info >= (3,0) and isinstance(value, bytes):
            return value.decode(encoding,errors)
        return value

    def _run_command(self, comm, shell = False):
        """Run *comm* to completion without echoing, returning the decoded
        (stdout, stderr, returncode) tuple - or ("", "Command not found!", 1)
        when the process could not be launched."""
        c = None
        try:
            if shell and type(comm) is list:
                comm = " ".join(shlex.quote(x) for x in comm)
            if not shell and type(comm) is str:
                comm = shlex.split(comm)
            p = subprocess.Popen(comm, shell=shell, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            c = p.communicate()
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt and
            # reports every launch failure as "Command not found!"
            if c == None:
                return ("", "Command not found!", 1)
        return (self._decode(c[0]), self._decode(c[1]), p.returncode)

    def run(self, command_list, leave_on_fail = False):
        """Run one command dict or a list of them.

        Recognized keys per dict: args, shell, stream, sudo, stdout, stderr,
        message, show.  Returns a single (out, err, code) tuple when exactly
        one command produced output, otherwise a list of tuples."""
        # Command list should be an array of dicts
        if type(command_list) is dict:
            # We only have one command
            command_list = [command_list]
        output_list = []
        for comm in command_list:
            args = comm.get("args", [])
            shell = comm.get("shell", False)
            stream = comm.get("stream", False)
            sudo = comm.get("sudo", False)
            stdout = comm.get("stdout", False)
            stderr = comm.get("stderr", False)
            mess = comm.get("message", None)
            show = comm.get("show", False)

            if not mess == None:
                print(mess)

            if not len(args):
                # nothing to process
                continue
            if sudo:
                # Check if we have sudo
                out = self._run_command(["which", "sudo"])
                if "sudo" in out[0]:
                    # Can sudo
                    # NOTE(review): insert() mutates the caller's list in place
                    if type(args) is list:
                        args.insert(0, out[0].replace("\n", "")) # add to start of list
                    elif type(args) is str:
                        args = out[0].replace("\n", "") + " " + args # add to start of string

            if show:
                print(" ".join(args))

            if stream:
                # Stream it!
                out = self._stream_output(args, shell)
            else:
                # Just run and gather output
                out = self._run_command(args, shell)
            if stdout and len(out[0]):
                print(out[0])
            if stderr and len(out[1]):
                print(out[1])
            # Append output
            output_list.append(out)
            # Check for errors
            if leave_on_fail and out[2] != 0:
                # Got an error - leave
                break
        if len(output_list) == 1:
            # We only ran one command - just return that output
            return output_list[0]
        return output_list
class Utils:
    """Grab-bag of console helpers (color printing, prompts, version
    comparison, path sanitizing) shared by the SSDTTime scripts."""

    def __init__(self, name = "Python Script"):
        self.name = name
        # Init our colors before we need to print anything - colors.json
        # lives next to this script, so hop there and back.
        cwd = os.getcwd()
        os.chdir(os.path.dirname(os.path.realpath(__file__)))
        if os.path.exists("colors.json"):
            self.colors_dict = json.load(open("colors.json"))
        else:
            self.colors_dict = {}
        # Fix: cprint() iterates self.colors, which was never assigned and
        # raised AttributeError on first use.  Derive it here so cprint()
        # works even when colors.json is absent.
        # NOTE(review): assumes colors.json stores its find/replace entries
        # under a top-level "colors" key - confirm against the actual file.
        self.colors = self.colors_dict.get("colors", [])
        os.chdir(cwd)

    def check_admin(self):
        # Returns whether or not we're admin/root on this platform
        try:
            is_admin = os.getuid() == 0
        except AttributeError:
            # No getuid() - Windows; ask the shell instead
            is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
        return is_admin

    def elevate(self, file):
        # Re-runs the passed file with admin privileges if needed
        if self.check_admin():
            return
        if os.name == "nt":
            ctypes.windll.shell32.ShellExecuteW(None, "runas", '"{}"'.format(sys.executable), '"{}"'.format(file), None, 1)
        else:
            try:
                p = subprocess.Popen(["which", "sudo"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                c = p.communicate()[0].decode("utf-8", "ignore").replace("\n", "")
                # Replace the current process with a sudo'd copy of ourselves
                os.execv(c, [ sys.executable, 'python'] + sys.argv)
            except Exception:
                exit(1)

    def compare_versions(self, vers1, vers2, **kwargs):
        # Helper method to compare ##.## strings
        #
        # vers1 < vers2 = True
        # vers1 = vers2 = None
        # vers1 > vers2 = False

        # Sanitize the pads
        pad = str(kwargs.get("pad", ""))
        sep = str(kwargs.get("separator", "."))

        ignore_case = kwargs.get("ignore_case", True)

        # Cast as strings
        vers1 = str(vers1)
        vers2 = str(vers2)

        if ignore_case:
            vers1 = vers1.lower()
            vers2 = vers2.lower()

        # Split and pad lists so both have the same number of segments
        v1_parts, v2_parts = self.pad_length(vers1.split(sep), vers2.split(sep))

        # Iterate and compare segment by segment
        for i in range(len(v1_parts)):
            # Remove non-numeric
            v1 = ''.join(c.lower() for c in v1_parts[i] if c.isalnum())
            v2 = ''.join(c.lower() for c in v2_parts[i] if c.isalnum())
            # Equalize the lengths so lexical compare works numerically
            v1, v2 = self.pad_length(v1, v2)
            # Compare
            if str(v1) < str(v2):
                return True
            elif str(v1) > str(v2):
                return False
        # Never differed - return None, must be equal
        return None

    def pad_length(self, var1, var2, pad = "0"):
        # Pads the vars on the left side to make them equal length.
        # Works on two strings or two lists of the same type.
        pad = "0" if len(str(pad)) < 1 else str(pad)[0]
        if not type(var1) == type(var2):
            # Type mismatch!  Just return what we got
            return (var1, var2)
        if len(var1) < len(var2):
            if type(var1) is list:
                var1.extend([str(pad) for x in range(len(var2) - len(var1))])
            else:
                var1 = "{}{}".format((pad*(len(var2)-len(var1))), var1)
        elif len(var2) < len(var1):
            if type(var2) is list:
                var2.extend([str(pad) for x in range(len(var1) - len(var2))])
            else:
                var2 = "{}{}".format((pad*(len(var1)-len(var2))), var2)
        return (var1, var2)

    def check_path(self, path):
        # Normalize a user-supplied path: strip quotes/whitespace, expand ~,
        # and unescape.  Returns the absolute path or None if it never
        # resolves to something that exists.
        test_path = path
        last_path = None
        while True:
            # Bail if we've looped at least once and the path didn't change
            if last_path != None and last_path == test_path: return None
            last_path = test_path
            # Check if we stripped everything out
            if not len(test_path): return None
            # Check if we have a valid path
            if os.path.exists(test_path):
                return os.path.abspath(test_path)
            # Check for quotes
            if test_path[0] == test_path[-1] and test_path[0] in ('"',"'"):
                test_path = test_path[1:-1]
                continue
            # Check for a tilde and expand if needed
            if test_path[0] == "~":
                tilde_expanded = os.path.expanduser(test_path)
                if tilde_expanded != test_path:
                    # Got a change
                    test_path = tilde_expanded
                    continue
            # Let's check for spaces - strip from the left first, then the right
            if test_path[0] in (" ","\t"):
                test_path = test_path[1:]
                continue
            if test_path[-1] in (" ","\t"):
                test_path = test_path[:-1]
                continue
            # Maybe we have escapes to handle?
            test_path = "\\".join([x.replace("\\", "") for x in test_path.split("\\\\")])

    def grab(self, prompt, **kwargs):
        # Takes a prompt, a default, and a timeout and shows it with that
        # timeout - returning the input, or the default on timeout/EOF
        timeout = kwargs.get("timeout",0)
        default = kwargs.get("default","")
        # If we don't have a timeout - then skip the timed sections
        if timeout <= 0:
            try:
                if sys.version_info >= (3, 0):
                    return input(prompt)
                else:
                    return str(raw_input(prompt))
            except EOFError:
                return default
        # Write our prompt
        sys.stdout.write(prompt)
        sys.stdout.flush()
        if os.name == "nt":
            # Windows has no select() on stdin - poll the console instead
            start_time = time.time()
            i = ''
            while True:
                if msvcrt.kbhit():
                    c = msvcrt.getche()
                    if ord(c) == 13: # enter_key
                        break
                    elif ord(c) >= 32: # space_char
                        i += c.decode() if sys.version_info >= (3,0) and isinstance(c,bytes) else c
                else:
                    time.sleep(0.02) # Delay for 20ms to prevent CPU workload
                if len(i) == 0 and (time.time() - start_time) > timeout:
                    break
        else:
            i, o, e = select.select( [sys.stdin], [], [], timeout )
            if i:
                i = sys.stdin.readline().strip()
        print('') # needed to move to next line
        if len(i) > 0:
            return i
        else:
            return default

    def cls(self):
        # Clear the console - only when we have a real terminal
        if os.name == "nt":
            os.system("cls")
        elif os.environ.get("TERM"):
            os.system("clear")

    def cprint(self, message, **kwargs):
        # Print (or return) *message* with color placeholders replaced.
        # With strip_colors the placeholders are removed and the cleaned
        # string is returned instead of printed.
        strip_colors = kwargs.get("strip_colors", False)
        if os.name == "nt":
            # Windows console doesn't handle the ANSI escapes - strip them
            strip_colors = True
        reset = u"\u001b[0m"
        # self.colors is set up in __init__ from colors.json
        for c in self.colors:
            if strip_colors:
                message = message.replace(c["find"], "")
            else:
                message = message.replace(c["find"], c["replace"])
        if strip_colors:
            return message
        sys.stdout.write(message)
        print(reset)

    # Header drawing method
    def head(self, text = None, width = 55):
        # Draw a "# ... #" banner with *text* centered, truncating with
        # "...#" when it would overflow *width* characters.
        if text == None:
            text = self.name
        self.cls()
        print(" {}".format("#"*width))
        mid_len = int(round(width/2-len(text)/2)-2)
        middle = " #{}{}{}#".format(" "*mid_len, text, " "*((width - mid_len - len(text))-2))
        if len(middle) > width+1:
            # Get the difference
            di = len(middle) - width
            # Add the padding for the ...#
            di += 3
            # Trim the string
            middle = middle[:-di] + "...#"
        print(middle)
        print("#"*width)

    def resize(self, width, height):
        # Ask the terminal to resize via the xterm escape sequence
        print('\033[8;{};{}t'.format(height, width))

    def custom_quit(self):
        # Print the goodbye banner and terminate the script
        self.head()
        print("by CorpNewt\n")
        print("Thanks for testing it out, for bugs/comments/complaints")
        print("send me a message on Reddit, or check out my GitHub:\n")
        print("www.reddit.com/u/corpnewt")
        print("www.github.com/corpnewt\n")
        # Get the time and wish them a good morning, afternoon, evening, and night
        hr = datetime.datetime.now().time().hour
        if hr > 3 and hr < 12:
            print("Have a nice morning!\n\n")
        elif hr >= 12 and hr < 17:
            print("Have a nice afternoon!\n\n")
        elif hr >= 17 and hr < 21:
            print("Have a nice evening!\n\n")
        else:
            print("Have a nice night!\n\n")
        exit(0)
-v keepsyms=1 debug=0x100 igfxonln=1 csr-active-config AAAAAA== prev-lang:kbd @@ -2127,7 +2127,7 @@ Comment Fix black screen on wake from hibernation for Lenovo Thinkpad T490 Enabled - + Size 4096 Type