Mirror of https://github.com/NationalSecurityAgency/ghidra.git (synced 2024-11-23 04:32:12 +00:00)

commit 413220e069
Merge remote-tracking branch 'origin/patch' into stable
@@ -22,6 +22,39 @@
<BODY>
<H1 align="center">Ghidra 11.2.1 Change History (November 2024)</H1>
<blockquote><p><u><B>New Features</B></u></p>
<ul>
<li><I>Debugger:GDB</I>. Added GDB batch scripts for Windows. Also updated the ssh+gdb scripts to take the path to ssh as an option. (GP-4957, Issue #7069)</li>
</ul>
</blockquote>
<blockquote><p><u><B>Improvements</B></u></p>
<ul>
<li><I>Analysis</I>. Updated the RTTI Analyzer to fix incorrect PDB symbols in anonymous namespaces when the PDB lacks complete namespace information but the RTTI structures provide it. (GP-5053, Issue #3213)</li>
<li><I>Decompiler</I>. Added support for detecting optimized strings loaded from labeled memory. (GP-4979, Issue #6925, #6985)</li>
</ul>
</blockquote>
<blockquote><p><u><B>Bugs</B></u></p>
<ul>
<li><I>Analysis</I>. Clearing of bad flow after a call that is found to be non-returning no longer clears DLL Import table entries or defined data beyond <span class="gcode">Undefined<n></span>. (GP-4696, Issue #6636)</li>
<li><I>BSim</I>. Fixed the Elasticsearch <span class="gcode">lsh.zip</span> plug-in for the <span class="gcode">BSimElasticPlugin</span> extension. (GP-5043, Issue #7051, #7054)</li>
<li><I>BSim</I>. Corrected the <span class="gcode">synchronous_commit</span> setting for the BSim PostgreSQL database and eliminated some warnings in the PostgreSQL logs. All DB connections are now closed properly when the Ghidra process exits normally, avoiding unnecessary server logging of failed DB connection errors. (GP-5082, Issue #6951)</li>
<li><I>DB</I>. Corrected an issue closing <span class="gcode">FileDataTypeManager</span>, affecting use of DataType Archives on Windows platforms, which could produce an error during resource cleanup. (GP-5090)</li>
<li><I>Debugger</I>. Fixed an issue launching debuggers on Windows when the installation path contains spaces. (GP-5015, Issue #6999)</li>
<li><I>Debugger:Emulator</I>. Fixed an issue with stale disassembly in the Emulator. (GP-4483)</li>
<li><I>Debugger:Emulator</I>. Fixed the Emulator to heed UI-driven patches to the program counter. (GP-4889, Issue #6867)</li>
<li><I>Debugger:Trace</I>. Fixed a bug in the Listing with <span class="gtitle">Force Full View</span> enabled. (GP-2032)</li>
<li><I>Decompiler</I>. Fixed a bug in the Decompiler causing <em>"Case block has become detached from switch"</em> exceptions. (GP-4899, Issue #6819)</li>
<li><I>Decompiler</I>. Fixed a bug that could cause an infinite loop in the Decompiler on functions accessing a 1-byte structure. (GP-4972, Issue #6969)</li>
<li><I>Decompiler</I>. Fixed an infinite loop in the Decompiler associated with overlapping fields in a structure. (GP-4985, Issue #6991)</li>
<li><I>Emulator</I>. Fixed slow stepping that occurred in the Emulator after an interrupt. (GP-4231, Issue #6109)</li>
<li><I>Importer:Mach-O</I>. Fixed an exception that could occur in the MachoLoader when an External Program had a space in its name or path. (GP-4997, Issue #6989)</li>
<li><I>Importer:PE</I>. Fixed an exception that could occur while parsing PE <span class="gcode">Control Flow Guard</span> structures, which prevented the binary from importing successfully. (GP-5009, Issue #6960)</li>
<li><I>Scripting</I>. Fixed a typo in <span class="gcode">VSCodeProjectScript.java</span> that resulted in the <span class="gcode">Extensions/Ghidra/Skeleton</span> directory not being found. (GP-4971, Issue #6971)</li>
<li><I>Scripting</I>. Added VxWorks external symbol markup, which was previously ignored. (GP-5000)</li>
</ul>
</blockquote>

<H1 align="center">Ghidra 11.2 Change History (September 2024)</H1>
<blockquote><p><u><B>New Features</B></u></p>
<ul>
@@ -2,6 +2,11 @@
##MODULE IP: JSch License
Module.manifest||GHIDRA||||END|
data/debugger-launchers/local-gdb.bat||GHIDRA||||END|
data/debugger-launchers/qemu-gdb.bat||GHIDRA||||END|
data/debugger-launchers/raw-gdb.bat||GHIDRA||||END|
data/debugger-launchers/remote-gdb.bat||GHIDRA||||END|
data/debugger-launchers/ssh-gdb.bat||GHIDRA||||END|
data/debugger-launchers/ssh-gdbserver.bat||GHIDRA||||END|
data/scripts/fallback_info_proc_mappings.gdb||GHIDRA||||END|
data/scripts/fallback_maintenance_info_sections.gdb||GHIDRA||||END|
data/scripts/getpid-linux-i386.gdb||GHIDRA||||END|
@@ -10,12 +10,10 @@
::@icon icon.debugger
::@help TraceRmiLauncherServicePlugin#gdb
::@enum StartCmd:str run start starti
::@arg :file "Image" "The target binary executable image"
::@args "Arguments" "Command-line arguments to pass to the target"
::@env OPT_TARGET_IMG:file="" "Image" "The target binary executable image"
::@env OPT_TARGET_ARGS:str="" "Arguments" "Command-line arguments to pass to the target"
::@env OPT_GDB_PATH:file="gdb" "gdb command" "The path to gdb. Omit the full path to resolve using the system PATH."
::@env OPT_START_CMD:StartCmd="starti" "Run command" "The gdb command to actually run the target."
::@env OPT_EXTRA_TTY:bool=false "Inferior TTY" "Provide a separate terminal emulator for the target."
::@tty TTY_TARGET if env:OPT_EXTRA_TTY

@echo off
set PYTHONPATH0=%GHIDRA_HOME%\Ghidra\Debug\Debugger-agent-gdb\pypkg\src
@@ -30,24 +28,18 @@ IF EXIST %GHIDRA_HOME%\ghidra\.git (
)
set PYTHONPATH=%PYTHONPATH1%;%PYTHONPATH0%;%PYTHONPATH%

set target_image=%1
shift
set target_args=%*

"%OPT_GDB_PATH%" ^
  -q ^
  -ex "set pagination off" ^
  -ex "set confirm off" ^
  -ex "show version" ^
  -ex "python import ghidragdb" ^
  -ex "target exec %target_image%" ^
  -ex "set args %target_args%" ^
  -ex "target exec %OPT_TARGET_IMG%" ^
  -ex "set args %OPT_TARGET_ARGS%" ^
  -ex "set inferior-tty %TTY_TARGET%" ^
  -ex "ghidra trace connect '%GHIDRA_TRACE_RMI_ADDR%'" ^
  -ex "ghidra trace start" ^
  -ex "ghidra trace sync-enable" ^
  -ex "%OPT_START_CMD%" ^
  -ex "set confirm on" ^
  -ex "set pagination on" ^

  -ex "set pagination on"
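The updated local-gdb.bat above replaces the positional %1/%* arguments with the OPT_TARGET_IMG and OPT_TARGET_ARGS environment options. As a minimal sketch (not how Ghidra's TraceRmi launcher service actually invokes the script), a front end could supply those options through the process environment; the option names below come from the script, while the paths and the use of ProcessBuilder are illustrative assumptions:

    import java.io.IOException;
    import java.util.Map;

    public class LaunchLocalGdbSketch {
        public static void main(String[] args) throws IOException, InterruptedException {
            // Hypothetical paths; the real launcher also sets GHIDRA_HOME and GHIDRA_TRACE_RMI_ADDR.
            ProcessBuilder pb = new ProcessBuilder("cmd.exe", "/c",
                    "data\\debugger-launchers\\local-gdb.bat");
            Map<String, String> env = pb.environment();
            env.put("OPT_TARGET_IMG", "C:\\targets\\hello.exe"); // replaces the old %1
            env.put("OPT_TARGET_ARGS", "--verbose input.txt");   // replaces the old shifted %*
            env.put("OPT_GDB_PATH", "gdb");                      // resolved via the system PATH
            env.put("OPT_START_CMD", "starti");
            pb.inheritIO().start().waitFor();
        }
    }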
@ -0,0 +1,56 @@
|
||||
::@title qemu + gdb
|
||||
::@desc <html><body width="300px">
|
||||
::@desc <h3>Launch with <tt>qemu</tt> and connect with <tt>gdb</tt></h3>
|
||||
::@desc <p>
|
||||
::@desc This will launch the target on the local machine using <tt>qemu</tt>.
|
||||
::@desc Then in a second terminal, it will connect <tt>gdb</tt> to QEMU's GDBstub.
|
||||
::@desc For setup instructions, press <b>F1</b>.
|
||||
::@desc </p>
|
||||
::@desc </body></html>
|
||||
::@menu-group cross
|
||||
::@icon icon.debugger
|
||||
::@help TraceRmiLauncherServicePlugin#gdb_qemu
|
||||
::@env OPT_TARGET_IMG:file="" "Image" "The target binary executable image"
|
||||
::@env OPT_TARGET_ARGS:str="" "Arguments" "Command-line arguments to pass to the target"
|
||||
::@env GHIDRA_LANG_EXTTOOL_qemu:file="" "QEMU command" "The path to qemu for the target architecture."
|
||||
::@env QEMU_GDB:int=1234 "QEMU Port" "Port for gdb connection to qemu"
|
||||
::@env OPT_EXTRA_QEMU_ARGS:str="" "Extra qemu arguments" "Extra arguments to pass to qemu. Use with care."
|
||||
::@env OPT_GDB_PATH:file="gdb-multiarch" "gdb command" "The path to gdb. Omit the full path to resolve using the system PATH."
|
||||
::@env OPT_EXTRA_TTY:bool=false "QEMU TTY" "Provide a separate terminal emulator for the target."
|
||||
|
||||
@echo off
|
||||
set PYTHONPATH0=%GHIDRA_HOME%\Ghidra\Debug\Debugger-agent-gdb\pypkg\src
|
||||
set PYTHONPATH1=%GHIDRA_HOME%\Ghidra\Debug\Debugger-rmi-trace\pypkg\src
|
||||
IF EXIST %GHIDRA_HOME%\.git (
|
||||
set PYTHONPATH0=%GHIDRA_HOME%\Ghidra\Debug\Debugger-agent-gdb\build\pypkg\src
|
||||
set PYTHONPATH1=%GHIDRA_HOME%\Ghidra\Debug\Debugger-rmi-trace\build\pypkg\src
|
||||
)
|
||||
IF EXIST %GHIDRA_HOME%\ghidra\.git (
|
||||
set PYTHONPATH0=%GHIDRA_HOME%\ghidra\Ghidra\Debug\Debugger-agent-gdb\build\pypkg\src
|
||||
set PYTHONPATH1=%GHIDRA_HOME%\ghidra\Ghidra\Debug\Debugger-rmi-trace\build\pypkg\src
|
||||
)
|
||||
set PYTHONPATH=%PYTHONPATH1%;%PYTHONPATH0%;%PYTHONPATH%
|
||||
|
||||
IF "%OPT_EXTRA_TTY%"=="true" (
|
||||
start "qemu" "%GHIDRA_LANG_EXTTOOL_qemu%" %OPT_EXTRA_QEMU_ARGS% -gdb tcp::%QEMU_GDB% -S "%OPT_TARGET_IMG%" %OPT_TARGET_ARGS%
|
||||
) ELSE (
|
||||
start /B "qemu" "%GHIDRA_LANG_EXTTOOL_qemu%" %OPT_EXTRA_QEMU_ARGS% -gdb tcp::%QEMU_GDB% -S "%OPT_TARGET_IMG%" %OPT_TARGET_ARGS%
|
||||
)
|
||||
|
||||
:: Give QEMU a moment to open the socket
|
||||
powershell -nop -c "& {sleep -m 100}"
|
||||
|
||||
"%OPT_GDB_PATH%" ^
|
||||
-q ^
|
||||
-ex "set pagination off" ^
|
||||
-ex "set confirm off" ^
|
||||
-ex "show version" ^
|
||||
-ex "python import ghidragdb" ^
|
||||
-ex "target exec '%OPT_TARGET_IMG%'" ^
|
||||
-ex "set args %OPT_TARGET_ARGS%" ^
|
||||
-ex "ghidra trace connect '%GHIDRA_TRACE_RMI_ADDR%'" ^
|
||||
-ex "ghidra trace start" ^
|
||||
-ex "ghidra trace sync-enable" ^
|
||||
-ex "target remote localhost:%QEMU_GDB%" ^
|
||||
-ex "set confirm on" ^
|
||||
-ex "set pagination on"
|
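The new qemu-gdb.bat gives QEMU a fixed 100 ms head start (powershell sleep -m 100) before gdb issues "target remote localhost:%QEMU_GDB%" (default port 1234). A sketch of a more patient alternative, polling the gdbstub port until it accepts a connection; this is an illustration in Java, not part of the script:

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import java.net.Socket;

    public class WaitForGdbStub {
        /** Poll localhost:port until QEMU's gdbstub is listening or the timeout expires. */
        static boolean waitForStub(int port, long timeoutMillis) throws InterruptedException {
            long deadline = System.currentTimeMillis() + timeoutMillis;
            while (System.currentTimeMillis() < deadline) {
                try (Socket probe = new Socket()) {
                    probe.connect(new InetSocketAddress("localhost", port), 250);
                    return true; // listening; the probe is closed and gdb can connect next
                }
                catch (IOException e) {
                    Thread.sleep(100); // not up yet; retry
                }
            }
            return false;
        }

        public static void main(String[] args) throws InterruptedException {
            System.out.println(waitForStub(1234, 5_000) ? "gdbstub ready" : "timed out");
        }
    }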
@ -1,18 +1,18 @@
|
||||
#!/usr/bin/bash
|
||||
## ###
|
||||
# IP: GHIDRA
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# IP: GHIDRA
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
##
|
||||
#@title qemu + gdb
|
||||
#@desc <html><body width="300px">
|
||||
@ -29,7 +29,7 @@
|
||||
#@arg :file "Image" "The target binary executable image"
|
||||
#@args "Arguments" "Command-line arguments to pass to the target"
|
||||
#@env GHIDRA_LANG_EXTTOOL_qemu:file="" "QEMU command" "The path to qemu for the target architecture."
|
||||
#@env QEMU_GDB:int=12345 "QEMU Port" "Port for gdb connection to qemu"
|
||||
#@env QEMU_GDB:int=1234 "QEMU Port" "Port for gdb connection to qemu"
|
||||
#@env OPT_EXTRA_QEMU_ARGS:str="" "Extra qemu arguments" "Extra arguments to pass to qemu. Use with care."
|
||||
#@env OPT_GDB_PATH:file="gdb-multiarch" "gdb command" "The path to gdb. Omit the full path to resolve using the system PATH."
|
||||
#@env OPT_EXTRA_TTY:bool=false "QEMU TTY" "Provide a separate terminal emulator for the target."
|
||||
@ -68,7 +68,6 @@ sleep 0.1
|
||||
-ex "python import ghidragdb" \
|
||||
-ex "file \"$target_image\"" \
|
||||
-ex "set args $target_args" \
|
||||
-ex "set inferior-tty $TTY_TARGET" \
|
||||
-ex "ghidra trace connect \"$GHIDRA_TRACE_RMI_ADDR\"" \
|
||||
-ex "ghidra trace start" \
|
||||
-ex "ghidra trace sync-enable" \
|
||||
|
@ -0,0 +1,41 @@
|
||||
::@title raw gdb
|
||||
::@no-image
|
||||
::@desc <html><body width="300px">
|
||||
::@desc <h3>Start <tt>gdb</tt></h3>
|
||||
::@desc <p>
|
||||
::@desc This will start <tt>gdb</tt> and connect to it.
|
||||
::@desc It will not launch a target, so you can (must) set up your target manually.
|
||||
::@desc For setup instructions, press <b>F1</b>.
|
||||
::@desc </p>
|
||||
::@desc </body></html>
|
||||
::@menu-group raw
|
||||
::@icon icon.debugger
|
||||
::@help TraceRmiLauncherServicePlugin#gdb_raw
|
||||
::@env OPT_GDB_PATH:file="gdb" "gdb command" "The path to gdb. Omit the full path to resolve using the system PATH."
|
||||
::@env OPT_ARCH:str="i386:x86-64" "Architecture" "Target architecture"
|
||||
|
||||
@echo off
|
||||
set PYTHONPATH0=%GHIDRA_HOME%\Ghidra\Debug\Debugger-agent-gdb\pypkg\src
|
||||
set PYTHONPATH1=%GHIDRA_HOME%\Ghidra\Debug\Debugger-rmi-trace\pypkg\src
|
||||
IF EXIST %GHIDRA_HOME%\.git (
|
||||
set PYTHONPATH0=%GHIDRA_HOME%\Ghidra\Debug\Debugger-agent-gdb\build\pypkg\src
|
||||
set PYTHONPATH1=%GHIDRA_HOME%\Ghidra\Debug\Debugger-rmi-trace\build\pypkg\src
|
||||
)
|
||||
IF EXIST %GHIDRA_HOME%\ghidra\.git (
|
||||
set PYTHONPATH0=%GHIDRA_HOME%\ghidra\Ghidra\Debug\Debugger-agent-gdb\build\pypkg\src
|
||||
set PYTHONPATH1=%GHIDRA_HOME%\ghidra\Ghidra\Debug\Debugger-rmi-trace\build\pypkg\src
|
||||
)
|
||||
set PYTHONPATH=%PYTHONPATH1%;%PYTHONPATH0%;%PYTHONPATH%
|
||||
|
||||
"%OPT_GDB_PATH%" ^
|
||||
-q ^
|
||||
-ex "set pagination off" ^
|
||||
-ex "set confirm off" ^
|
||||
-ex "show version" ^
|
||||
-ex "python import ghidragdb" ^
|
||||
-ex "set architecture %OPT_ARCH%" ^
|
||||
-ex "ghidra trace connect '%GHIDRA_TRACE_RMI_ADDR%'" ^
|
||||
-ex "ghidra trace start" ^
|
||||
-ex "ghidra trace sync-enable" ^
|
||||
-ex "set confirm on" ^
|
||||
-ex "set pagination on"
|
@ -0,0 +1,53 @@
|
||||
::@title remote gdb
|
||||
::@no-image
|
||||
::@desc <html><body width="300px">
|
||||
::@desc <h3>Launch with local <tt>gdb</tt> and connect to a stub (e.g., <tt>gdbserver</tt>)</h3>
|
||||
::@desc <p>
|
||||
::@desc This will start <tt>gdb</tt> on the local system and then use it to connect to the remote system.
|
||||
::@desc For setup instructions, press <b>F1</b>.
|
||||
::@desc </p>
|
||||
::@desc </body></html>
|
||||
::@menu-group remote
|
||||
::@icon icon.debugger
|
||||
::@help TraceRmiLauncherServicePlugin#gdb_remote
|
||||
::@enum TargetType:str remote extended-remote
|
||||
::@env OPT_TARGET_TYPE:TargetType="remote" "Target" "The type of remote target"
|
||||
::@env OPT_HOST:str="localhost" "Host" "The hostname of the target"
|
||||
::@env OPT_PORT:int=9999 "Port" "The host's listening port"
|
||||
::@env OPT_ARCH:str="" "Architecture (optional)" "Target architecture override"
|
||||
::@env OPT_GDB_PATH:file="gdb" "gdb command" "The path to gdb on the local system. Omit the full path to resolve using the system PATH."
|
||||
|
||||
@echo off
|
||||
set PYTHONPATH0=%GHIDRA_HOME%\Ghidra\Debug\Debugger-agent-gdb\pypkg\src
|
||||
set PYTHONPATH1=%GHIDRA_HOME%\Ghidra\Debug\Debugger-rmi-trace\pypkg\src
|
||||
IF EXIST %GHIDRA_HOME%\.git (
|
||||
set PYTHONPATH0=%GHIDRA_HOME%\Ghidra\Debug\Debugger-agent-gdb\build\pypkg\src
|
||||
set PYTHONPATH1=%GHIDRA_HOME%\Ghidra\Debug\Debugger-rmi-trace\build\pypkg\src
|
||||
)
|
||||
IF EXIST %GHIDRA_HOME%\ghidra\.git (
|
||||
set PYTHONPATH0=%GHIDRA_HOME%\ghidra\Ghidra\Debug\Debugger-agent-gdb\build\pypkg\src
|
||||
set PYTHONPATH1=%GHIDRA_HOME%\ghidra\Ghidra\Debug\Debugger-rmi-trace\build\pypkg\src
|
||||
)
|
||||
set PYTHONPATH=%PYTHONPATH1%;%PYTHONPATH0%;%PYTHONPATH%
|
||||
|
||||
IF "%OPT_ARCH%"=="" (
|
||||
set archcmd=
|
||||
) ELSE (
|
||||
set archcmd=-ex "set arch %OPT_ARCH%"
|
||||
)
|
||||
|
||||
"%OPT_GDB_PATH%" ^
|
||||
-q ^
|
||||
-ex "set pagination off" ^
|
||||
-ex "set confirm off" ^
|
||||
-ex "show version" ^
|
||||
-ex "python import ghidragdb" ^
|
||||
%archcmd% ^
|
||||
-ex "echo Connecting to %OPT_HOST%:%OPT_PORT%... " ^
|
||||
-ex "target %OPT_TARGET_TYPE% %OPT_HOST%:%OPT_PORT%" ^
|
||||
-ex "ghidra trace connect '%GHIDRA_TRACE_RMI_ADDR%'" ^
|
||||
-ex "ghidra trace start" ^
|
||||
-ex "ghidra trace sync-enable" ^
|
||||
-ex "ghidra trace sync-synth-stopped" ^
|
||||
-ex "set confirm on" ^
|
||||
-ex "set pagination on"
|
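In remote-gdb.bat above, %archcmd% stays empty when OPT_ARCH is blank, so the optional -ex "set arch ..." pair is only inserted when an architecture override is requested. The same conditional argument assembly, sketched in Java purely for illustration (none of this is Ghidra API; the option names come from the script):

    import java.util.ArrayList;
    import java.util.List;

    public class RemoteGdbArgsSketch {
        static List<String> buildGdbArgs(String gdbPath, String arch, String host, int port) {
            List<String> cmd = new ArrayList<>(List.of(gdbPath, "-q",
                    "-ex", "set pagination off",
                    "-ex", "set confirm off"));
            if (arch != null && !arch.isBlank()) {
                cmd.add("-ex");                  // equivalent of: set archcmd=-ex "set arch %OPT_ARCH%"
                cmd.add("set arch " + arch);
            }
            cmd.add("-ex");
            cmd.add("target remote " + host + ":" + port);
            return cmd;
        }

        public static void main(String[] args) {
            System.out.println(buildGdbArgs("gdb", "", "localhost", 9999));
            System.out.println(buildGdbArgs("gdb", "i386:x86-64", "localhost", 9999));
        }
    }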
@ -0,0 +1,39 @@
|
||||
::@timeout 60000
|
||||
::@title gdb via ssh
|
||||
::@desc <html><body width="300px">
|
||||
::@desc <h3>Launch with <tt>gdb</tt> via <tt>ssh</tt></h3>
|
||||
::@desc <p>
|
||||
::@desc This will launch the target on a remote machine using <tt>gdb</tt> via <tt>ssh</tt>.
|
||||
::@desc For setup instructions, press <b>F1</b>.
|
||||
::@desc </p>
|
||||
::@desc </body></html>
|
||||
::@menu-group remote
|
||||
::@icon icon.debugger
|
||||
::@help TraceRmiLauncherServicePlugin#gdb_ssh
|
||||
::@enum StartCmd:str run start starti
|
||||
::@env OPT_TARGET_IMG:str="" "Image" "The target binary executable image on the remote system"
|
||||
::@env OPT_TARGET_ARGS:str="" "Arguments" "Command-line arguments to pass to the target"
|
||||
::@env OPT_SSH_PATH:file="ssh" "ssh command" "The path to ssh on the local system. Omit the full path to resolve using the system PATH."
|
||||
::@env OPT_HOST:str="localhost" "[User@]Host" "The hostname or user@host"
|
||||
::@env OPT_REMOTE_PORT:int=12345 "Remote Trace RMI Port" "A free port on the remote end to receive and forward the Trace RMI connection."
|
||||
::@env OPT_EXTRA_SSH_ARGS:str="" "Extra ssh arguments" "Extra arguments to pass to ssh. Use with care."
|
||||
::@env OPT_GDB_PATH:str="gdb" "gdb command" "The path to gdb on the remote system. Omit the full path to resolve using the system PATH."
|
||||
::@env OPT_START_CMD:StartCmd="starti" "Run command" "The gdb command to actually run the target."
|
||||
|
||||
@echo off
|
||||
set cmd=TERM='%TERM%' '%OPT_GDB_PATH%' ^
|
||||
-q ^
|
||||
-ex 'set pagination off' ^
|
||||
-ex 'set confirm off' ^
|
||||
-ex 'show version' ^
|
||||
-ex 'python import ghidragdb' ^
|
||||
-ex 'file \"%OPT_TARGET_IMG%\"' ^
|
||||
-ex 'set args %OPT_TARGET_ARGS%' ^
|
||||
-ex 'ghidra trace connect \"localhost:%OPT_REMOTE_PORT%\"' ^
|
||||
-ex 'ghidra trace start' ^
|
||||
-ex 'ghidra trace sync-enable' ^
|
||||
-ex '%OPT_START_CMD%' ^
|
||||
-ex 'set confirm on' ^
|
||||
-ex 'set pagination on'
|
||||
|
||||
"%OPT_SSH_PATH%" "-R%OPT_REMOTE_PORT%:%GHIDRA_TRACE_RMI_ADDR%" -t %OPT_EXTRA_SSH_ARGS% "%OPT_HOST%" "%cmd%"
|
@ -1,18 +1,18 @@
|
||||
#!/usr/bin/bash
|
||||
## ###
|
||||
# IP: GHIDRA
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# IP: GHIDRA
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
##
|
||||
#@timeout 60000
|
||||
#@title gdb via ssh
|
||||
@ -29,6 +29,7 @@
|
||||
#@enum StartCmd:str run start starti
|
||||
#@arg :str "Image" "The target binary executable image on the remote system"
|
||||
#@args "Arguments" "Command-line arguments to pass to the target"
|
||||
#@env OPT_SSH_PATH:file="ssh" "ssh command" "The path to ssh on the local system. Omit the full path to resolve using the system PATH."
|
||||
#@env OPT_HOST:str="localhost" "[User@]Host" "The hostname or user@host"
|
||||
#@env OPT_REMOTE_PORT:int=12345 "Remote Trace RMI Port" "A free port on the remote end to receive and forward the Trace RMI connection."
|
||||
#@env OPT_EXTRA_SSH_ARGS:str="" "Extra ssh arguments" "Extra arguments to pass to ssh. Use with care."
|
||||
@ -39,7 +40,7 @@ target_image="$1"
|
||||
shift
|
||||
target_args="$@"
|
||||
|
||||
ssh "-R$OPT_REMOTE_PORT:$GHIDRA_TRACE_RMI_ADDR" -t $OPT_EXTRA_SSH_ARGS "$OPT_HOST" "TERM='$TERM' '$OPT_GDB_PATH' \
|
||||
"$OPT_SSH_PATH" "-R$OPT_REMOTE_PORT:$GHIDRA_TRACE_RMI_ADDR" -t $OPT_EXTRA_SSH_ARGS "$OPT_HOST" "TERM='$TERM' '$OPT_GDB_PATH' \
|
||||
-q \
|
||||
-ex 'set pagination off' \
|
||||
-ex 'set confirm off' \
|
||||
|
@ -0,0 +1,47 @@
|
||||
::@timeout 60000
|
||||
::@title gdb + gdbserver via ssh
|
||||
::@desc <html><body width="300px">
|
||||
::@desc <h3>Launch with local <tt>gdb</tt> and <tt>gdbserver</tt> via <tt>ssh</tt></h3>
|
||||
::@desc <p>
|
||||
::@desc This will start <tt>gdb</tt> on the local system and then use it to connect and launch the target in <tt>gdbserver</tt> on the remote system via <tt>ssh</tt>.
|
||||
::@desc For setup instructions, press <b>F1</b>.
|
||||
::@desc </p>
|
||||
::@desc </body></html>
|
||||
::@menu-group remote
|
||||
::@icon icon.debugger
|
||||
::@help TraceRmiLauncherServicePlugin#gdb_gdbserver_ssh
|
||||
::@env OPT_TARGET_IMG:str="" "Image" "The target binary executable image on the remote system"
|
||||
::@env OPT_TARGET_ARGS:str="" "Arguments" "Command-line arguments to pass to the target"
|
||||
::@env OPT_SSH_PATH:file="ssh" "ssh command" "The path to ssh on the local system. Omit the full path to resolve using the system PATH."
|
||||
::@env OPT_HOST:str="localhost" "[User@]Host" "The hostname or user@host"
|
||||
::@env OPT_EXTRA_SSH_ARGS:str="" "Extra ssh arguments" "Extra arguments to pass to ssh. Use with care."
|
||||
::@env OPT_GDBSERVER_PATH:str="gdbserver" "gdbserver command (remote)" "The path to gdbserver on the remote system. Omit the full path to resolve using the system PATH."
|
||||
::@env OPT_EXTRA_GDBSERVER_ARGS:str="" "Extra gdbserver arguments" "Extra arguments to pass to gdbserver. Use with care."
|
||||
::@env OPT_GDB_PATH:file="gdb" "gdb command" "The path to gdb on the local system. Omit the full path to resolve using the system PATH."
|
||||
|
||||
@echo off
|
||||
set PYTHONPATH0=%GHIDRA_HOME%\Ghidra\Debug\Debugger-agent-gdb\pypkg\src
|
||||
set PYTHONPATH1=%GHIDRA_HOME%\Ghidra\Debug\Debugger-rmi-trace\pypkg\src
|
||||
IF EXIST %GHIDRA_HOME%\.git (
|
||||
set PYTHONPATH0=%GHIDRA_HOME%\Ghidra\Debug\Debugger-agent-gdb\build\pypkg\src
|
||||
set PYTHONPATH1=%GHIDRA_HOME%\Ghidra\Debug\Debugger-rmi-trace\build\pypkg\src
|
||||
)
|
||||
IF EXIST %GHIDRA_HOME%\ghidra\.git (
|
||||
set PYTHONPATH0=%GHIDRA_HOME%\ghidra\Ghidra\Debug\Debugger-agent-gdb\build\pypkg\src
|
||||
set PYTHONPATH1=%GHIDRA_HOME%\ghidra\Ghidra\Debug\Debugger-rmi-trace\build\pypkg\src
|
||||
)
|
||||
set PYTHONPATH=%PYTHONPATH1%;%PYTHONPATH0%;%PYTHONPATH%
|
||||
|
||||
"%OPT_GDB_PATH%" ^
|
||||
-q ^
|
||||
-ex "set pagination off" ^
|
||||
-ex "set confirm off" ^
|
||||
-ex "show version" ^
|
||||
-ex "python import ghidragdb" ^
|
||||
-ex "target remote | '%OPT_SSH_PATH%' %OPT_EXTRA_SSH_ARGS% '%OPT_HOST%' '%OPT_GDBSERVER_PATH%' %OPT_EXTRA_GDBSERVER_ARGS% - '%OPT_TARGET_IMG%' %OPT_TARGET_ARGS%" ^
|
||||
-ex "ghidra trace connect '%GHIDRA_TRACE_RMI_ADDR%'" ^
|
||||
-ex "ghidra trace start" ^
|
||||
-ex "ghidra trace sync-enable" ^
|
||||
-ex "ghidra trace sync-synth-stopped" ^
|
||||
-ex "set confirm on" ^
|
||||
-ex "set pagination on"
|
@ -1,18 +1,18 @@
|
||||
#!/usr/bin/bash
|
||||
## ###
|
||||
# IP: GHIDRA
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# IP: GHIDRA
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
##
|
||||
#@timeout 60000
|
||||
#@title gdb + gdbserver via ssh
|
||||
@ -28,6 +28,7 @@
|
||||
#@help TraceRmiLauncherServicePlugin#gdb_gdbserver_ssh
|
||||
#@arg :str "Image" "The target binary executable image on the remote system"
|
||||
#@args "Arguments" "Command-line arguments to pass to the target"
|
||||
#@env OPT_SSH_PATH:file="ssh" "ssh command" "The path to ssh on the local system. Omit the full path to resolve using the system PATH."
|
||||
#@env OPT_HOST:str="localhost" "[User@]Host" "The hostname or user@host"
|
||||
#@env OPT_EXTRA_SSH_ARGS:str="" "Extra ssh arguments" "Extra arguments to pass to ssh. Use with care."
|
||||
#@env OPT_GDBSERVER_PATH:str="gdbserver" "gdbserver command (remote)" "The path to gdbserver on the remote system. Omit the full path to resolve using the system PATH."
|
||||
@ -53,8 +54,7 @@ fi
|
||||
-ex "set confirm off" \
|
||||
-ex "show version" \
|
||||
-ex "python import ghidragdb" \
|
||||
-ex "set inferior-tty $TTY_TARGET" \
|
||||
-ex "target remote | ssh $OPT_EXTRA_SSH_ARGS '$OPT_HOST' '$OPT_GDBSERVER_PATH' $OPT_EXTRA_GDBSERVER_ARGS - $@" \
|
||||
-ex "target remote | '$OPT_SSH_PATH' $OPT_EXTRA_SSH_ARGS '$OPT_HOST' '$OPT_GDBSERVER_PATH' $OPT_EXTRA_GDBSERVER_ARGS - $@" \
|
||||
-ex "ghidra trace connect \"$GHIDRA_TRACE_RMI_ADDR\"" \
|
||||
-ex "ghidra trace start" \
|
||||
-ex "ghidra trace sync-enable" \
|
||||
|
@ -4,9 +4,9 @@
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
@ -17,6 +17,7 @@ package ghidra.app.plugin.core.debug.mapping;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.Comparator;
|
||||
import java.util.Map.Entry;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import ghidra.app.plugin.core.debug.disassemble.DisassemblyInject;
|
||||
@ -27,7 +28,11 @@ import ghidra.framework.plugintool.PluginTool;
|
||||
import ghidra.program.model.address.*;
|
||||
import ghidra.program.model.lang.Endian;
|
||||
import ghidra.trace.model.Trace;
|
||||
import ghidra.trace.model.TraceAddressSnapRange;
|
||||
import ghidra.trace.model.guest.TracePlatform;
|
||||
import ghidra.trace.model.listing.TraceInstruction;
|
||||
import ghidra.trace.model.memory.TraceMemoryOperations;
|
||||
import ghidra.trace.model.memory.TraceMemoryState;
|
||||
import ghidra.trace.model.target.TraceObject;
|
||||
import ghidra.trace.model.thread.TraceThread;
|
||||
import ghidra.util.classfinder.ClassSearcher;
|
||||
@ -61,7 +66,13 @@ public abstract class AbstractDebuggerPlatformMapper implements DebuggerPlatform
|
||||
}
|
||||
|
||||
protected boolean isCancelSilently(Address start, long snap) {
|
||||
return trace.getCodeManager().instructions().getAt(snap, start) != null;
|
||||
TraceInstruction exists = trace.getCodeManager().instructions().getAt(snap, start);
|
||||
if (exists == null) {
|
||||
return false;
|
||||
}
|
||||
var states = trace.getMemoryManager().getStates(snap, exists.getRange());
|
||||
return TraceMemoryOperations.isStateEntirely(exists.getRange(), states,
|
||||
TraceMemoryState.KNOWN);
|
||||
}
|
||||
|
||||
protected Collection<DisassemblyInject> getDisassemblyInjections(TracePlatform platform) {
|
||||
|
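The reworked isCancelSilently above no longer skips disassembly merely because an instruction already exists at the address; it also requires the instruction's byte range to be entirely in the KNOWN memory state. A hedged sketch of that check as a free-standing helper, using only calls that appear in this diff (getAt, getStates, and the new TraceMemoryOperations.isStateEntirely); the fully qualified names match the imports shown above:

    // Sketch only: mirrors the logic of isCancelSilently; 'trace' is a ghidra.trace.model.Trace.
    static boolean hasFullyKnownInstruction(ghidra.trace.model.Trace trace,
            ghidra.program.model.address.Address start, long snap) {
        var exists = trace.getCodeManager().instructions().getAt(snap, start);
        if (exists == null) {
            return false; // nothing there yet, so disassembly should proceed
        }
        // Only KNOWN bytes over the whole instruction range justify skipping re-disassembly.
        var states = trace.getMemoryManager().getStates(snap, exists.getRange());
        return ghidra.trace.model.memory.TraceMemoryOperations.isStateEntirely(
            exists.getRange(), states, ghidra.trace.model.memory.TraceMemoryState.KNOWN);
    }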
@ -4,9 +4,9 @@
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
@ -43,7 +43,8 @@ import ghidra.app.plugin.core.debug.gui.DebuggerResources;
|
||||
import ghidra.app.services.*;
|
||||
import ghidra.async.AsyncLazyMap;
|
||||
import ghidra.debug.api.control.ControlMode;
|
||||
import ghidra.debug.api.emulation.*;
|
||||
import ghidra.debug.api.emulation.DebuggerPcodeEmulatorFactory;
|
||||
import ghidra.debug.api.emulation.DebuggerPcodeMachine;
|
||||
import ghidra.debug.api.tracemgr.DebuggerCoordinates;
|
||||
import ghidra.framework.plugintool.*;
|
||||
import ghidra.framework.plugintool.annotation.AutoServiceConsumed;
|
||||
@ -781,7 +782,12 @@ public class DebuggerEmulationServicePlugin extends Plugin implements DebuggerEm
|
||||
TraceThread eventThread = key.time.getEventThread(key.trace);
|
||||
be.ce.emulator().setSoftwareInterruptMode(SwiMode.IGNORE_STEP);
|
||||
RunResult result = scheduler.run(key.trace, eventThread, be.ce.emulator(), monitor);
|
||||
key = new CacheKey(key.platform, key.time.advanced(result.schedule()));
|
||||
if (result.schedule().hasSteps()) {
|
||||
key = new CacheKey(key.platform, key.time.dropPSteps().advanced(result.schedule()));
|
||||
}
|
||||
else {
|
||||
key = new CacheKey(key.platform, key.time.advanced(result.schedule()));
|
||||
}
|
||||
Msg.info(this, "Stopped emulation at " + key.time);
|
||||
TraceSnapshot destSnap = writeToScratch(key, be.ce);
|
||||
cacheEmulator(key, be.ce);
|
||||
|
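The change above only drops pending p-code steps from the cache key's time when the completed run advanced by whole instructions; otherwise the new schedule is appended as before, so a stop in the middle of an instruction is not double-counted. A fragment illustrating the composition, using only TraceSchedule calls that appear in this commit (hasSteps, dropPSteps, advanced); the exact semantics of dropPSteps are assumed from this context:

    // Sketch only: 'time' is the key's current schedule, 'ran' is result.schedule() from the run.
    static TraceSchedule nextCacheTime(TraceSchedule time, TraceSchedule ran) {
        return ran.hasSteps()
                ? time.dropPSteps().advanced(ran) // whole instructions ran: discard stale p-code steps
                : time.advanced(ran);             // p-code-only progress: just append
    }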
@ -38,11 +38,11 @@ import ghidra.app.plugin.core.debug.service.platform.DebuggerPlatformServicePlug
|
||||
import ghidra.app.services.DebuggerEmulationService.EmulationResult;
|
||||
import ghidra.app.services.DebuggerStaticMappingService;
|
||||
import ghidra.app.services.DebuggerTraceManagerService.ActivationCause;
|
||||
import ghidra.debug.api.emulation.DebuggerPcodeMachine;
|
||||
import ghidra.debug.api.platform.DebuggerPlatformMapper;
|
||||
import ghidra.debug.api.tracemgr.DebuggerCoordinates;
|
||||
import ghidra.pcode.emu.PcodeThread;
|
||||
import ghidra.pcode.exec.DecodePcodeExecutionException;
|
||||
import ghidra.pcode.exec.InterruptPcodeExecutionException;
|
||||
import ghidra.pcode.exec.*;
|
||||
import ghidra.pcode.utils.Utils;
|
||||
import ghidra.program.model.address.Address;
|
||||
import ghidra.program.model.address.AddressSpace;
|
||||
@ -489,25 +489,180 @@ public class DebuggerEmulationServiceTest extends AbstractGhidraHeadedDebuggerTe
|
||||
trace.getBreakpointManager()
|
||||
.addBreakpoint("Breakpoints[0]", Lifespan.nowOn(0), addrText, Set.of(thread),
|
||||
Set.of(TraceBreakpointKind.SW_EXECUTE), true, "test");
|
||||
trace.getBreakpointManager()
|
||||
TraceBreakpoint tb = trace.getBreakpointManager()
|
||||
.addBreakpoint("Breakpoints[1]", Lifespan.nowOn(0), addrI1, Set.of(thread),
|
||||
Set.of(TraceBreakpointKind.SW_EXECUTE), true, "test");
|
||||
// Force "partial instruction"
|
||||
tb.setEmuSleigh("""
|
||||
r1 = 0xbeef;
|
||||
emu_swi();
|
||||
emu_exec_decoded();
|
||||
""");
|
||||
trace.getBreakpointManager()
|
||||
.addBreakpoint("Breakpoints[2]", Lifespan.nowOn(0), addrI2, Set.of(thread),
|
||||
Set.of(TraceBreakpointKind.SW_EXECUTE), true, "test");
|
||||
}
|
||||
|
||||
assertEquals(0, emulationPlugin.cache.size());
|
||||
|
||||
// This is already testing if the one set at the entry is ignored
|
||||
EmulationResult result1 = emulationPlugin.run(trace.getPlatformManager().getHostPlatform(),
|
||||
TraceSchedule.snap(0), monitor, Scheduler.oneThread(thread));
|
||||
assertEquals(TraceSchedule.snap(0).steppedForward(thread, 1), result1.schedule());
|
||||
assertEquals(TraceSchedule.snap(0)
|
||||
.steppedForward(thread, 1)
|
||||
.steppedPcodeForward(thread, 2),
|
||||
result1.schedule());
|
||||
assertTrue(result1.error() instanceof InterruptPcodeExecutionException);
|
||||
|
||||
// Save this for comparison later
|
||||
DebuggerPcodeMachine<?> emu = Unique.assertOne(emulationPlugin.cache.values()).emulator();
|
||||
|
||||
// This will test if the one just hit gets ignored
|
||||
EmulationResult result2 = emulationPlugin.run(trace.getPlatformManager().getHostPlatform(),
|
||||
result1.schedule(), monitor, Scheduler.oneThread(thread));
|
||||
assertEquals(TraceSchedule.snap(0).steppedForward(thread, 2), result2.schedule());
|
||||
assertTrue(result1.error() instanceof InterruptPcodeExecutionException);
|
||||
|
||||
// For efficiency, esp. after a long run, make sure we used the same emulator
|
||||
assertSame(emu, Unique.assertOne(emulationPlugin.cache.values()).emulator());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testStepAfterExecutionBreakpoint() throws Exception {
|
||||
createProgram();
|
||||
intoProject(program);
|
||||
Assembler asm = Assemblers.getAssembler(program);
|
||||
Memory memory = program.getMemory();
|
||||
Address addrText = addr(program, 0x00400000);
|
||||
Address addrI1;
|
||||
try (Transaction tx = program.openTransaction("Initialize")) {
|
||||
MemoryBlock blockText = memory.createInitializedBlock(".text", addrText, 0x1000,
|
||||
(byte) 0, TaskMonitor.DUMMY, false);
|
||||
blockText.setExecute(true);
|
||||
InstructionIterator ii = asm.assemble(addrText,
|
||||
"mov r0, r0",
|
||||
"mov r0, r1",
|
||||
"mov r2, r0");
|
||||
ii.next(); // addrText
|
||||
addrI1 = ii.next().getMinAddress();
|
||||
}
|
||||
|
||||
programManager.openProgram(program);
|
||||
waitForSwing();
|
||||
codeBrowser.goTo(new ProgramLocation(program, addrText));
|
||||
waitForSwing();
|
||||
|
||||
performEnabledAction(codeBrowser.getProvider(), emulationPlugin.actionEmulateProgram, true);
|
||||
|
||||
Trace trace = traceManager.getCurrentTrace();
|
||||
assertNotNull(trace);
|
||||
|
||||
TraceThread thread = Unique.assertOne(trace.getThreadManager().getAllThreads());
|
||||
|
||||
try (Transaction tx = trace.openTransaction("Add breakpoint")) {
|
||||
trace.getBreakpointManager()
|
||||
.addBreakpoint("Breakpoints[0]", Lifespan.nowOn(0), addrText, Set.of(thread),
|
||||
Set.of(TraceBreakpointKind.SW_EXECUTE), true, "test");
|
||||
TraceBreakpoint tb = trace.getBreakpointManager()
|
||||
.addBreakpoint("Breakpoints[1]", Lifespan.nowOn(0), addrI1, Set.of(thread),
|
||||
Set.of(TraceBreakpointKind.SW_EXECUTE), true, "test");
|
||||
// Force "partial instruction"
|
||||
tb.setEmuSleigh("""
|
||||
r1 = 0xbeef;
|
||||
emu_swi();
|
||||
emu_exec_decoded();
|
||||
""");
|
||||
}
|
||||
|
||||
assertEquals(0, emulationPlugin.cache.size());
|
||||
|
||||
// This is already testing if the one set at the entry is ignored
|
||||
EmulationResult result1 = emulationPlugin.run(trace.getPlatformManager().getHostPlatform(),
|
||||
TraceSchedule.snap(0), monitor, Scheduler.oneThread(thread));
|
||||
assertEquals(TraceSchedule.snap(0)
|
||||
.steppedForward(thread, 1)
|
||||
.steppedPcodeForward(thread, 2),
|
||||
result1.schedule());
|
||||
assertTrue(result1.error() instanceof InterruptPcodeExecutionException);
|
||||
|
||||
// Save this for comparison later
|
||||
DebuggerPcodeMachine<?> emu = Unique.assertOne(emulationPlugin.cache.values()).emulator();
|
||||
|
||||
// Now, step it forward to complete the instruction
|
||||
emulationPlugin.emulate(trace.getPlatformManager().getHostPlatform(),
|
||||
TraceSchedule.snap(0).steppedForward(thread, 2), monitor);
|
||||
|
||||
// For efficiency, esp. after a long run, make sure we used the same emulator
|
||||
assertSame(emu, Unique.assertOne(emulationPlugin.cache.values()).emulator());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testStuckAtUserop() throws Exception {
|
||||
createProgram();
|
||||
intoProject(program);
|
||||
Assembler asm = Assemblers.getAssembler(program);
|
||||
Memory memory = program.getMemory();
|
||||
Address addrText = addr(program, 0x00400000);
|
||||
Address addrI1;
|
||||
try (Transaction tx = program.openTransaction("Initialize")) {
|
||||
MemoryBlock blockText = memory.createInitializedBlock(".text", addrText, 0x1000,
|
||||
(byte) 0, TaskMonitor.DUMMY, false);
|
||||
blockText.setExecute(true);
|
||||
InstructionIterator ii = asm.assemble(addrText,
|
||||
"mov r0, r0",
|
||||
"mov r0, r1",
|
||||
"mov r2, r0");
|
||||
ii.next(); // addrText
|
||||
addrI1 = ii.next().getMinAddress();
|
||||
}
|
||||
|
||||
programManager.openProgram(program);
|
||||
waitForSwing();
|
||||
codeBrowser.goTo(new ProgramLocation(program, addrText));
|
||||
waitForSwing();
|
||||
|
||||
performEnabledAction(codeBrowser.getProvider(), emulationPlugin.actionEmulateProgram, true);
|
||||
|
||||
Trace trace = traceManager.getCurrentTrace();
|
||||
assertNotNull(trace);
|
||||
|
||||
TraceThread thread = Unique.assertOne(trace.getThreadManager().getAllThreads());
|
||||
|
||||
try (Transaction tx = trace.openTransaction("Add breakpoint")) {
|
||||
TraceBreakpoint tb = trace.getBreakpointManager()
|
||||
.addBreakpoint("Breakpoints[1]", Lifespan.nowOn(0), addrI1, Set.of(thread),
|
||||
Set.of(TraceBreakpointKind.SW_EXECUTE), true, "test");
|
||||
// Force "partial instruction"
|
||||
tb.setEmuSleigh("""
|
||||
r1 = 0xbeef;
|
||||
pcodeop_one(r1);
|
||||
emu_exec_decoded();
|
||||
""");
|
||||
}
|
||||
|
||||
TraceSchedule stuck = TraceSchedule.snap(0)
|
||||
.steppedForward(thread, 1)
|
||||
.steppedPcodeForward(thread, 2);
|
||||
|
||||
assertEquals(0, emulationPlugin.cache.size());
|
||||
|
||||
// This is already testing if the one set at the entry is ignored
|
||||
EmulationResult result1 = emulationPlugin.run(trace.getPlatformManager().getHostPlatform(),
|
||||
TraceSchedule.snap(0), monitor, Scheduler.oneThread(thread));
|
||||
assertEquals(stuck, result1.schedule());
|
||||
assertTrue(result1.error() instanceof PcodeExecutionException);
|
||||
|
||||
// Save this for comparison later
|
||||
DebuggerPcodeMachine<?> emu = Unique.assertOne(emulationPlugin.cache.values()).emulator();
|
||||
|
||||
// We shouldn't get any further
|
||||
EmulationResult result2 = emulationPlugin.run(trace.getPlatformManager().getHostPlatform(),
|
||||
result1.schedule(), monitor, Scheduler.oneThread(thread));
|
||||
assertEquals(stuck, result2.schedule());
|
||||
assertTrue(result1.error() instanceof PcodeExecutionException);
|
||||
|
||||
// For efficiency, esp. after a long run, make sure we used the same emulator
|
||||
assertSame(emu, Unique.assertOne(emulationPlugin.cache.values()).emulator());
|
||||
}
|
||||
|
||||
@Test
|
||||
@ -817,6 +972,7 @@ public class DebuggerEmulationServiceTest extends AbstractGhidraHeadedDebuggerTe
|
||||
newSnap, program, tb.addr(0x00400000), addr(program, 0x00400000));
|
||||
newTraceThread.setName("MyThread");
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
PcodeThread<byte[]> newEmuThread = emulator.newThread(newTraceThread.getPath());
|
||||
}
|
||||
}
|
||||
|
@ -5,7 +5,7 @@
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
@ -132,33 +132,27 @@ public class ShellUtils {
|
||||
if (args.isEmpty()) {
|
||||
return "";
|
||||
}
|
||||
StringBuilder line = new StringBuilder(args.get(0));
|
||||
StringBuilder line = new StringBuilder(genreateArgument(args.get(0)));
|
||||
for (int i = 1; i < args.size(); i++) {
|
||||
String a = args.get(i);
|
||||
if (a.contains(" ")) {
|
||||
if (a.contains("\"")) {
|
||||
if (a.contains("'")) {
|
||||
line.append(" \"");
|
||||
line.append(a.replace("\"", "\\\""));
|
||||
line.append("\"");
|
||||
continue;
|
||||
}
|
||||
line.append(" '");
|
||||
line.append(a);
|
||||
line.append("'");
|
||||
continue;
|
||||
}
|
||||
line.append(" \"");
|
||||
line.append(a);
|
||||
line.append("\"");
|
||||
continue;
|
||||
}
|
||||
line.append(" ");
|
||||
line.append(a);
|
||||
line.append(" " + genreateArgument(a));
|
||||
}
|
||||
return line.toString();
|
||||
}
|
||||
|
||||
public static String genreateArgument(String a) {
|
||||
if (a.contains(" ")) {
|
||||
if (a.contains("\"")) {
|
||||
if (a.contains("'")) {
|
||||
return "\"" + a.replace("\"", "\\\"") + "\"";
|
||||
}
|
||||
return "'" + a + "'";
|
||||
}
|
||||
return "\"" + a + "\"";
|
||||
}
|
||||
return a;
|
||||
}
|
||||
|
||||
public static String generateEnvBlock(Map<String, String> env) {
|
||||
return env.entrySet()
|
||||
.stream()
|
||||
|
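The refactor above funnels all quoting through a single helper (genreateArgument, spelled as in the diff) so that the first argument, typically the executable path, is quoted by the same rules as the rest, consistent with the Windows path-with-spaces launch fix noted in the change history (GP-5015). A standalone sketch of the quoting rule for illustration; it mirrors the logic shown above rather than calling the Ghidra class directly:

    import java.util.List;
    import java.util.stream.Collectors;

    public class QuoteSketch {
        /** Mirror of the rule above: double-quote args with spaces, fall back to single quotes
         *  when they contain a double quote, and escape when they contain both quote kinds. */
        static String quoteArgument(String a) {
            if (a.contains(" ")) {
                if (a.contains("\"")) {
                    if (a.contains("'")) {
                        return "\"" + a.replace("\"", "\\\"") + "\"";
                    }
                    return "'" + a + "'";
                }
                return "\"" + a + "\"";
            }
            return a;
        }

        public static void main(String[] args) {
            List<String> cmd = List.of("C:\\Program Files\\gdb\\gdb.exe", "-q", "set pagination off");
            System.out.println(cmd.stream().map(QuoteSketch::quoteArgument)
                    .collect(Collectors.joining(" ")));
            // prints: "C:\Program Files\gdb\gdb.exe" -q "set pagination off"
        }
    }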
@ -4,9 +4,9 @@
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
@ -16,6 +16,7 @@
|
||||
package ghidra.trace.database.listing;
|
||||
|
||||
import java.util.*;
|
||||
import java.util.Map.Entry;
|
||||
|
||||
import org.apache.commons.lang3.tuple.Pair;
|
||||
|
||||
@ -29,9 +30,13 @@ import ghidra.program.model.util.CodeUnitInsertionException;
|
||||
import ghidra.trace.database.context.DBTraceRegisterContextManager;
|
||||
import ghidra.trace.database.context.DBTraceRegisterContextSpace;
|
||||
import ghidra.trace.database.guest.InternalTracePlatform;
|
||||
import ghidra.trace.database.memory.DBTraceMemoryManager;
|
||||
import ghidra.trace.database.memory.DBTraceMemorySpace;
|
||||
import ghidra.trace.model.*;
|
||||
import ghidra.trace.model.guest.TracePlatform;
|
||||
import ghidra.trace.model.listing.*;
|
||||
import ghidra.trace.model.memory.TraceMemoryOperations;
|
||||
import ghidra.trace.model.memory.TraceMemoryState;
|
||||
import ghidra.trace.util.*;
|
||||
import ghidra.util.LockHold;
|
||||
import ghidra.util.exception.CancelledException;
|
||||
@ -92,7 +97,7 @@ public class DBTraceInstructionsView extends AbstractBaseDBTraceDefinedUnitsView
|
||||
|
||||
protected void truncateOrDelete(TraceInstruction exists) {
|
||||
if (exists.getStartSnap() < lifespan.lmin()) {
|
||||
exists.setEndSnap(lifespan.lmin());
|
||||
exists.setEndSnap(lifespan.lmin() - 1);
|
||||
}
|
||||
else {
|
||||
exists.delete();
|
||||
@ -471,6 +476,15 @@ public class DBTraceInstructionsView extends AbstractBaseDBTraceDefinedUnitsView
|
||||
conflict, conflictCodeUnit, overwrite);
|
||||
}
|
||||
|
||||
protected boolean isKnown(DBTraceMemorySpace ms, long snap, CodeUnit cu) {
|
||||
if (ms == null) {
|
||||
return false;
|
||||
}
|
||||
AddressRange range = new AddressRangeImpl(cu.getMinAddress(), cu.getMaxAddress());
|
||||
var states = ms.getStates(snap, range);
|
||||
return TraceMemoryOperations.isStateEntirely(range, states, TraceMemoryState.KNOWN);
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks the intended locations for conflicts with existing units.
|
||||
*
|
||||
@ -486,6 +500,7 @@ public class DBTraceInstructionsView extends AbstractBaseDBTraceDefinedUnitsView
|
||||
Set<Address> skipDelaySlots) {
|
||||
// NOTE: Partly derived from CodeManager#checkInstructionSet()
|
||||
// Attempted to factor more fluently
|
||||
DBTraceMemoryManager mm = space.trace.getMemoryManager();
|
||||
for (InstructionBlock block : instructionSet) {
|
||||
// If block contains a known error, record its address, and do not proceed beyond it
|
||||
Address errorAddress = null;
|
||||
@ -519,6 +534,12 @@ public class DBTraceInstructionsView extends AbstractBaseDBTraceDefinedUnitsView
|
||||
lastProtoInstr = protoInstr;
|
||||
}
|
||||
CodeUnit existsCu = overlap.getRight();
|
||||
DBTraceMemorySpace ms =
|
||||
mm.getMemorySpace(existsCu.getAddress().getAddressSpace(), false);
|
||||
if (!isKnown(ms, startSnap, existsCu) && existsCu instanceof TraceCodeUnit tcu) {
|
||||
tcu.delete();
|
||||
continue;
|
||||
}
|
||||
int cmp = existsCu.getMinAddress().compareTo(protoInstr.getMinAddress());
|
||||
boolean existsIsInstruction = (existsCu instanceof TraceInstruction);
|
||||
if (cmp == 0 && existsIsInstruction) {
|
||||
@ -552,7 +573,7 @@ public class DBTraceInstructionsView extends AbstractBaseDBTraceDefinedUnitsView
|
||||
}
|
||||
// NOTE: existsIsInstruction implies cmp != 0, so record as off-cut conflict
|
||||
block.setCodeUnitConflict(existsCu.getAddress(), protoInstr.getAddress(),
|
||||
flowFromAddress, existsIsInstruction, existsIsInstruction);
|
||||
flowFromAddress, existsIsInstruction, true);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
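The one-character fix in truncateOrDelete above closes an off-by-one: when a new unit starts at lifespan.lmin(), the surviving older instruction must now end at lifespan.lmin() - 1, so the two units no longer both claim the boundary snap. A tiny worked illustration, plain arithmetic rather than Ghidra API:

    public class EndSnapSketch {
        public static void main(String[] args) {
            long newUnitStart = 10;                // lifespan.lmin() of the incoming unit
            long oldEndBefore = newUnitStart;      // previous code: old unit also covered snap 10
            long oldEndAfter = newUnitStart - 1;   // fixed code: old unit now ends at snap 9
            System.out.println("overlap before fix at snap " + oldEndBefore
                    + "; after the fix the old unit ends at snap " + oldEndAfter);
        }
    }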
@ -4,9 +4,9 @@
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
@ -39,13 +39,17 @@ public class DBTraceAddressSnapRangePropertyMapAddressSetView<T> extends Abstrac
|
||||
private final Predicate<? super T> predicate;
|
||||
|
||||
/**
|
||||
* TODO Document me
|
||||
* Construct an {@link AddressSetView} based on the given map of entries and predicate.
|
||||
*
|
||||
* The caller must reduce the map if only a certain range is desired.
|
||||
* <p>
|
||||
* The spatial map is a 2-dimensional collection of entries, but only the address dimension is
|
||||
* considered. This set behaves as the union of address ranges for all entries whose values pass
|
||||
* the predicate. Typically, the caller reduces the map first.
|
||||
*
|
||||
* @param lock
|
||||
* @param map
|
||||
* @param predicate
|
||||
* @param space the address space of the given map
|
||||
* @param lock a lock to ensure access to the underlying database is synchronized
|
||||
* @param map the map whose entries to test
|
||||
* @param predicate the predicate for testing entry values
|
||||
*/
|
||||
public DBTraceAddressSnapRangePropertyMapAddressSetView(AddressSpace space, ReadWriteLock lock,
|
||||
SpatialMap<TraceAddressSnapRange, T, TraceAddressSnapRangeQuery> map,
|
||||
|
@ -4,9 +4,9 @@
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
@ -515,6 +515,12 @@ public class DBTraceAddressSnapRangePropertyMapTree<T, DR extends AbstractDBTrac
|
||||
Rectangle2DDirection.TOPMOST, TraceAddressSnapRangeQuery::new);
|
||||
}
|
||||
|
||||
public static TraceAddressSnapRangeQuery mostRecent(AddressRange range, Lifespan span) {
|
||||
return intersecting(
|
||||
new ImmutableTraceAddressSnapRange(range, span),
|
||||
Rectangle2DDirection.TOPMOST, TraceAddressSnapRangeQuery::new);
|
||||
}
|
||||
|
||||
public static TraceAddressSnapRangeQuery equalTo(TraceAddressSnapRange shape) {
|
||||
return equalTo(shape, null, TraceAddressSnapRangeQuery::new);
|
||||
}
|
||||
|
@ -4,9 +4,9 @@
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
@ -279,6 +279,13 @@ public class DBTraceMemoryManager extends AbstractDBTraceSpaceBasedManager<DBTra
|
||||
m -> m.getMostRecentStateEntry(snap, address));
|
||||
}
|
||||
|
||||
@Override
|
||||
public Entry<TraceAddressSnapRange, TraceMemoryState> getViewMostRecentStateEntry(long snap,
|
||||
AddressRange range, Predicate<TraceMemoryState> predicate) {
|
||||
return delegateRead(range.getAddressSpace(),
|
||||
m -> m.getViewMostRecentStateEntry(snap, range, predicate));
|
||||
}
|
||||
|
||||
@Override
|
||||
public Entry<TraceAddressSnapRange, TraceMemoryState> getViewMostRecentStateEntry(long snap,
|
||||
Address address) {
|
||||
|
@ -406,11 +406,17 @@ public class DBTraceMemorySpace
|
||||
@Override
|
||||
public Entry<TraceAddressSnapRange, TraceMemoryState> getViewMostRecentStateEntry(long snap,
|
||||
Address address) {
|
||||
return getViewMostRecentStateEntry(snap, new AddressRangeImpl(address, address), s -> true);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Entry<TraceAddressSnapRange, TraceMemoryState> getViewMostRecentStateEntry(long snap,
|
||||
AddressRange range, Predicate<TraceMemoryState> predicate) {
|
||||
assertInSpace(range);
|
||||
for (Lifespan span : viewport.getOrderedSpans(snap)) {
|
||||
Entry<TraceAddressSnapRange, TraceMemoryState> entry =
|
||||
stateMapSpace.reduce(TraceAddressSnapRangeQuery.mostRecent(address, span))
|
||||
.firstEntry();
|
||||
if (entry != null) {
|
||||
var entry = stateMapSpace.reduce(TraceAddressSnapRangeQuery.mostRecent(range, span))
|
||||
.firstEntry();
|
||||
if (entry != null && predicate.test(entry.getValue())) {
|
||||
return entry;
|
||||
}
|
||||
}
|
||||
|
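The new DBTraceMemorySpace overload above walks the viewport's spans from most recent to oldest and returns the first state entry covering the range that satisfies the predicate. Later in this commit, createInstruction uses exactly that to find the last snap at which an instruction's bytes were KNOWN; a hedged fragment of the call shape, with the memory manager, snap, and range left as placeholders:

    // Sketch: most recent KNOWN entry covering 'range' at or before 'snap' (may be null).
    var mostRecent = memoryManager.getViewMostRecentStateEntry(
        snap, range, s -> s == TraceMemoryState.KNOWN);
    long knownSnap = mostRecent == null ? snap : mostRecent.getKey().getY2(); // Y2 is the entry's upper snap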
@ -22,6 +22,7 @@ import org.apache.commons.collections4.IteratorUtils;
|
||||
|
||||
import generic.NestedIterator;
|
||||
import ghidra.program.database.ProgramDB;
|
||||
import ghidra.program.database.code.InstructionDB;
|
||||
import ghidra.program.database.function.OverlappingFunctionException;
|
||||
import ghidra.program.model.address.*;
|
||||
import ghidra.program.model.data.DataType;
|
||||
@ -41,6 +42,7 @@ import ghidra.trace.database.thread.DBTraceThread;
|
||||
import ghidra.trace.model.*;
|
||||
import ghidra.trace.model.listing.*;
|
||||
import ghidra.trace.model.memory.TraceMemoryRegion;
|
||||
import ghidra.trace.model.memory.TraceMemoryState;
|
||||
import ghidra.trace.model.program.TraceProgramView;
|
||||
import ghidra.trace.model.program.TraceProgramViewListing;
|
||||
import ghidra.trace.model.property.TracePropertyMapOperations;
|
||||
@ -65,8 +67,8 @@ public abstract class AbstractDBTraceProgramViewListing implements TraceProgramV
|
||||
public int getBytes(ByteBuffer buffer, int addressOffset) {
|
||||
DBTraceMemorySpace mem = trace.getMemoryManager().get(this, false);
|
||||
if (mem == null) {
|
||||
// TODO: 0-fill instead? Will need to check memory space bounds.
|
||||
return 0;
|
||||
buffer.put((byte) 0);
|
||||
return 1;
|
||||
}
|
||||
return mem.getViewBytes(program.snap, address.add(addressOffset), buffer);
|
||||
}
|
||||
@ -717,9 +719,21 @@ public abstract class AbstractDBTraceProgramViewListing implements TraceProgramV
|
||||
public Instruction createInstruction(Address addr, InstructionPrototype prototype,
|
||||
MemBuffer memBuf, ProcessorContextView context, int forcedLengthOverride)
|
||||
throws CodeUnitInsertionException {
|
||||
// TODO: Why memBuf? Can it vary from program memory?
|
||||
int checkLengthOverride =
|
||||
InstructionDB.checkLengthOverride(forcedLengthOverride, prototype);
|
||||
int length = checkLengthOverride != 0 ? checkLengthOverride : prototype.getLength();
|
||||
AddressRange range;
|
||||
try {
|
||||
range = new AddressRangeImpl(addr, length);
|
||||
}
|
||||
catch (AddressOverflowException e) {
|
||||
throw new CodeUnitInsertionException("Code unit would extend beyond address space");
|
||||
}
|
||||
var mostRecent = program.memory.memoryManager.getViewMostRecentStateEntry(program.snap,
|
||||
range, s -> s == TraceMemoryState.KNOWN);
|
||||
long snap = mostRecent == null ? program.snap : mostRecent.getKey().getY2();
|
||||
return codeOperations.instructions()
|
||||
.create(Lifespan.nowOn(program.snap), addr, platform, prototype, context,
|
||||
.create(Lifespan.nowOn(snap), addr, platform, prototype, context,
|
||||
forcedLengthOverride);
|
||||
}
|
||||
|
||||
|
@ -1635,6 +1635,17 @@ public class DBTraceProgramView implements TraceProgramView {
|
||||
}
|
||||
|
||||
protected boolean isCodeVisible(TraceCodeUnit cu, Lifespan lifespan) {
|
||||
try {
|
||||
byte[] cubytes = cu.getBytes();
|
||||
byte[] mmbytes = new byte[cubytes.length];
|
||||
memory.getBytes(cu.getAddress(), mmbytes);
|
||||
if (!Arrays.equals(cubytes, mmbytes)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
catch (MemoryAccessException e) {
|
||||
throw new AssertionError(e);
|
||||
}
|
||||
return viewport.isCompletelyVisible(cu.getRange(), lifespan, cu,
|
||||
getCodeOcclusion(cu.getTraceSpace()));
|
||||
}
|
||||
|
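isCodeVisible above now also hides a code unit whose recorded bytes no longer match what the view's memory currently reads, consistent with the stale-disassembly fixes elsewhere in this merge. The comparison itself is ordinary byte equality; a trivial standalone illustration:

    import java.util.Arrays;

    public class BytesMatchSketch {
        /** A unit stays visible only if the bytes it was disassembled from still match memory. */
        static boolean bytesStillMatch(byte[] unitBytes, byte[] memoryBytes) {
            return Arrays.equals(unitBytes, memoryBytes);
        }

        public static void main(String[] args) {
            byte[] disassembled = { 0x55, 0x48, (byte) 0x89, (byte) 0xE5 };
            byte[] nowInMemory  = { 0x55, 0x48, (byte) 0x89, (byte) 0xE5 };
            System.out.println(bytesStillMatch(disassembled, nowInMemory)); // true
        }
    }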
@ -4,9 +4,9 @@
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
@ -48,7 +48,13 @@ public interface TraceInstructionsView extends TraceBaseDefinedUnitsView<TraceIn
|
||||
/**
|
||||
* Create an instruction for the host platform
|
||||
*
|
||||
* @see #create(Lifespan, Address, TracePlatform, InstructionPrototype, ProcessorContextView)
|
||||
* @param lifespan the lifespan for the instruction unit
|
||||
* @param address the starting address of the instruction
|
||||
* @param prototype the instruction prototype
|
||||
* @param context the input disassembly context for the instruction
|
||||
* @param forcedLengthOverride reduced instruction byte-length (1..7) or 0 to use default length
|
||||
* @return the new instruction
|
||||
* @throws CodeUnitInsertionException if the instruction cannot be created
|
||||
*/
|
||||
default TraceInstruction create(Lifespan lifespan, Address address,
|
||||
InstructionPrototype prototype, ProcessorContextView context, int forcedLengthOverride)
|
||||
@ -76,7 +82,14 @@ public interface TraceInstructionsView extends TraceBaseDefinedUnitsView<TraceIn
|
||||
/**
|
||||
* Create several instructions for the host platform
|
||||
*
|
||||
* @see #addInstructionSet(Lifespan, TracePlatform, InstructionSet, boolean)
|
||||
* <p>
|
||||
* <b>NOTE:</b> This does not throw {@link CodeUnitInsertionException}. Conflicts are instead
|
||||
* recorded in the {@code instructionSet}.
|
||||
*
|
||||
* @param lifespan the lifespan for all instruction units
|
||||
* @param instructionSet the set of instructions to add
|
||||
* @param overwrite true to replace conflicting instructions
|
||||
* @return the (host) address set of instructions actually added
|
||||
*/
|
||||
default AddressSetView addInstructionSet(Lifespan lifespan, InstructionSet instructionSet,
|
||||
boolean overwrite) {
|
||||
|
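The expanded javadoc above emphasizes that addInstructionSet does not throw CodeUnitInsertionException; conflicts are recorded in the passed InstructionSet and the return value is the host address set that was actually added. A hedged usage fragment, with the trace, snap, and instructionSet as placeholders:

    // Sketch: add a batch of instructions without overwriting conflicting existing units.
    AddressSetView added = trace.getCodeManager().instructions()
            .addInstructionSet(Lifespan.nowOn(snap), instructionSet, false);
    // No exception on conflict: inspect the InstructionSet's blocks afterwards for recorded conflicts.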
@ -62,6 +62,38 @@ import ghidra.util.task.TaskMonitor;
 * accidentally rely on implied temporal relationships in scratch space.
 */
public interface TraceMemoryOperations {
	/**
	 * Check if the return value of {@link #getStates(long, AddressRange)} or similar represents a
	 * single entry of the given state.
	 *
	 * <p>
	 * This method returns false if there is not exactly one entry of the given state whose range
	 * covers the given range. As a special case, an empty collection will cause this method to
	 * return true iff state is {@link TraceMemoryState#UNKNOWN}.
	 *
	 * @param range the range to check, usually that passed to
	 *            {@link #getStates(long, AddressRange)}.
	 * @param stateEntries the collection returned by {@link #getStates(long, AddressRange)}.
	 * @param state the expected state
	 * @return true if the state matches
	 */
	static boolean isStateEntirely(AddressRange range,
			Collection<Entry<TraceAddressSnapRange, TraceMemoryState>> stateEntries,
			TraceMemoryState state) {
		if (stateEntries.isEmpty()) {
			return state == TraceMemoryState.UNKNOWN;
		}
		if (stateEntries.size() != 1) {
			return false;
		}
		Entry<TraceAddressSnapRange, TraceMemoryState> ent = stateEntries.iterator().next();
		if (ent.getValue() != state) {
			return false;
		}
		AddressRange entRange = ent.getKey().getRange();
		return entRange.contains(range.getMinAddress()) && entRange.contains(range.getMaxAddress());
	}
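A minimal usage sketch for the helper above, assuming some memory implementing TraceMemoryOperations, a snap, and an AddressRange are already in hand (these surrounding names are illustrative, not taken from this patch):

	// Hedged sketch: 'memory', 'snap', and 'range' are assumed to exist in the caller.
	Collection<Entry<TraceAddressSnapRange, TraceMemoryState>> entries =
		memory.getStates(snap, range);
	if (TraceMemoryOperations.isStateEntirely(range, entries, TraceMemoryState.KNOWN)) {
		// Exactly one KNOWN entry covers the whole range at this snap,
		// so the bytes there can be trusted without further merging.
	}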
|
||||
/**
|
||||
* Get the trace to which the memory manager belongs
|
||||
*
|
||||
@ -261,13 +293,25 @@ public interface TraceMemoryOperations {
|
||||
* Get the entry recording the most recent state at the given snap and address, following
|
||||
* schedule forks
|
||||
*
|
||||
* @param snap the time
|
||||
* @param address the location
|
||||
* @return the state
|
||||
* @param snap the latest time to consider
|
||||
* @param address the address
|
||||
* @return the most-recent entry
|
||||
*/
|
||||
Entry<TraceAddressSnapRange, TraceMemoryState> getViewMostRecentStateEntry(long snap,
|
||||
Address address);
|
||||
|
||||
/**
|
||||
* Get the entry recording the most recent state since the given snap within the given range
|
||||
* that satisfies a given predicate, following schedule forks
|
||||
*
|
||||
* @param snap the latest time to consider
|
||||
* @param range the range of addresses
|
||||
* @param predicate a predicate on the state
|
||||
* @return the most-recent entry
|
||||
*/
|
||||
Entry<TraceAddressSnapRange, TraceMemoryState> getViewMostRecentStateEntry(long snap,
|
||||
AddressRange range, Predicate<TraceMemoryState> predicate);
|
||||
|
||||
/**
|
||||
* Get at least the subset of addresses having state satisfying the given predicate
|
||||
*
|
||||
|
@ -82,6 +82,10 @@ public class PatchStep implements Step {
	/**
	 * Generate a single line of Sleigh
	 *
	 * @param language the target language
	 * @param address the (start) address of the variable
	 * @param data the bytes to write to the variable
	 * @return the Sleigh code
	 * @see #generateSleighLine(Language, Address, byte[], int)
	 */
	public static String generateSleighLine(Language language, Address address, byte[] data) {
@ -169,10 +173,18 @@ public class PatchStep implements Step {
			if (register == null) {
				throw new IllegalArgumentException("Could not find a register for " + min);
			}
			if (register.getBaseRegister().isProgramCounter()) {
				register = register.getBaseRegister();
			}
			int length = register.getNumBytes();
			array.getData(min.getOffset(), data, 0, length);
			array.getData(register.getOffset(), data, 0, length);
			BigInteger value = Utils.bytesToBigInteger(data, length, language.isBigEndian(), false);
			result.add(String.format("%s=0x%s", register, value.toString(16)));
			if (register.isProgramCounter()) {
				result.add(String.format("goto 0x%s", value.toString(16)));
			}
			else {
				result.add(String.format("%s=0x%s", register, value.toString(16)));
			}
			remains.remove(spanOfRegister(register));
		}
	}
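To make the program-counter handling above concrete: a patch that lands on the PC register now serializes to an assignment plus a goto, using the same format strings as the loop. A small stand-alone sketch with an invented register name and value:

	import java.math.BigInteger;

	public class PcPatchFormatSketch {
		public static void main(String[] args) {
			// Invented example: the program counter patched to 0x401000.
			BigInteger value = BigInteger.valueOf(0x401000);
			System.out.println(String.format("%s=0x%s", "pc", value.toString(16)));   // pc=0x401000
			System.out.println(String.format("goto 0x%s", value.toString(16)));       // goto 0x401000
			// A non-PC register would emit only the assignment, e.g. r1=0xea.
		}
	}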
@ -232,6 +244,11 @@ public class PatchStep implements Step {
|
||||
return StepType.PATCH;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getSkipCount() {
|
||||
return 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isNop() {
|
||||
// TODO: If parsing beforehand, base on number of ops
|
||||
@ -307,7 +324,7 @@ public class PatchStep implements Step {
|
||||
}
|
||||
|
||||
@Override
|
||||
public <T> void execute(PcodeThread<T> emuThread, Stepper stepper, TaskMonitor monitor)
|
||||
public void execute(PcodeThread<?> emuThread, Stepper stepper, TaskMonitor monitor)
|
||||
throws CancelledException {
|
||||
emuThread.stepPatch(sleigh);
|
||||
}
|
||||
@ -366,20 +383,26 @@ public class PatchStep implements Step {
|
||||
// SemisparseArray is a bit overkill, no?
|
||||
Map<AddressSpace, SemisparseByteArray> result = new TreeMap<>();
|
||||
for (PcodeOp op : prog.getCode()) {
|
||||
// Only accept patches in form [mem/reg] = [constant]
|
||||
// Only accept patches in form [mem/reg] = [constant], or goto [constant]
|
||||
switch (op.getOpcode()) {
|
||||
case PcodeOp.COPY:
|
||||
case PcodeOp.COPY -> {
|
||||
if (!getPatchCopyOp(language, result, op)) {
|
||||
return null;
|
||||
}
|
||||
break;
|
||||
case PcodeOp.STORE:
|
||||
}
|
||||
case PcodeOp.STORE -> {
|
||||
if (!getPatchStoreOp(language, result, op)) {
|
||||
return null;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
}
|
||||
case PcodeOp.BRANCH -> {
|
||||
if (!getPatchBranchOp(language, result, op)) {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
default -> {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
}
|
||||
return result;
|
||||
@ -405,8 +428,7 @@ public class PatchStep implements Step {
|
||||
}
|
||||
|
||||
protected boolean getPatchStoreOp(Language language,
|
||||
Map<AddressSpace, SemisparseByteArray> result,
|
||||
PcodeOp op) {
|
||||
Map<AddressSpace, SemisparseByteArray> result, PcodeOp op) {
|
||||
Varnode vnSpace = op.getInput(0);
|
||||
if (!vnSpace.isConstant()) {
|
||||
return false;
|
||||
@ -427,4 +449,17 @@ public class PatchStep implements Step {
|
||||
return true;
|
||||
}
|
||||
|
||||
protected boolean getPatchBranchOp(Language language,
|
||||
Map<AddressSpace, SemisparseByteArray> result, PcodeOp op) {
|
||||
Address target = op.getInput(0).getAddress();
|
||||
if (target.getAddressSpace() != language.getDefaultSpace()) {
|
||||
return false;
|
||||
}
|
||||
Register pc = language.getProgramCounter();
|
||||
SemisparseByteArray array =
|
||||
result.computeIfAbsent(pc.getAddressSpace(), as -> new SemisparseByteArray());
|
||||
array.putData(pc.getOffset(),
|
||||
Utils.longToBytes(target.getOffset(), pc.getNumBytes(), language.isBigEndian()));
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
@ -81,14 +81,17 @@ public interface Scheduler {
|
||||
|
||||
/**
|
||||
* The result of running a machine
|
||||
*
|
||||
* @param schedule the actual schedule executed
|
||||
* @param error if applicable, the error that interrupted execution
|
||||
*/
|
||||
record RecordRunResult(TraceSchedule schedule, Throwable error) implements RunResult {
|
||||
}
|
||||
record RecordRunResult(TraceSchedule schedule, Throwable error) implements RunResult {}
|
||||
|
||||
/**
|
||||
* Get the next step to schedule
|
||||
*
|
||||
* @return the (instruction-level) thread and tick count
|
||||
* @param trace the trace being emulated
|
||||
* @return the thread and (instruction-level) tick count
|
||||
*/
|
||||
TickStep nextSlice(Trace trace);
|
||||
|
||||
@ -118,12 +121,17 @@ public interface Scheduler {
|
||||
TickStep slice = nextSlice(trace);
|
||||
eventThread = slice.getThread(tm, eventThread);
|
||||
emuThread = machine.getThread(eventThread.getPath(), true);
|
||||
if (emuThread.getFrame() != null) {
|
||||
long ticksLeft = slice.tickCount;
|
||||
if (ticksLeft > 0 && emuThread.getFrame() != null) {
|
||||
monitor.checkCancelled();
|
||||
emuThread.finishInstruction();
|
||||
ticksLeft--;
|
||||
completedTicks++;
|
||||
}
|
||||
for (int i = 0; i < slice.tickCount; i++) {
|
||||
while (ticksLeft > 0) {
|
||||
monitor.checkCancelled();
|
||||
emuThread.stepInstruction();
|
||||
ticksLeft--;
|
||||
completedTicks++;
|
||||
}
|
||||
completedSteps = completedSteps.steppedForward(eventThread, completedTicks);
|
||||
@ -134,11 +142,11 @@ public interface Scheduler {
|
||||
completedSteps = completedSteps.steppedForward(eventThread, completedTicks);
|
||||
PcodeFrame frame = emuThread.getFrame();
|
||||
if (frame == null) {
|
||||
return new RecordRunResult(completedSteps, e);
|
||||
return new RecordRunResult(completedSteps.assumeRecorded(), e);
|
||||
}
|
||||
// Rewind one so stepping retries the op causing the error
|
||||
frame.stepBack();
|
||||
int count = frame.count();
|
||||
int count = frame.resetCount();
|
||||
if (count == 0) {
|
||||
// If we've decoded, but could not execute the first op, just drop the p-code steps
|
||||
emuThread.dropInstruction();
|
||||
@ -146,11 +154,11 @@ public interface Scheduler {
|
||||
}
|
||||
// The +1 accounts for the decode step
|
||||
return new RecordRunResult(
|
||||
completedSteps.steppedPcodeForward(eventThread, count + 1), e);
|
||||
completedSteps.steppedPcodeForward(eventThread, count + 1).assumeRecorded(), e);
|
||||
}
|
||||
catch (CancelledException e) {
|
||||
return new RecordRunResult(
|
||||
completedSteps.steppedForward(eventThread, completedTicks), e);
|
||||
completedSteps.steppedForward(eventThread, completedTicks).assumeRecorded(), e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -21,6 +21,7 @@ import java.util.stream.Collectors;
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
|
||||
import ghidra.pcode.emu.PcodeMachine;
|
||||
import ghidra.pcode.emu.PcodeThread;
|
||||
import ghidra.program.model.lang.Language;
|
||||
import ghidra.trace.model.Trace;
|
||||
import ghidra.trace.model.thread.TraceThread;
|
||||
@ -240,7 +241,7 @@ public class Sequence implements Comparable<Sequence> {
|
||||
}
|
||||
|
||||
/**
|
||||
* Richly compare to sequences
|
||||
* Richly compare two sequences
|
||||
*
|
||||
* <p>
|
||||
* The result indicates not only which is "less" or "greater" than the other, but also indicates
|
||||
@ -355,6 +356,14 @@ public class Sequence implements Comparable<Sequence> {
|
||||
return count;
|
||||
}
|
||||
|
||||
public long totalSkipCount() {
|
||||
long count = 0;
|
||||
for (Step step : steps) {
|
||||
count += step.getSkipCount();
|
||||
}
|
||||
return count;
|
||||
}
|
||||
|
||||
/**
|
||||
* Compute to total number of patches specified
|
||||
*
|
||||
@ -440,4 +449,25 @@ public class Sequence implements Comparable<Sequence> {
|
||||
}
|
||||
return thread;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if the first instruction step is actually to finish an incomplete instruction.
|
||||
*
|
||||
* @param thread the thread whose instruction to potentially finish
|
||||
* @param machine a machine bound to the trace whose current state reflects the given position
|
||||
* @return if a finish was performed, this sequence with one initial step removed, i.e., a
|
||||
* sequence representing the steps remaining
|
||||
*/
|
||||
Sequence checkFinish(TraceThread thread, PcodeMachine<?> machine) {
|
||||
PcodeThread<?> emuThread = machine.getThread(thread.getPath(), true);
|
||||
if (emuThread.getFrame() == null) {
|
||||
return this;
|
||||
}
|
||||
Sequence result = new Sequence(new ArrayList<>(steps));
|
||||
emuThread.finishInstruction();
|
||||
if (result.steps.get(0).rewind(1) == 0) {
|
||||
result.steps.remove(0);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
@ -48,6 +48,11 @@ public class SkipStep extends AbstractStep {
|
||||
return StepType.SKIP;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getSkipCount() {
|
||||
return tickCount;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected String toStringStepPart() {
|
||||
return String.format("s%d", tickCount);
|
||||
@ -66,7 +71,7 @@ public class SkipStep extends AbstractStep {
|
||||
}
|
||||
|
||||
@Override
|
||||
public <T> void execute(PcodeThread<T> emuThread, Stepper stepper, TaskMonitor monitor)
|
||||
public void execute(PcodeThread<?> emuThread, Stepper stepper, TaskMonitor monitor)
|
||||
throws CancelledException {
|
||||
for (int i = 0; i < tickCount; i++) {
|
||||
monitor.incrementProgress(1);
|
||||
|
@ -116,6 +116,8 @@ public interface Step extends Comparable<Step> {
|
||||
|
||||
long getTickCount();
|
||||
|
||||
long getSkipCount();
|
||||
|
||||
long getPatchCount();
|
||||
|
||||
/**
|
||||
@ -147,7 +149,7 @@ public interface Step extends Comparable<Step> {
|
||||
* method sets the count to 0 and returns the (positive) difference, indicating this step should
|
||||
* be removed from the sequence, and the remaining steps rewound from the preceding step.
|
||||
*
|
||||
* @param steps the count to rewind
|
||||
* @param count the count to rewind
|
||||
* @return the number of steps remaining
|
||||
*/
|
||||
long rewind(long count);
|
||||
@ -155,7 +157,7 @@ public interface Step extends Comparable<Step> {
|
||||
/**
|
||||
* Richly compare this step to another
|
||||
*
|
||||
* @param step the object of comparison (this being the subject)
|
||||
* @param that the object of comparison (this being the subject)
|
||||
* @return a result describing the relationship from subject to object
|
||||
*/
|
||||
CompareResult compareStep(Step that);
|
||||
@ -183,7 +185,7 @@ public interface Step extends Comparable<Step> {
|
||||
return thread;
|
||||
}
|
||||
|
||||
<T> void execute(PcodeThread<T> emuThread, Stepper stepper, TaskMonitor monitor)
|
||||
void execute(PcodeThread<?> emuThread, Stepper stepper, TaskMonitor monitor)
|
||||
throws CancelledException;
|
||||
|
||||
long coalescePatches(Language language, List<Step> steps);
|
||||
|
@ -48,6 +48,11 @@ public class TickStep extends AbstractStep {
|
||||
return StepType.TICK;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getSkipCount() {
|
||||
return 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected String toStringStepPart() {
|
||||
return Long.toString(tickCount);
|
||||
@ -66,7 +71,7 @@ public class TickStep extends AbstractStep {
|
||||
}
|
||||
|
||||
@Override
|
||||
public <T> void execute(PcodeThread<T> emuThread, Stepper stepper, TaskMonitor monitor)
|
||||
public void execute(PcodeThread<?> emuThread, Stepper stepper, TaskMonitor monitor)
|
||||
throws CancelledException {
|
||||
for (int i = 0; i < tickCount; i++) {
|
||||
monitor.incrementProgress(1);
|
||||
|
@ -39,7 +39,7 @@ public class TraceSchedule implements Comparable<TraceSchedule> {
|
||||
* @return the schedule
|
||||
*/
|
||||
public static final TraceSchedule snap(long snap) {
|
||||
return new TraceSchedule(snap, new Sequence(), new Sequence());
|
||||
return new TraceSchedule(snap, new Sequence(), new Sequence(), Source.RECORD);
|
||||
}
|
||||
|
||||
private static final String PARSE_ERR_MSG =
|
||||
@ -55,9 +55,10 @@ public class TraceSchedule implements Comparable<TraceSchedule> {
|
||||
* threads forward, and/or patching machine state.
|
||||
*
|
||||
* @param spec the string specification
|
||||
* @param source the presumed source of the schedule
|
||||
* @return the parsed schedule
|
||||
*/
|
||||
public static TraceSchedule parse(String spec) {
|
||||
public static TraceSchedule parse(String spec, Source source) {
|
||||
String[] parts = spec.split(":", 2);
|
||||
if (parts.length > 2) {
|
||||
throw new AssertionError();
|
||||
@ -98,24 +99,76 @@ public class TraceSchedule implements Comparable<TraceSchedule> {
			ticks = new Sequence();
			pTicks = new Sequence();
		}
		return new TraceSchedule(snap, ticks, pTicks);
		return new TraceSchedule(snap, ticks, pTicks, source);
	}

	/**
	 * As in {@link #parse(String, Source)}, but assumed abnormal
	 *
	 * @param spec the string specification
	 * @return the parsed schedule
	 */
	public static TraceSchedule parse(String spec) {
		return parse(spec, Source.INPUT);
	}

	public enum Source {
		/**
		 * The schedule comes from the user or some source other than a recorded emulation schedule.
		 */
		INPUT {
			@Override
			Source adjust(long pTickCount, long pPatchCount, long pSkipCount) {
				// The first tick is decode, so <= 1 tick is definitely not a full instruction
				return pTickCount <= 1 && pPatchCount == 0 && pSkipCount == 0 ? RECORD : INPUT;
			}
		},
		/**
		 * The schedule comes from recording actual emulation.
		 *
		 * <p>
		 * Specifically, the p-code steps must be known not to exceed one instruction.
		 */
		RECORD {
			@Override
			Source adjust(long pTickCount, long pPatchCount, long pSkipCount) {
				return pPatchCount == 0 && pSkipCount == 0 ? RECORD : INPUT;
			}
		};

		abstract Source adjust(long tickCount, long patchCount, long skipCount);
	}
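The Source tag matters mostly when comparing schedules: p-code steps recorded by the emulator are known to stay within a single instruction, so a recorded prefix can still be treated as related to a longer schedule. A short sketch of the resulting behavior, mirroring the test added further below (the schedule specs are just examples; "0:t0-2.5" reads roughly as snap 0, thread 0, 2 instruction steps, then 5 p-code steps):

	TraceSchedule partial = TraceSchedule.parse("0:t0-2.5");   // parsed input defaults to Source.INPUT
	TraceSchedule full = TraceSchedule.parse("0:t0-3");

	partial.compareSchedule(full);                   // UNREL_LT: unknown whether ".5" stays in one instruction
	partial.assumeRecorded().compareSchedule(full);  // REL_LT: recorded p-code steps cannot overrun the instruction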
|
||||
private final long snap;
|
||||
private final Sequence steps;
|
||||
private final Sequence pSteps;
|
||||
private final Source source;
|
||||
|
||||
/**
|
||||
* Construct the given schedule
|
||||
*
|
||||
* @param snap the initial trace snapshot
|
||||
* @param steps the step sequence
|
||||
* @param pSteps the of p-code step sequence
|
||||
* @param pSteps the p-code step sequence
|
||||
* @param source if the p-code steps are known not to exceed one instruction
|
||||
*/
|
||||
public TraceSchedule(long snap, Sequence steps, Sequence pSteps) {
|
||||
public TraceSchedule(long snap, Sequence steps, Sequence pSteps, Source source) {
|
||||
this.snap = snap;
|
||||
this.steps = steps;
|
||||
this.pSteps = pSteps;
|
||||
this.source = source.adjust(pSteps.totalTickCount(), pSteps.totalPatchCount(),
|
||||
pSteps.totalSkipCount());
|
||||
}
|
||||
|
||||
/**
|
||||
* Construct the given schedule, but assumed abnormal
|
||||
*
|
||||
* @param snap the initial trace snapshot
|
||||
* @param steps the step sequence
|
||||
* @param pSteps the p-code step sequence
|
||||
*/
|
||||
public TraceSchedule(long snap, Sequence steps, Sequence pSteps) {
|
||||
this(snap, steps, pSteps, Source.INPUT);
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -152,33 +205,17 @@ public class TraceSchedule implements Comparable<TraceSchedule> {
|
||||
}
|
||||
|
||||
result = this.steps.compareSeq(that.steps);
|
||||
switch (result) {
|
||||
case UNREL_LT:
|
||||
case UNREL_GT:
|
||||
return result;
|
||||
case REL_LT:
|
||||
if (this.pSteps.isNop()) {
|
||||
return CompareResult.REL_LT;
|
||||
}
|
||||
else {
|
||||
return CompareResult.UNREL_LT;
|
||||
}
|
||||
case REL_GT:
|
||||
if (that.pSteps.isNop()) {
|
||||
return CompareResult.REL_GT;
|
||||
}
|
||||
else {
|
||||
return CompareResult.UNREL_GT;
|
||||
}
|
||||
default: // EQUALS, compare pSteps
|
||||
}
|
||||
|
||||
result = this.pSteps.compareSeq(that.pSteps);
|
||||
if (result != CompareResult.EQUALS) {
|
||||
return result;
|
||||
}
|
||||
|
||||
return CompareResult.EQUALS;
|
||||
return switch (result) {
|
||||
case UNREL_LT, UNREL_GT -> result;
|
||||
case REL_LT -> (this.pSteps.isNop() || this.source == Source.RECORD)
|
||||
? CompareResult.REL_LT
|
||||
: CompareResult.UNREL_LT;
|
||||
case REL_GT -> (that.pSteps.isNop() || that.source == Source.RECORD)
|
||||
? CompareResult.REL_GT
|
||||
: CompareResult.UNREL_GT;
|
||||
default -> this.pSteps.compareSeq(that.pSteps);
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -222,10 +259,19 @@ public class TraceSchedule implements Comparable<TraceSchedule> {
|
||||
return steps.isNop() && pSteps.isNop();
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if this schedule has instruction steps
|
||||
*
|
||||
* @return true if this indicates at least one instruction step
|
||||
*/
|
||||
public boolean hasSteps() {
|
||||
return !steps.isNop();
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the source snapshot
|
||||
*
|
||||
* @return
|
||||
* @return the snapshot key
|
||||
*/
|
||||
public long getSnap() {
|
||||
return snap;
|
||||
@ -234,7 +280,7 @@ public class TraceSchedule implements Comparable<TraceSchedule> {
|
||||
/**
|
||||
* Get the last thread key stepped by this schedule
|
||||
*
|
||||
* @return
|
||||
* @return the thread key
|
||||
*/
|
||||
public long getLastThreadKey() {
|
||||
long last = pSteps.getLastThreadKey();
|
||||
@ -396,6 +442,7 @@ public class TraceSchedule implements Comparable<TraceSchedule> {
|
||||
pRemains.execute(trace, lastThread, machine, Stepper.pcode(), monitor);
|
||||
}
|
||||
else {
|
||||
remains = remains.checkFinish(lastThread, machine);
|
||||
lastThread =
|
||||
remains.execute(trace, lastThread, machine, Stepper.instruction(), monitor);
|
||||
lastThread = pSteps.execute(trace, lastThread, machine, Stepper.pcode(), monitor);
|
||||
@ -417,7 +464,7 @@ public class TraceSchedule implements Comparable<TraceSchedule> {
|
||||
public TraceSchedule steppedForward(TraceThread thread, long tickCount) {
|
||||
Sequence steps = this.steps.clone();
|
||||
steps.advance(new TickStep(thread == null ? -1 : thread.getKey(), tickCount));
|
||||
return new TraceSchedule(snap, steps, new Sequence());
|
||||
return new TraceSchedule(snap, steps, new Sequence(), Source.RECORD);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -430,7 +477,7 @@ public class TraceSchedule implements Comparable<TraceSchedule> {
|
||||
public TraceSchedule skippedForward(TraceThread thread, long tickCount) {
|
||||
Sequence steps = this.steps.clone();
|
||||
steps.advance(new SkipStep(thread == null ? -1 : thread.getKey(), tickCount));
|
||||
return new TraceSchedule(snap, steps, new Sequence());
|
||||
return new TraceSchedule(snap, steps, new Sequence(), Source.RECORD);
|
||||
}
|
||||
|
||||
protected TraceSchedule doSteppedBackward(Trace trace, long tickCount, Set<Long> visited) {
|
||||
@ -454,7 +501,7 @@ public class TraceSchedule implements Comparable<TraceSchedule> {
|
||||
}
|
||||
Sequence steps = this.steps.clone();
|
||||
steps.rewind(tickCount);
|
||||
return new TraceSchedule(snap, steps, new Sequence());
|
||||
return new TraceSchedule(snap, steps, new Sequence(), Source.RECORD);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -486,7 +533,7 @@ public class TraceSchedule implements Comparable<TraceSchedule> {
|
||||
public TraceSchedule steppedPcodeForward(TraceThread thread, int pTickCount) {
|
||||
Sequence pTicks = this.pSteps.clone();
|
||||
pTicks.advance(new TickStep(thread == null ? -1 : thread.getKey(), pTickCount));
|
||||
return new TraceSchedule(snap, steps.clone(), pTicks);
|
||||
return new TraceSchedule(snap, steps.clone(), pTicks, Source.INPUT);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -499,7 +546,7 @@ public class TraceSchedule implements Comparable<TraceSchedule> {
|
||||
public TraceSchedule skippedPcodeForward(TraceThread thread, int pTickCount) {
|
||||
Sequence pTicks = this.pSteps.clone();
|
||||
pTicks.advance(new SkipStep(thread == null ? -1 : thread.getKey(), pTickCount));
|
||||
return new TraceSchedule(snap, steps.clone(), pTicks);
|
||||
return new TraceSchedule(snap, steps.clone(), pTicks, Source.INPUT);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -519,7 +566,7 @@ public class TraceSchedule implements Comparable<TraceSchedule> {
|
||||
}
|
||||
Sequence pTicks = this.pSteps.clone();
|
||||
pTicks.rewind(pStepCount);
|
||||
return new TraceSchedule(snap, steps.clone(), pTicks);
|
||||
return new TraceSchedule(snap, steps.clone(), pTicks, Source.INPUT);
|
||||
}
|
||||
|
||||
private long keyOf(TraceThread thread) {
|
||||
@ -530,6 +577,7 @@ public class TraceSchedule implements Comparable<TraceSchedule> {
|
||||
* Returns the equivalent of executing this schedule then performing a given patch
|
||||
*
|
||||
* @param thread the thread context for the patch; cannot be null
|
||||
* @param language the sleigh language for the patch
|
||||
* @param sleigh a single line of sleigh, excluding the terminating semicolon.
|
||||
* @return the resulting schedule
|
||||
*/
|
||||
@ -538,19 +586,20 @@ public class TraceSchedule implements Comparable<TraceSchedule> {
|
||||
Sequence pTicks = this.pSteps.clone();
|
||||
pTicks.advance(new PatchStep(thread.getKey(), sleigh));
|
||||
pTicks.coalescePatches(language);
|
||||
return new TraceSchedule(snap, steps.clone(), pTicks);
|
||||
return new TraceSchedule(snap, steps.clone(), pTicks, Source.INPUT);
|
||||
}
|
||||
Sequence ticks = this.steps.clone();
|
||||
ticks.advance(new PatchStep(keyOf(thread), sleigh));
|
||||
ticks.coalescePatches(language);
|
||||
return new TraceSchedule(snap, ticks, new Sequence());
|
||||
return new TraceSchedule(snap, ticks, new Sequence(), Source.RECORD);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the equivalent of executing this schedule then performing the given patches
|
||||
*
|
||||
* @param thread the thread context for the patch; cannot be null
|
||||
* @param sleigh the lines of sleigh, excluding the terminating semicolons.
|
||||
* @param language the sleigh language for the patch
|
||||
* @param sleigh the lines of sleigh, excluding the terminating semicolons
|
||||
* @return the resulting schedule
|
||||
*/
|
||||
public TraceSchedule patched(TraceThread thread, Language language, List<String> sleigh) {
|
||||
@ -560,14 +609,14 @@ public class TraceSchedule implements Comparable<TraceSchedule> {
|
||||
pTicks.advance(new PatchStep(thread.getKey(), line));
|
||||
}
|
||||
pTicks.coalescePatches(language);
|
||||
return new TraceSchedule(snap, steps.clone(), pTicks);
|
||||
return new TraceSchedule(snap, steps.clone(), pTicks, Source.INPUT);
|
||||
}
|
||||
Sequence ticks = this.steps.clone();
|
||||
for (String line : sleigh) {
|
||||
ticks.advance(new PatchStep(thread.getKey(), line));
|
||||
}
|
||||
ticks.coalescePatches(language);
|
||||
return new TraceSchedule(snap, ticks, new Sequence());
|
||||
return new TraceSchedule(snap, ticks, new Sequence(), Source.RECORD);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -575,7 +624,7 @@ public class TraceSchedule implements Comparable<TraceSchedule> {
|
||||
*
|
||||
* <p>
|
||||
* This operation cannot be used to append instruction steps after p-code steps. Thus, if this
|
||||
* schedule contains any p-code steps and {@code} next has instruction steps, an error will be
|
||||
* schedule contains any p-code steps and {@code next} has instruction steps, an error will be
|
||||
*
|
||||
* @param next the schedule to append. Its snap is ignored.
|
||||
* @return the complete schedule
|
||||
@ -586,16 +635,25 @@ public class TraceSchedule implements Comparable<TraceSchedule> {
|
||||
if (this.pSteps.isNop()) {
|
||||
Sequence ticks = this.steps.clone();
|
||||
ticks.advance(next.steps);
|
||||
return new TraceSchedule(this.snap, ticks, next.pSteps.clone());
|
||||
return new TraceSchedule(this.snap, ticks, next.pSteps.clone(), next.source);
|
||||
}
|
||||
else if (next.steps.isNop()) {
|
||||
Sequence pTicks = this.steps.clone();
|
||||
Sequence pTicks = this.pSteps.clone();
|
||||
pTicks.advance(next.pSteps);
|
||||
return new TraceSchedule(this.snap, this.steps.clone(), pTicks);
|
||||
return new TraceSchedule(this.snap, this.steps.clone(), pTicks, Source.INPUT);
|
||||
}
|
||||
throw new IllegalArgumentException("Cannot have instructions steps following p-code steps");
|
||||
}
|
||||
|
||||
/**
|
||||
* Drop the p-code steps
|
||||
*
|
||||
* @return the schedule without ops
|
||||
*/
|
||||
public TraceSchedule dropPSteps() {
|
||||
return new TraceSchedule(this.snap, this.steps, new Sequence());
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the threads involved in the schedule
|
||||
*
|
||||
@ -611,4 +669,8 @@ public class TraceSchedule implements Comparable<TraceSchedule> {
|
||||
result.remove(null);
|
||||
return result;
|
||||
}
|
||||
|
||||
public TraceSchedule assumeRecorded() {
|
||||
return new TraceSchedule(snap, steps, pSteps, Source.RECORD);
|
||||
}
|
||||
}
|
||||
|
@ -19,13 +19,16 @@ import static ghidra.lifecycle.Unfinished.TODO;
|
||||
import static org.junit.Assert.*;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.*;
|
||||
|
||||
import org.junit.*;
|
||||
|
||||
import db.Transaction;
|
||||
import ghidra.app.plugin.assembler.*;
|
||||
import ghidra.program.database.ProgramBuilder;
|
||||
import ghidra.program.model.address.AddressSet;
|
||||
import ghidra.program.disassemble.Disassembler;
|
||||
import ghidra.program.model.address.*;
|
||||
import ghidra.program.model.data.*;
|
||||
import ghidra.program.model.lang.*;
|
||||
import ghidra.program.model.listing.*;
|
||||
@ -35,6 +38,13 @@ import ghidra.test.AbstractGhidraHeadlessIntegrationTest;
|
||||
import ghidra.trace.database.ToyDBTraceBuilder;
|
||||
import ghidra.trace.database.listing.DBTraceCodeManager;
|
||||
import ghidra.trace.database.memory.DBTraceMemoryManager;
|
||||
import ghidra.trace.model.Lifespan;
|
||||
import ghidra.trace.model.memory.TraceMemoryFlag;
|
||||
import ghidra.trace.model.thread.TraceThread;
|
||||
import ghidra.trace.model.time.TraceSnapshot;
|
||||
import ghidra.trace.model.time.TraceTimeManager;
|
||||
import ghidra.trace.model.time.schedule.TraceSchedule;
|
||||
import ghidra.util.Msg;
|
||||
import ghidra.util.exception.CancelledException;
|
||||
import ghidra.util.task.TaskMonitor;
|
||||
|
||||
@ -48,9 +58,11 @@ public class DBTraceProgramViewListingTest extends AbstractGhidraHeadlessIntegra
|
||||
DBTraceCodeManager code;
|
||||
|
||||
protected static void assertUndefined(CodeUnit cu) {
|
||||
Data data = (Data) cu;
|
||||
assertEquals(DataType.DEFAULT, data.getDataType());
|
||||
assertFalse(data.isDefined());
|
||||
if (cu instanceof Data data && DataType.DEFAULT.equals(data.getDataType()) &&
|
||||
!data.isDefined()) {
|
||||
return;
|
||||
}
|
||||
fail("Expected undefined unit, but was '%s'".formatted(cu));
|
||||
}
|
||||
|
||||
protected <T> List<T> takeN(int n, Iterator<T> it) {
|
||||
@ -896,4 +908,81 @@ public class DBTraceProgramViewListingTest extends AbstractGhidraHeadlessIntegra
|
||||
assertArrayEquals(b.arr(1), cu0.getBytes());
|
||||
assertArrayEquals(b.arr(8), cu1.getBytes());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetCodeUnitsInScratchView() throws Throwable {
|
||||
TraceTimeManager tm = b.trace.getTimeManager();
|
||||
Address entry = b.addr(0x00400000);
|
||||
AddressSetView set = b.set(b.range(0x00400000, 0x00400003));
|
||||
Assembler asm = Assemblers.getAssembler(b.language);
|
||||
|
||||
AssemblyBuffer buf = new AssemblyBuffer(asm, entry);
|
||||
buf.assemble("imm r1, #234");
|
||||
buf.assemble("add r1, r1");
|
||||
|
||||
final long snap;
|
||||
try (Transaction tx = b.startTransaction()) {
|
||||
TraceThread thread = b.getOrAddThread("Threads[1]", 0);
|
||||
tm.getSnapshot(0, true);
|
||||
memory.addRegion("Memory[test]", Lifespan.nowOn(0), b.range(0x00400000, 0x00400fff),
|
||||
TraceMemoryFlag.READ, TraceMemoryFlag.EXECUTE);
|
||||
|
||||
TraceSnapshot scratch = tm.getSnapshot(Long.MIN_VALUE, true);
|
||||
snap = scratch.getKey();
|
||||
scratch.setSchedule(TraceSchedule.ZERO.steppedForward(thread, 1));
|
||||
|
||||
view.setSnap(snap);
|
||||
Disassembler dis =
|
||||
Disassembler.getDisassembler(view, TaskMonitor.DUMMY, msg -> Msg.error(this, msg));
|
||||
AddressSetView result = dis.disassemble(entry, set);
|
||||
assertEquals(set, result);
|
||||
|
||||
assertEquals(4, memory.putBytes(0, entry, ByteBuffer.wrap(buf.getBytes())));
|
||||
// No disassembly at snap 0
|
||||
}
|
||||
|
||||
byte[] arr = new byte[4];
|
||||
view.getMemory().getBytes(entry, arr);
|
||||
assertArrayEquals(buf.getBytes(), arr);
|
||||
assertUndefined(listing.getCodeUnitAt(entry));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCreateCodeUnitsInScratchViewAfterBytesChanged() throws Throwable {
|
||||
TraceTimeManager tm = b.trace.getTimeManager();
|
||||
Address entry = b.addr(0x00400000);
|
||||
AddressSetView set = b.set(b.range(0x00400000, 0x00400003));
|
||||
Assembler asm = Assemblers.getAssembler(b.language);
|
||||
|
||||
AssemblyBuffer buf = new AssemblyBuffer(asm, entry);
|
||||
buf.assemble("imm r1, #234");
|
||||
buf.assemble("add r1, r1");
|
||||
|
||||
final long snap;
|
||||
try (Transaction tx = b.startTransaction()) {
|
||||
TraceThread thread = b.getOrAddThread("Threads[1]", 0);
|
||||
tm.getSnapshot(0, true);
|
||||
memory.addRegion("Memory[test]", Lifespan.nowOn(0), b.range(0x00400000, 0x00400fff),
|
||||
TraceMemoryFlag.READ, TraceMemoryFlag.EXECUTE);
|
||||
|
||||
TraceSnapshot scratch = tm.getSnapshot(Long.MIN_VALUE, true);
|
||||
snap = scratch.getKey();
|
||||
scratch.setSchedule(TraceSchedule.ZERO.steppedForward(thread, 1));
|
||||
|
||||
view.setSnap(snap);
|
||||
Disassembler dis =
|
||||
Disassembler.getDisassembler(view, TaskMonitor.DUMMY, msg -> Msg.error(this, msg));
|
||||
AddressSetView result = dis.disassemble(entry, set);
|
||||
assertEquals(set, result);
|
||||
|
||||
assertEquals(4, memory.putBytes(0, entry, ByteBuffer.wrap(buf.getBytes())));
|
||||
// No disassembly at snap 0
|
||||
|
||||
// Attempt re-disassembly at scratch snap
|
||||
result = dis.disassemble(entry, set);
|
||||
assertEquals(set, result);
|
||||
}
|
||||
|
||||
assertEquals("imm r1,#0xea", listing.getCodeUnitAt(entry).toString());
|
||||
}
|
||||
}
|
||||
|
@ -224,19 +224,30 @@ public class TraceScheduleTest extends AbstractGhidraHeadlessIntegrationTest {
|
||||
expectU("0:10", "1:10");
|
||||
expectU("0:t0-10", "0:t1-10");
|
||||
// We don't know how many p-code steps complete an instruction step
|
||||
expectU("0:t0-10.1", "0:t0-11");
|
||||
// But we need at least 2 to actually enter the instruction
|
||||
expectU("0:t0-10.2", "0:t0-11");
|
||||
expectU("0:t0-10;t1-5", "0:t0-11;t1-5");
|
||||
|
||||
expectR("0:t0-10", "0:t0-11");
|
||||
expectR("0:t0-10", "0:t0-10;t1-5");
|
||||
expectR("0:t0-10", "0:t0-11;t1-5");
|
||||
expectR("0:t0-10", "0:t0-10.1");
|
||||
expectR("0:t0-10", "0:t0-11.1");
|
||||
expectR("0:t0-10", "0:t0-10;t1-5.1");
|
||||
expectR("0:t0-10", "0:t0-11;t1-5.1");
|
||||
expectR("0:t0-10", "0:t0-10.2");
|
||||
expectR("0:t0-10", "0:t0-11.2");
|
||||
expectR("0:t0-10", "0:t0-10;t1-5.2");
|
||||
expectR("0:t0-10", "0:t0-11;t1-5.2");
|
||||
|
||||
expectE("0:t0-10", "0:t0-10");
|
||||
expectE("0:t0-10.1", "0:t0-10.1");
|
||||
expectE("0:t0-10.2", "0:t0-10.2");
|
||||
}
|
||||
|
||||
	@Test
	public void testCompare2() {
		TraceSchedule timeL = TraceSchedule.parse("0:t0-2.5");
		TraceSchedule timeR = TraceSchedule.parse("0:t0-3");

		assertEquals(CompareResult.UNREL_LT, timeL.compareSchedule(timeR));
		assertEquals(CompareResult.REL_LT, timeL.assumeRecorded().compareSchedule(timeR));
		assertEquals(CompareResult.UNREL_LT, timeL.compareSchedule(timeR.assumeRecorded()));
	}
|
||||
public String strRelativize(String fromSpec, String toSpec) {
|
||||
|
@ -66,6 +66,11 @@ task copyPropertiesFile(type: Copy) {
|
||||
into 'build/ziplayout'
|
||||
}
|
||||
|
||||
task copyElasticJar(type: Copy) {
|
||||
from 'build/libs/lsh.jar'
|
||||
into 'build/ziplayout'
|
||||
}
|
||||
|
||||
task elasticPluginJar(type: Jar) {
|
||||
from sourceSets.elasticPlugin.output
|
||||
archiveBaseName = 'lsh'
|
||||
@ -87,11 +92,15 @@ task elasticPluginZip(type: Zip) {
|
||||
destinationDirectory = file("build/data")
|
||||
}
|
||||
|
||||
// Currently targeting elasticsearch-8.8.1 which by default runs with java 20
|
||||
compileElasticPluginJava.options.release = 20
|
||||
|
||||
compileElasticPluginJava.dependsOn copyGenericTask
|
||||
compileElasticPluginJava.dependsOn copyUtilityTask
|
||||
compileElasticPluginJava.dependsOn copyBSimTask
|
||||
|
||||
elasticPluginZip.dependsOn elasticPluginJar
|
||||
copyElasticJar.dependsOn elasticPluginJar
|
||||
elasticPluginZip.dependsOn copyElasticJar
|
||||
elasticPluginZip.dependsOn copyPropertiesFile
|
||||
|
||||
jar.dependsOn elasticPluginZip
|
||||
|
@ -18,6 +18,7 @@ package ghidra.features.bsim.query;
|
||||
import java.net.URL;
|
||||
import java.sql.Connection;
|
||||
import java.sql.SQLException;
|
||||
import java.util.Collection;
|
||||
import java.util.HashMap;
|
||||
|
||||
import javax.security.auth.callback.NameCallback;
|
||||
@ -40,12 +41,14 @@ public class BSimPostgresDBConnectionManager {
|
||||
private static final int CONN_POOL_MAX_IDLE = 2;
|
||||
|
||||
private static HashMap<BSimServerInfo, BSimPostgresDataSource> dataSourceMap = new HashMap<>();
|
||||
private static boolean shutdownHookInstalled = false;
|
||||
|
||||
public static synchronized BSimPostgresDataSource getDataSource(
|
||||
BSimServerInfo postgresServerInfo) {
|
||||
if (postgresServerInfo.getDBType() != DBType.postgres) {
|
||||
throw new IllegalArgumentException("expected postgres server info");
|
||||
}
|
||||
enableShutdownHook();
|
||||
return dataSourceMap.computeIfAbsent(postgresServerInfo,
|
||||
info -> new BSimPostgresDataSource(info));
|
||||
}
|
||||
@ -75,6 +78,30 @@ public class BSimPostgresDBConnectionManager {
|
||||
dataSourceMap.remove(serverInfo);
|
||||
}
|
||||
|
||||
private static synchronized void enableShutdownHook() {
|
||||
if (shutdownHookInstalled) {
|
||||
return;
|
||||
}
|
||||
Runtime.getRuntime().addShutdownHook(new Thread() {
|
||||
@Override
|
||||
public void run() {
|
||||
Collection<BSimPostgresDataSource> dataSources = dataSourceMap.values();
|
||||
for (BSimPostgresDataSource ds : dataSources) {
|
||||
int activeConnections = ds.getActiveConnections();
|
||||
if (activeConnections != 0) {
|
||||
Msg.error(BSimPostgresDBConnectionManager.class,
|
||||
activeConnections +
|
||||
" BSim active Postgres connections were not properly closed: " +
|
||||
ds.serverInfo);
|
||||
}
|
||||
ds.close();
|
||||
}
|
||||
dataSourceMap.clear();
|
||||
}
|
||||
});
|
||||
shutdownHookInstalled = true;
|
||||
}
|
||||
|
||||
public static class BSimPostgresDataSource implements BSimJDBCDataSource { // NOTE: can be renamed
|
||||
|
||||
private final BSimServerInfo serverInfo;
|
||||
|
@ -42,7 +42,7 @@ import ghidra.features.bsim.query.protocol.*;
|
||||
*
|
||||
*/
|
||||
public final class PostgresFunctionDatabase
|
||||
extends AbstractSQLFunctionDatabase<WeightedLSHCosineVectorFactory> {
|
||||
extends AbstractSQLFunctionDatabase<WeightedLSHCosineVectorFactory> {
|
||||
|
||||
// NOTE: Previously named ColumnDatabase
|
||||
|
||||
@ -101,7 +101,7 @@ public final class PostgresFunctionDatabase
|
||||
}
|
||||
|
||||
private void changePassword(Connection c, String username, char[] newPassword)
|
||||
throws SQLException {
|
||||
throws SQLException {
|
||||
StringBuilder buffer = new StringBuilder();
|
||||
buffer.append("ALTER ROLE \"");
|
||||
buffer.append(username);
|
||||
@ -158,7 +158,7 @@ public final class PostgresFunctionDatabase
|
||||
*/
|
||||
private void serverLoadWeights(Connection db) throws SQLException {
|
||||
try (Statement st = db.createStatement();
|
||||
ResultSet rs = st.executeQuery("SELECT lsh_load()")) {
|
||||
ResultSet rs = st.executeQuery("SELECT lsh_load()")) {
|
||||
while (rs.next()) {
|
||||
// int val = rs.getInt(1);
|
||||
}
|
||||
@ -171,13 +171,16 @@ public final class PostgresFunctionDatabase
|
||||
Connection db = initConnection();
|
||||
serverLoadWeights(db);
|
||||
|
||||
if (asynchronous) {
|
||||
try (Statement st = db.createStatement()) {
|
||||
// Tell server to do asynchronous commits. This speeds up large
|
||||
// ingests with a (slight) danger of
|
||||
// losing the most recent commits if the server crashes (NOTE:
|
||||
// database integrity should still be recoverable)
|
||||
st.executeUpdate("SET LOCAL synchronous_commit TO OFF");
|
||||
try (Statement st = db.createStatement()) {
|
||||
// Tell server to do asynchronous commits. This speeds up large
|
||||
// ingests with a (slight) danger of
|
||||
// losing the most recent commits if the server crashes (NOTE:
|
||||
// database integrity should still be recoverable)
|
||||
if (asynchronous) {
|
||||
st.executeUpdate("SET SESSION synchronous_commit TO OFF");
|
||||
}
|
||||
else {
|
||||
st.executeUpdate("SET SESSION synchronous_commit to ON");
|
||||
}
|
||||
}
|
||||
|
||||
@ -223,12 +226,15 @@ public final class PostgresFunctionDatabase
|
||||
|
||||
serverLoadWeights(db);
|
||||
|
||||
// Tell server to do asynchronous commits. This speeds up large
|
||||
// ingests with a (slight) danger of
|
||||
// losing the most recent commits if the server crashes (NOTE:
|
||||
// database integrity should still be recoverable)
|
||||
if (asynchronous) {
|
||||
// Tell server to do asynchronous commits. This speeds up large
|
||||
// ingests with a (slight) danger of
|
||||
// losing the most recent commits if the server crashes (NOTE:
|
||||
// database integrity should still be recoverable)
|
||||
st.executeUpdate("SET LOCAL synchronous_commit TO OFF");
|
||||
st.executeUpdate("SET SESSION synchronous_commit TO OFF");
|
||||
}
|
||||
else {
|
||||
st.executeUpdate("SET SESSION synchronous_commit to ON");
|
||||
}
|
||||
}
|
||||
}
|
||||
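The substance of this change is the move from SET LOCAL to SET SESSION: in PostgreSQL, SET LOCAL only lasts until the end of the current transaction and is a no-op (with a server-side warning) outside one, whereas SET SESSION sticks to the connection, which is what a pooled connection needs. A minimal JDBC sketch of the corrected pattern, assuming an open connection to the BSim database:

	import java.sql.Connection;
	import java.sql.SQLException;
	import java.sql.Statement;

	class SynchronousCommitSketch {
		// 'conn' is assumed to be an open JDBC connection to the BSim PostgreSQL database.
		static void configureCommitMode(Connection conn, boolean asynchronous) throws SQLException {
			try (Statement st = conn.createStatement()) {
				// SESSION scope persists for this connection; LOCAL would expire at the
				// end of the transaction (or warn when no transaction is open).
				st.executeUpdate("SET SESSION synchronous_commit TO " + (asynchronous ? "OFF" : "ON"));
			}
		}
	}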
@ -253,7 +259,7 @@ public final class PostgresFunctionDatabase
|
||||
*/
|
||||
private void rebuildIndex(Connection c) throws SQLException {
|
||||
try (Statement st = c.createStatement();
|
||||
ResultSet rs = st.executeQuery("SELECT lsh_reload()")) {
|
||||
ResultSet rs = st.executeQuery("SELECT lsh_reload()")) {
|
||||
st.execute("SET maintenance_work_mem TO '2GB'");
|
||||
st.execute(
|
||||
"CREATE INDEX vectable_vec_idx ON vectable USING gin (vec gin_lshvector_ops)");
|
||||
@ -275,7 +281,7 @@ public final class PostgresFunctionDatabase
|
||||
* @throws SQLException if there is a problem creating or executing the query
|
||||
*/
|
||||
private int preWarm(Connection c, int mainIndex, int secondaryIndex, int vectors)
|
||||
throws SQLException {
|
||||
throws SQLException {
|
||||
try (Statement st = c.createStatement()) {
|
||||
// Try to load the entire main index into the PostgreSQL cache
|
||||
int res = -1;
|
||||
@ -386,8 +392,8 @@ public final class PostgresFunctionDatabase
|
||||
* @throws SQLException if there is a problem creating or executing the query
|
||||
*/
|
||||
@Override
|
||||
protected int queryNearestVector(List<VectorResult> resultset, LSHVector vec,
|
||||
double simthresh, double sigthresh, int max) throws SQLException {
|
||||
protected int queryNearestVector(List<VectorResult> resultset, LSHVector vec, double simthresh,
|
||||
double sigthresh, int max) throws SQLException {
|
||||
PreparedStatement s =
|
||||
selectNearestVectorStatement.prepareIfNeeded(() -> initConnection().prepareStatement(
|
||||
"WITH const(cvec) AS (VALUES( lshvector_in( CAST( ? AS cstring) ) ) )," +
|
||||
@ -469,7 +475,7 @@ public final class PostgresFunctionDatabase
|
||||
@Override
|
||||
protected VectorResult queryVectorId(long id) throws SQLException {
|
||||
PreparedStatement s = selectVectorByRowIdStatement.prepareIfNeeded(() -> initConnection()
|
||||
.prepareStatement("SELECT id,count,vec FROM vectable WHERE id = ?"));
|
||||
.prepareStatement("SELECT id,count,vec FROM vectable WHERE id = ?"));
|
||||
s.setLong(1, id);
|
||||
try (ResultSet rs = s.executeQuery()) {
|
||||
if (!rs.next()) {
|
||||
@ -506,7 +512,7 @@ public final class PostgresFunctionDatabase
|
||||
|
||||
@Override
|
||||
public QueryResponseRecord doQuery(BSimQuery<?> query, Connection c)
|
||||
throws SQLException, LSHException, DatabaseNonFatalException {
|
||||
throws SQLException, LSHException, DatabaseNonFatalException {
|
||||
|
||||
if (query instanceof PrewarmRequest q) {
|
||||
fdbPrewarm(q, c);
|
||||
|
@ -40,6 +40,7 @@ public class BSimH2FileDBConnectionManager {
|
||||
* Data source map keyed by absolute DB file path
|
||||
*/
|
||||
private static HashMap<BSimServerInfo, BSimH2FileDataSource> dataSourceMap = new HashMap<>();
|
||||
private static boolean shutdownHookInstalled = false;
|
||||
|
||||
/**
|
||||
* Get all H2 File DB data sorces which exist in the JVM.
|
||||
@ -62,6 +63,7 @@ public class BSimH2FileDBConnectionManager {
|
||||
if (fileServerInfo.getDBType() != DBType.file) {
|
||||
throw new IllegalArgumentException("expected file info");
|
||||
}
|
||||
enableShutdownHook();
|
||||
return dataSourceMap.computeIfAbsent(fileServerInfo,
|
||||
info -> new BSimH2FileDataSource(info));
|
||||
}
|
||||
@ -102,6 +104,30 @@ public class BSimH2FileDBConnectionManager {
|
||||
return true;
|
||||
}
|
||||
|
||||
private static synchronized void enableShutdownHook() {
|
||||
if (shutdownHookInstalled) {
|
||||
return;
|
||||
}
|
||||
Runtime.getRuntime().addShutdownHook(new Thread() {
|
||||
@Override
|
||||
public void run() {
|
||||
Collection<BSimH2FileDataSource> dataSources = dataSourceMap.values();
|
||||
for (BSimH2FileDataSource ds : dataSources) {
|
||||
int activeConnections = ds.getActiveConnections();
|
||||
if (activeConnections != 0) {
|
||||
Msg.error(BSimH2FileDBConnectionManager.class,
|
||||
activeConnections +
|
||||
" BSim active H2 File connections were not properly closed: " +
|
||||
ds.serverInfo);
|
||||
}
|
||||
ds.close();
|
||||
}
|
||||
dataSourceMap.clear();
|
||||
}
|
||||
});
|
||||
shutdownHookInstalled = true;
|
||||
}
|
||||
|
||||
/**
|
||||
* {@link BSimH2FileDataSource} provides a pooled DB data source for a specific H2 File DB.
|
||||
*/
|
||||
|
@ -237,7 +237,7 @@ public class VSCodeProjectScript extends GhidraScript {
	private void writeSampleModule(File installDir, File projectDir) throws IOException {
		// Copy Skeleton and rename module
		String skeleton = "Skeleton";
		File skeletonDir = new File(installDir, "Extensions/Ghidra/skeleton");
		File skeletonDir = new File(installDir, "Extensions/Ghidra/Skeleton");
		FileUtils.copyDirectory(skeletonDir, projectDir);

		// Rename package
@ -26,6 +26,7 @@ import ghidra.program.disassemble.Disassembler;
|
||||
import ghidra.program.disassemble.DisassemblerContextImpl;
|
||||
import ghidra.program.model.address.*;
|
||||
import ghidra.program.model.block.*;
|
||||
import ghidra.program.model.data.Undefined;
|
||||
import ghidra.program.model.lang.Register;
|
||||
import ghidra.program.model.lang.RegisterValue;
|
||||
import ghidra.program.model.listing.*;
|
||||
@ -308,8 +309,17 @@ public class ClearFlowAndRepairCmd extends BackgroundCommand<Program> {
				}
				// no instruction, check if data is there
				Data data = listing.getDefinedDataAt(toAddr);
				// data or instruction not found at destination
				if (data == null) {
					continue; // instruction not found at destination
					continue; // don't add to clear set
				}
				// has an external reference from data, not produced from bad flow
				if (data.getExternalReference(0) != null) {
					continue; // don't add to clear set
				}
				// if defined data is anything other than Undefined1,2... or a pointer
				if (data.isDefined() && !(data.getDataType() instanceof Undefined) &&
					!(data.isPointer())) {
					continue; // don't add to clear set
				}
			}
			boolean clearIt = true;
|
@ -24,15 +24,15 @@ import ghidra.app.util.bin.format.macho.dyld.DyldChainedPtr;
|
||||
import ghidra.app.util.bin.format.macho.dyld.DyldChainedPtr.DyldChainType;
|
||||
import ghidra.app.util.bin.format.macho.dyld.DyldFixup;
|
||||
import ghidra.app.util.importer.MessageLog;
|
||||
import ghidra.app.util.opinion.MachoProgramBuilder;
|
||||
import ghidra.program.model.address.Address;
|
||||
import ghidra.program.model.listing.Library;
|
||||
import ghidra.program.model.listing.Program;
|
||||
import ghidra.program.model.mem.Memory;
|
||||
import ghidra.program.model.mem.MemoryAccessException;
|
||||
import ghidra.program.model.reloc.Relocation.Status;
|
||||
import ghidra.program.model.symbol.*;
|
||||
import ghidra.program.model.symbol.Symbol;
|
||||
import ghidra.program.model.symbol.SymbolTable;
|
||||
import ghidra.util.exception.CancelledException;
|
||||
import ghidra.util.exception.InvalidInputException;
|
||||
import ghidra.util.task.TaskMonitor;
|
||||
|
||||
public class DyldChainedFixups {
|
||||
@ -164,44 +164,20 @@ public class DyldChainedFixups {
|
||||
fixup.symbol() != null ? fixup.symbol().getName() : null);
|
||||
}
|
||||
if (fixup.symbol() != null && fixup.libOrdinal() != null) {
|
||||
fixupExternalLibrary(program, libraryPaths, fixup.libOrdinal(), fixup.symbol(), log,
|
||||
monitor);
|
||||
try {
|
||||
MachoProgramBuilder.fixupExternalLibrary(program, libraryPaths,
|
||||
fixup.libOrdinal(), fixup.symbol());
|
||||
}
|
||||
catch (Exception e) {
|
||||
log.appendMsg("WARNING: Problem fixing up symbol '%s' - %s"
|
||||
.formatted(fixup.symbol().getName(), e.getMessage()));
|
||||
}
|
||||
}
|
||||
}
|
||||
log.appendMsg("Fixed up " + fixedAddrs.size() + " chained pointers.");
|
||||
return fixedAddrs;
|
||||
}
|
||||
|
||||
/**
|
||||
* Associates the given {@link Symbol} with the correct external {@link Library} (fixing
|
||||
* the <EXTERNAL> association)
|
||||
*
|
||||
* @param program The {@link Program}
|
||||
* @param libraryPaths A {@link List} of library paths
|
||||
* @param libraryOrdinal The library ordinal
|
||||
* @param symbol The {@link Symbol}
|
||||
* @param log The log
|
||||
* @param monitor A cancellable monitor
|
||||
*/
|
||||
private static void fixupExternalLibrary(Program program, List<String> libraryPaths,
|
||||
int libraryOrdinal, Symbol symbol, MessageLog log, TaskMonitor monitor) {
|
||||
ExternalManager extManager = program.getExternalManager();
|
||||
int libraryIndex = libraryOrdinal - 1;
|
||||
if (libraryIndex >= 0 && libraryIndex < libraryPaths.size()) {
|
||||
Library library = extManager.getExternalLibrary(libraryPaths.get(libraryIndex));
|
||||
ExternalLocation loc =
|
||||
extManager.getUniqueExternalLocation(Library.UNKNOWN, symbol.getName());
|
||||
if (loc != null) {
|
||||
try {
|
||||
loc.setName(library, symbol.getName(), SourceType.IMPORTED);
|
||||
}
|
||||
catch (InvalidInputException e) {
|
||||
log.appendException(e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//---------------------Below are used only by handled __thread_starts-------------------------
|
||||
|
||||
/**
|
||||
|
@ -4,9 +4,9 @@
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
@ -57,8 +57,8 @@ public class Omf51ModuleHeader extends OmfRecord {
|
||||
struct.add(BYTE, "type", null);
|
||||
struct.add(WORD, "length", null);
|
||||
struct.add(moduleName.toDataType(), "name", null);
|
||||
struct.add(BYTE, "padding", null);
|
||||
struct.add(BYTE, "TRN ID", null);
|
||||
struct.add(BYTE, "padding", null);
|
||||
struct.add(BYTE, "checksum", null);
|
||||
|
||||
struct.setCategoryPath(new CategoryPath(OmfUtils.CATEGORY_PATH));
|
||||
|
@ -4,9 +4,9 @@
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
@ -17,6 +17,7 @@ package ghidra.app.util.bin.format.pe;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.function.Supplier;
|
||||
|
||||
import ghidra.app.cmd.data.CreateArrayCmd;
|
||||
import ghidra.app.util.bin.format.pe.LoadConfigDirectory.GuardFlags;
|
||||
@ -30,7 +31,6 @@ import ghidra.program.model.mem.Memory;
|
||||
import ghidra.program.model.mem.MemoryAccessException;
|
||||
import ghidra.program.model.symbol.SourceType;
|
||||
import ghidra.program.model.symbol.SymbolTable;
|
||||
import ghidra.util.Msg;
|
||||
import ghidra.util.exception.InvalidInputException;
|
||||
|
||||
/**
|
||||
@ -57,82 +57,21 @@ public class ControlFlowGuard {
|
||||
public static void markup(LoadConfigDirectory lcd, Program program, MessageLog log,
|
||||
NTHeader ntHeader) {
|
||||
|
||||
boolean is64bit = ntHeader.getOptionalHeader().is64bit();
|
||||
AddressSpace space = program.getAddressFactory().getDefaultAddressSpace();
|
||||
Memory mem = program.getMemory();
|
||||
SymbolTable symbolTable = program.getSymbolTable();
|
||||
|
||||
// ControlFlowGuard
|
||||
markupCfgCheckFunction(lcd, program, is64bit, space, mem, symbolTable);
|
||||
markupCfgDispatchFunction(lcd, program, is64bit, space, mem, symbolTable);
|
||||
markupCfgFunction("_guard_check_icall", "ControlFlowGuard check",
|
||||
lcd::getCfgCheckFunctionPointer, program, ntHeader, log);
|
||||
markupCfgFunction("_guard_dispatch_icall", "ControlFlowGuard dispatch",
|
||||
lcd::getCfgDispatchFunctionPointer, program, ntHeader, log);
|
||||
markupCfgFunctionTable(lcd, program, log);
|
||||
markupCfgAddressTakenIatEntryTable(lcd, program, log);
|
||||
|
||||
// ReturnFlowGuard
|
||||
markupRfgFailureRoutine(lcd, program, space, symbolTable);
|
||||
markupRfgDefaultFailureRoutine(lcd, program, is64bit, space, mem, symbolTable);
|
||||
markupRfgDefaultStackPointerFunction(lcd, program, is64bit, space, mem, symbolTable);
|
||||
}
|
||||
|
||||
/**
|
||||
* Performs markup on the ControlFlowGuard check function, if it exists.
|
||||
*
|
||||
* @param lcd The PE LoadConfigDirectory.
|
||||
* @param program The program.
|
||||
* @param is64bit True if the PE is 64-bit; false if it's 32-bit.
|
||||
* @param space The program's address space.
|
||||
* @param mem The program's memory.
|
||||
* @param symbolTable The program's symbol table.
|
||||
*/
|
||||
private static void markupCfgCheckFunction(LoadConfigDirectory lcd, Program program,
|
||||
boolean is64bit, AddressSpace space, Memory mem, SymbolTable symbolTable) {
|
||||
|
||||
if (lcd.getCfgCheckFunctionPointer() == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
Address functionPointerAddr = space.getAddress(lcd.getCfgCheckFunctionPointer());
|
||||
Address functionAddr = space.getAddress(
|
||||
is64bit ? mem.getLong(functionPointerAddr) : mem.getInt(functionPointerAddr));
|
||||
symbolTable.createLabel(functionAddr, "_guard_check_icall", SourceType.IMPORTED);
|
||||
|
||||
AbstractProgramLoader.markAsFunction(program, null, functionAddr);
|
||||
}
|
||||
catch (MemoryAccessException | AddressOutOfBoundsException | InvalidInputException e) {
|
||||
Msg.warn(ControlFlowGuard.class, "Unable to label ControlFlowGuard check function.", e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Performs markup on the ControlFlowGuard dispatch function, if it exists.
|
||||
*
|
||||
* @param lcd The PE LoadConfigDirectory.
|
||||
* @param program The program.
|
||||
* @param is64bit True if the PE is 64-bit; false if it's 32-bit.
|
||||
* @param space The program's address space.
|
||||
* @param mem The program's memory.
|
||||
* @param symbolTable The program's symbol table.
|
||||
*/
|
||||
private static void markupCfgDispatchFunction(LoadConfigDirectory lcd, Program program,
|
||||
boolean is64bit, AddressSpace space, Memory mem, SymbolTable symbolTable) {
|
||||
|
||||
if (lcd.getCfgDispatchFunctionPointer() == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
Address functionPointerAddr = space.getAddress(lcd.getCfgDispatchFunctionPointer());
|
||||
Address functionAddr = space.getAddress(
|
||||
is64bit ? mem.getLong(functionPointerAddr) : mem.getInt(functionPointerAddr));
|
||||
symbolTable.createLabel(functionAddr, "_guard_dispatch_icall", SourceType.IMPORTED);
|
||||
|
||||
AbstractProgramLoader.markAsFunction(program, null, functionAddr);
|
||||
}
|
||||
catch (MemoryAccessException | AddressOutOfBoundsException | InvalidInputException e) {
|
||||
Msg.warn(ControlFlowGuard.class, "Unable to label ControlFlowGuard dispatch function.",
|
||||
e);
|
||||
}
|
||||
markupCfgFunction("_guard_ss_verify_failure", "ReturnFlowGuard failure",
|
||||
lcd::getRfgFailureRoutine, program, ntHeader, log);
|
||||
markupCfgFunction("_guard_ss_verify_failure_default", "ReturnFlowGuard default failure",
|
||||
lcd::getRfgFailureRoutineFunctionPointer, program, ntHeader, log);
|
||||
markupCfgFunction("_guard_ss_verify_sp_default", "ReturnFlowGuard verify stack pointer",
|
||||
lcd::getRfgVerifyStackPointerFunctionPointer, program, ntHeader, log);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -164,7 +103,7 @@ public class ControlFlowGuard {
|
||||
.createLabel(tableAddr, GuardCFFunctionTableName, SourceType.IMPORTED);
|
||||
}
|
||||
catch (InvalidInputException e) {
|
||||
Msg.warn(ControlFlowGuard.class, "Unable to label ControlFlowGuard function table.", e);
|
||||
log.appendMsg("Unable to label ControlFlowGuard function table: " + e.getMessage());
|
||||
}
|
||||
|
||||
// Each table entry is an RVA (32-bit image base offset), followed by 'n' extra bytes
|
||||
@ -201,12 +140,12 @@ public class ControlFlowGuard {
|
||||
|
||||
private static void createCfgFunctions(Program program, Data tableData, MessageLog log) {
|
||||
if (tableData == null) {
|
||||
Msg.warn(ControlFlowGuard.class, "Couldn't find Control Flow Guard tables.");
|
||||
log.appendMsg("Couldn't find Control Flow Guard tables.");
|
||||
return;
|
||||
}
|
||||
|
||||
if (!tableData.isArray() || (tableData.getNumComponents() < 1)) {
|
||||
Msg.warn(ControlFlowGuard.class, "Control Flow Guard table seems to be empty.");
|
||||
log.appendMsg("Control Flow Guard table seems to be empty.");
|
||||
return;
|
||||
}
|
||||
|
||||
@ -223,8 +162,8 @@ public class ControlFlowGuard {
|
||||
Data entry = table.getComponent(i);
|
||||
Data iboData = entry.getComponent(0);
|
||||
Object value = iboData.getValue();
|
||||
if (value instanceof Address) {
|
||||
list.add((Address) value);
|
||||
if (value instanceof Address addr) {
|
||||
list.add(addr);
|
||||
}
|
||||
}
|
||||
return list;
|
||||
@ -266,102 +205,62 @@ public class ControlFlowGuard {
|
||||
}
|
||||
}
|
||||
catch (AddressOutOfBoundsException | InvalidInputException e) {
|
||||
Msg.warn(ControlFlowGuard.class, "Unable to label ControlFlowGuard IAT table.", e);
|
||||
log.appendMsg("Unable to label ControlFlowGuard IAT table: " + e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Performs markup on the ReturnFlowGuard failure routine, if it exists.
|
||||
* Performs markup on a ControlFlowGuard function, if it exists.
|
||||
*
|
||||
* @param lcd The PE LoadConfigDirectory.
|
||||
* @param program The program
|
||||
* @param space The program's address space.
|
||||
* @param symbolTable The program's symbol table.
|
||||
* @param label The ControlFlowGuard label to create.
|
||||
* @param description A short description of the ControlFlowGuard function type.
|
||||
* @param functionPointerGetter A method that returns the ControlFlowGuard function's pointer
|
||||
* address.
|
||||
* @param program The program.
|
||||
* @param ntHeader The PE NTHeader.
|
||||
* @param log The log.
|
||||
*/
|
||||
private static void markupRfgFailureRoutine(LoadConfigDirectory lcd, Program program,
|
||||
AddressSpace space, SymbolTable symbolTable) {
|
||||
private static void markupCfgFunction(String label, String description,
|
||||
Supplier<Long> functionPointerGetter, Program program, NTHeader ntHeader,
|
||||
MessageLog log) {
|
||||
|
||||
if (functionPointerGetter.get() == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (lcd.getRfgFailureRoutine() == 0) {
|
||||
AddressSpace space = program.getAddressFactory().getDefaultAddressSpace();
|
||||
Memory mem = program.getMemory();
|
||||
SymbolTable symbolTable = program.getSymbolTable();
|
||||
boolean is64bit = ntHeader.getOptionalHeader().is64bit();
|
||||
|
||||
Address functionPointerAddr = space.getAddress(functionPointerGetter.get());
|
||||
PeUtils.createData(program, functionPointerAddr, PointerDataType.dataType, log);
|
||||
|
||||
Address functionAddr;
|
||||
try {
|
||||
functionAddr = space.getAddress(
|
||||
is64bit ? mem.getLong(functionPointerAddr) : mem.getInt(functionPointerAddr));
|
||||
}
|
||||
catch (MemoryAccessException e) {
|
||||
log.appendMsg("Failed to read %s function pointer address at %s".formatted(description,
|
||||
functionPointerAddr));
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
Address routineAddr = space.getAddress(lcd.getRfgFailureRoutine());
|
||||
symbolTable.createLabel(routineAddr, "_guard_ss_verify_failure", SourceType.IMPORTED);
|
||||
|
||||
AbstractProgramLoader.markAsFunction(program, null, routineAddr);
|
||||
symbolTable.createLabel(functionAddr, label, SourceType.IMPORTED);
|
||||
}
|
||||
catch (AddressOutOfBoundsException | InvalidInputException e) {
|
||||
Msg.warn(ControlFlowGuard.class, "Unable to label ReturnFlowGuard failure routine.", e);
|
||||
log.appendMsg("Unable to apply label '%s' to %s function at %s: %s".formatted(label,
|
||||
description, functionAddr, e.getMessage()));
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Performs markup on the ReturnFlowGuard "default" failure routine function, if it exists.
|
||||
*
|
||||
* @param lcd The PE LoadConfigDirectory.
|
||||
* @param program The program
|
||||
* @param is64bit True if the PE is 64-bit; false if it's 32-bit.
|
||||
* @param space The program's address space.
|
||||
* @param mem The program's memory.
|
||||
* @param symbolTable The program's symbol table.
|
||||
*/
|
||||
private static void markupRfgDefaultFailureRoutine(LoadConfigDirectory lcd, Program program,
|
||||
boolean is64bit, AddressSpace space, Memory mem, SymbolTable symbolTable) {
|
||||
|
||||
if (lcd.getRfgFailureRoutineFunctionPointer() == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
Address functionPointerAddr =
|
||||
space.getAddress(lcd.getRfgFailureRoutineFunctionPointer());
|
||||
Address functionAddr = space.getAddress(
|
||||
is64bit ? mem.getLong(functionPointerAddr) : mem.getInt(functionPointerAddr));
|
||||
symbolTable.createLabel(functionAddr, "_guard_ss_verify_failure_default",
|
||||
SourceType.IMPORTED);
|
||||
|
||||
|
||||
if (program.getListing().getDefinedDataAt(functionAddr) == null) {
|
||||
AbstractProgramLoader.markAsFunction(program, null, functionAddr);
|
||||
|
||||
}
|
||||
catch (MemoryAccessException | AddressOutOfBoundsException | InvalidInputException e) {
|
||||
Msg.warn(ControlFlowGuard.class,
|
||||
"Unable to label ReturnFlowGuard default failure routine.", e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Performs markup on the ReturnFlowGuard verify stack pointer function, if it exists.
|
||||
*
|
||||
* @param lcd The PE LoadConfigDirectory.
|
||||
* @param program The program
|
||||
* @param is64bit True if the PE is 64-bit; false if it's 32-bit.
|
||||
* @param space The program's address space.
|
||||
* @param mem The program's memory.
|
||||
* @param symbolTable The program's symbol table.
|
||||
*/
|
||||
private static void markupRfgDefaultStackPointerFunction(LoadConfigDirectory lcd,
|
||||
Program program, boolean is64bit, AddressSpace space, Memory mem,
|
||||
SymbolTable symbolTable) {
|
||||
|
||||
if (lcd.getRfgVerifyStackPointerFunctionPointer() == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
Address functionPointerAddr =
|
||||
space.getAddress(lcd.getRfgVerifyStackPointerFunctionPointer());
|
||||
Address functionAddr = space.getAddress(
|
||||
is64bit ? mem.getLong(functionPointerAddr) : mem.getInt(functionPointerAddr));
|
||||
symbolTable.createLabel(functionAddr, "_guard_ss_verify_sp_default",
|
||||
SourceType.IMPORTED);
|
||||
|
||||
AbstractProgramLoader.markAsFunction(program, null, functionAddr);
|
||||
|
||||
}
|
||||
catch (MemoryAccessException | AddressOutOfBoundsException | InvalidInputException e) {
|
||||
Msg.warn(ControlFlowGuard.class,
|
||||
"Unable to label ReturnFlowGuard verify stack pointer function.", e);
|
||||
else {
|
||||
log.appendMsg("Unable to mark %s as function at %s. Data is already defined there."
|
||||
.formatted(description, functionAddr));
|
||||
}
|
||||
}
|
||||
}
|
||||
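The refactor above folds the separate ControlFlowGuard/ReturnFlowGuard helpers into one markupCfgFunction driven by a Supplier of the raw pointer value. The step every one of those helpers shares is dereferencing a 32- or 64-bit function pointer out of memory. A hedged sketch of just that step; the helper name is illustrative, and zero-masking the 32-bit read is a sketch choice to avoid sign extension, not something taken from the patch.

import ghidra.program.model.address.Address;
import ghidra.program.model.address.AddressSpace;
import ghidra.program.model.mem.Memory;
import ghidra.program.model.mem.MemoryAccessException;

public class GuardPointerSketch {

	// Read the function address that a load-config pointer slot refers to,
	// for either a 32-bit or 64-bit image.
	static Address dereferenceGuardPointer(AddressSpace space, Memory mem, long pointerValue,
			boolean is64bit) throws MemoryAccessException {
		Address functionPointerAddr = space.getAddress(pointerValue);
		long target = is64bit ? mem.getLong(functionPointerAddr)
				: (mem.getInt(functionPointerAddr) & 0xFFFFFFFFL);
		return space.getAddress(target);
	}
}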
|
@ -963,7 +963,14 @@ public class MachoProgramBuilder {
|
||||
space.getAddress(segments.get(binding.getSegmentIndex()).getVMaddress() +
|
||||
binding.getSegmentOffset());
|
||||
|
||||
fixupExternalLibrary(binding.getLibraryOrdinal(), symbol, libraryPaths);
|
||||
try {
|
||||
fixupExternalLibrary(program, libraryPaths, binding.getLibraryOrdinal(),
|
||||
symbol);
|
||||
}
|
||||
catch (Exception e) {
|
||||
log.appendMsg("WARNING: Problem fixing up symbol '%s' - %s"
|
||||
.formatted(symbol.getName(), e.getMessage()));
|
||||
}
|
||||
|
||||
boolean success = false;
|
||||
try {
|
||||
@ -983,20 +990,6 @@ public class MachoProgramBuilder {
|
||||
}
|
||||
}
|
||||
|
||||
private void fixupExternalLibrary(int libraryOrdinal, Symbol symbol, List<String> libraryPaths)
|
||||
throws InvalidInputException {
|
||||
ExternalManager extManager = program.getExternalManager();
|
||||
int libraryIndex = libraryOrdinal - 1;
|
||||
if (libraryIndex >= 0 && libraryIndex < libraryPaths.size()) {
|
||||
Library library = extManager.getExternalLibrary(libraryPaths.get(libraryIndex));
|
||||
ExternalLocation loc =
|
||||
extManager.getUniqueExternalLocation(Library.UNKNOWN, symbol.getName());
|
||||
if (loc != null) {
|
||||
loc.setName(library, symbol.getName(), SourceType.IMPORTED);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
protected void markupHeaders(MachHeader header, Address headerAddr) throws Exception {
|
||||
monitor.setMessage("Processing header markup...");
|
||||
|
||||
@ -1896,4 +1889,40 @@ public class MachoProgramBuilder {
|
||||
SourceType.IMPORTED);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Associates the given {@link Symbol} with the correct external {@link Library} (fixing
|
||||
* the {@code <EXTERNAL>} association)
|
||||
*
|
||||
* @param program The {@link Program}
|
||||
* @param libraryPaths A {@link List} of library paths
|
||||
* @param libraryOrdinal The library ordinal
|
||||
* @param symbol The {@link Symbol}
|
||||
* @throws Exception if an unexpected problem occurs
|
||||
*/
|
||||
public static void fixupExternalLibrary(Program program, List<String> libraryPaths,
|
||||
int libraryOrdinal, Symbol symbol) throws Exception {
|
||||
ExternalManager extManager = program.getExternalManager();
|
||||
int libraryIndex = libraryOrdinal - 1;
|
||||
if (libraryIndex < 0 || libraryIndex >= libraryPaths.size()) {
|
||||
throw new Exception(
|
||||
"Library ordinal '%d' outside of expected range".formatted(libraryOrdinal));
|
||||
}
|
||||
String libraryName = libraryPaths.get(libraryIndex).replaceAll(" ", "_");
|
||||
Library library = extManager.getExternalLibrary(libraryName);
|
||||
if (library == null) {
|
||||
throw new Exception(
|
||||
"Library '%s' not found in external program list".formatted(libraryName));
|
||||
}
|
||||
ExternalLocation loc =
|
||||
extManager.getUniqueExternalLocation(Library.UNKNOWN, symbol.getName());
|
||||
if (loc != null) {
|
||||
try {
|
||||
loc.setName(library, symbol.getName(), SourceType.IMPORTED);
|
||||
}
|
||||
catch (InvalidInputException e) {
|
||||
throw new Exception("Symbol name contains illegal characters");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
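The fixupExternalLibrary method above is now public and static so both the Mach-O binding code and the DYLD chained-fixup code can share it. A sketch of a call site in the style this change introduces, where a failure is logged as a warning instead of aborting the import; the wrapper class is hypothetical.

import java.util.List;

import ghidra.app.util.importer.MessageLog;
import ghidra.app.util.opinion.MachoProgramBuilder;
import ghidra.program.model.listing.Program;
import ghidra.program.model.symbol.Symbol;

public class FixupCallSketch {

	// Assumes the arguments come from the surrounding binding/fixup loop.
	static void applyFixup(Program program, List<String> libraryPaths, int libOrdinal,
			Symbol symbol, MessageLog log) {
		try {
			MachoProgramBuilder.fixupExternalLibrary(program, libraryPaths, libOrdinal, symbol);
		}
		catch (Exception e) {
			log.appendMsg("WARNING: Problem fixing up symbol '%s' - %s"
					.formatted(symbol.getName(), e.getMessage()));
		}
	}
}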
|
@ -4,9 +4,9 @@
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
@ -433,8 +433,6 @@ public class PeLoader extends AbstractPeDebugLoader {
|
||||
return;
|
||||
}
|
||||
|
||||
log.appendMsg("Delay imports detected");
|
||||
|
||||
AddressSpace space = program.getAddressFactory().getDefaultAddressSpace();
|
||||
Listing listing = program.getListing();
|
||||
ReferenceManager refManager = program.getReferenceManager();
|
||||
|
@ -66,6 +66,7 @@ src/decompile/datatests/ptrtoarray.xml||GHIDRA||||END|
|
||||
src/decompile/datatests/readvolatile.xml||GHIDRA||||END|
|
||||
src/decompile/datatests/retspecial.xml||GHIDRA||||END|
|
||||
src/decompile/datatests/retstruct.xml||GHIDRA||||END|
|
||||
src/decompile/datatests/revisit.xml||GHIDRA||||END|
|
||||
src/decompile/datatests/sbyte.xml||GHIDRA||||END|
|
||||
src/decompile/datatests/skipnext2.xml||GHIDRA||||END|
|
||||
src/decompile/datatests/stackreturn.xml||GHIDRA||||END|
|
||||
|
@ -2454,7 +2454,11 @@ public class RTTIWindowsClassRecoverer extends RTTIClassRecoverer {
|
||||
|
||||
Map<RecoveredClass, Integer> parentOffsetMap = getBaseClassOffsetMap(recoveredClass);
|
||||
|
||||
return parentOffsetMap.get(virtualParentClasses.get(0));
|
||||
if (parentOffsetMap != null) {
|
||||
return parentOffsetMap.get(virtualParentClasses.get(0));
|
||||
}
|
||||
|
||||
return null;
|
||||
|
||||
}
|
||||
|
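The change above guards against a missing base-class offset map before looking up the first virtual parent. The same null-safe pattern, sketched with simplified stand-in types (not the recoverer's real signatures).

import java.util.Map;

public class OffsetLookupSketch {

	// Propagate null when the offset map could not be built, instead of dereferencing it;
	// the caller treats a null result as "offset unknown".
	static Integer getFirstVirtualParentOffset(Map<Object, Integer> parentOffsetMap,
			Object firstVirtualParent) {
		if (parentOffsetMap == null) {
			return null;
		}
		return parentOffsetMap.get(firstVirtualParent);
	}
}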
||||
|
@ -2345,7 +2345,7 @@ int4 BlockBasic::flipInPlaceTest(vector<PcodeOp *> &fliplist) const
|
||||
PcodeOp *lastop = op.back();
|
||||
if (lastop->code() != CPUI_CBRANCH)
|
||||
return 2;
|
||||
return opFlipInPlaceTest(lastop,fliplist);
|
||||
return Funcdata::opFlipInPlaceTest(lastop,fliplist);
|
||||
}
|
||||
|
||||
void BlockBasic::flipInPlaceExecute(void)
|
||||
@ -2574,9 +2574,16 @@ bool BlockBasic::isDoNothing(void) const
|
||||
if (sizeIn() == 0) return false; // A block that does nothing but
|
||||
// is a starting block, may need to be a
|
||||
// placeholder for global(persistent) vars
|
||||
if ((sizeIn()==1)&&(getIn(0)->isSwitchOut())) {
|
||||
if (getOut(0)->sizeIn() > 1)
|
||||
return false; // Don't remove switch targets
|
||||
for(int4 i=0;i<sizeIn();++i) {
|
||||
const FlowBlock *switchbl = getIn(i);
|
||||
if (!switchbl->isSwitchOut()) continue;
|
||||
if (switchbl->sizeOut() > 1) {
|
||||
// This block is a switch target
|
||||
if (getOut(0)->sizeIn() > 1) { // Multiple edges coming together
|
||||
// Switch edge may still be propagating a unique value
|
||||
return false; // Don't remove it
|
||||
}
|
||||
}
|
||||
}
|
||||
PcodeOp *lastop = lastOp();
|
||||
if ((lastop != (PcodeOp *)0)&&(lastop->code()==CPUI_BRANCHIND))
|
||||
@ -2719,6 +2726,29 @@ PcodeOp *BlockBasic::findMultiequal(const vector<Varnode *> &varArray)
|
||||
return op;
|
||||
}
|
||||
|
||||
/// \brief Get the earliest use/read of a Varnode in \b this basic block
|
||||
///
|
||||
/// \param vn is the Varnode to search for
|
||||
/// \return the earliest PcodeOp reading the Varnode or NULL
|
||||
PcodeOp *BlockBasic::earliestUse(Varnode *vn)
|
||||
|
||||
{
|
||||
list<PcodeOp *>::const_iterator iter;
|
||||
PcodeOp *res = (PcodeOp *)0;
|
||||
|
||||
for(iter=vn->beginDescend();iter!=vn->endDescend();++iter) {
|
||||
PcodeOp *op = *iter;
|
||||
if (op->getParent() != this) continue;
|
||||
if (res == (PcodeOp *)0)
|
||||
res = op;
|
||||
else {
|
||||
if (op->getSeqNum().getOrder() < res->getSeqNum().getOrder())
|
||||
res = op;
|
||||
}
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
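BlockBasic::earliestUse above is a straight linear scan for the reader with the smallest sequence order inside one block. The same selection rule, sketched in Java over hypothetical stand-in types; nothing here is Ghidra API.

import java.util.Comparator;
import java.util.List;
import java.util.Optional;

public class EarliestUseSketch {

	// Hypothetical stand-in for a p-code op: the block it lives in and its ordering key.
	interface Op {
		Object parent();
		int order();
	}

	// Among ops reading the value in the given block, return the one with the minimum
	// order, or empty if there is none.
	static Optional<Op> earliestUseInBlock(List<Op> readers, Object block) {
		return readers.stream()
				.filter(op -> op.parent() == block)
				.min(Comparator.comparingInt(Op::order));
	}
}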
||||
/// Each Varnode must be defined by a PcodeOp with the same OpCode. The Varnode, within the array, is replaced
|
||||
/// with the input Varnode in the indicated slot.
|
||||
/// \param varArray is the given array of Varnodes
|
||||
@ -3028,7 +3058,7 @@ bool BlockIf::preferComplement(Funcdata &data)
|
||||
if (0 != split->flipInPlaceTest(fliplist))
|
||||
return false;
|
||||
split->flipInPlaceExecute();
|
||||
opFlipInPlaceExecute(data,fliplist);
|
||||
data.opFlipInPlaceExecute(fliplist);
|
||||
swapBlocks(1,2);
|
||||
return true;
|
||||
}
|
||||
|
@ -492,6 +492,7 @@ public:
|
||||
bool emptyOp(void) const { return op.empty(); } ///< Return \b true if \b block contains no operations
|
||||
bool noInterveningStatement(void) const;
|
||||
PcodeOp *findMultiequal(const vector<Varnode *> &varArray); ///< Find MULTIEQUAL with given inputs
|
||||
PcodeOp *earliestUse(Varnode *vn);
|
||||
static bool liftVerifyUnroll(vector<Varnode *> &varArray,int4 slot); ///< Verify given Varnodes are defined with same PcodeOp
|
||||
};
|
||||
|
||||
|
@ -4,9 +4,9 @@
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
@ -2135,9 +2135,9 @@ int4 ActionNormalizeBranches::apply(Funcdata &data)
|
||||
if (cbranch == (PcodeOp *)0) continue;
|
||||
if (cbranch->code() != CPUI_CBRANCH) continue;
|
||||
fliplist.clear();
|
||||
if (opFlipInPlaceTest(cbranch,fliplist) != 0)
|
||||
if (Funcdata::opFlipInPlaceTest(cbranch,fliplist) != 0)
|
||||
continue;
|
||||
opFlipInPlaceExecute(data,fliplist);
|
||||
data.opFlipInPlaceExecute(fliplist);
|
||||
bb->flipInPlaceExecute();
|
||||
count += 1; // Indicate a change was made
|
||||
}
|
||||
|
@ -459,7 +459,7 @@ bool StringSequence::transform(void)
|
||||
return true;
|
||||
}
|
||||
|
||||
/// From a starting pointer, backtrack through PTRADDs to a putative root Varnode pointer.
|
||||
/// From a starting pointer, backtrack through PTRADDs and COPYs to a putative root Varnode pointer.
|
||||
/// \param initPtr is pointer Varnode into the root STORE
|
||||
void HeapSequence::findBasePointer(Varnode *initPtr)
|
||||
|
||||
@ -467,22 +467,84 @@ void HeapSequence::findBasePointer(Varnode *initPtr)
|
||||
basePointer = initPtr;
|
||||
while(basePointer->isWritten()) {
|
||||
PcodeOp *op = basePointer->getDef();
|
||||
if (op->code() != CPUI_PTRADD) break;
|
||||
int8 sz = op->getIn(2)->getOffset();
|
||||
if (sz != charType->getAlignSize()) break;
|
||||
OpCode opc = op->code();
|
||||
if (opc == CPUI_PTRADD) {
|
||||
int8 sz = op->getIn(2)->getOffset();
|
||||
if (sz != charType->getAlignSize()) break;
|
||||
}
|
||||
else if (opc != CPUI_COPY)
|
||||
break;
|
||||
basePointer = op->getIn(0);
|
||||
}
|
||||
}
|
||||
|
||||
/// Back-track from \b basePointer through PTRSUBs, PTRADDs, and INT_ADDs to an earlier root, keeping track
|
||||
/// of any offsets. If an earlier root exists, trace forward, through ops trying to match the offsets.
|
||||
/// For trace of ops whose offsets match exactly, the resulting Varnode is added to the list of duplicates.
|
||||
/// \param duplist will hold the list of duplicate Varnodes (including \b basePointer)
|
||||
void HeapSequence::findDuplicateBases(vector<Varnode *> &duplist)
|
||||
|
||||
{
|
||||
if (!basePointer->isWritten()) {
|
||||
duplist.push_back(basePointer);
|
||||
return;
|
||||
}
|
||||
PcodeOp *op = basePointer->getDef();
|
||||
OpCode opc = op->code();
|
||||
if ((opc != CPUI_PTRSUB && opc != CPUI_INT_ADD && opc != CPUI_PTRADD) || !op->getIn(1)->isConstant()) {
|
||||
duplist.push_back(basePointer);
|
||||
return;
|
||||
}
|
||||
Varnode *copyRoot = basePointer;
|
||||
vector<uintb> offset;
|
||||
do {
|
||||
uintb off = op->getIn(1)->getOffset();
|
||||
if (opc == CPUI_PTRADD)
|
||||
off *= op->getIn(2)->getOffset();
|
||||
offset.push_back(off);
|
||||
copyRoot = op->getIn(0);
|
||||
if (!copyRoot->isWritten()) break;
|
||||
op = copyRoot->getDef();
|
||||
opc = op->code();
|
||||
if (opc != CPUI_PTRSUB && opc != CPUI_INT_ADD && opc != CPUI_PTRADD)
|
||||
break;
|
||||
} while(op->getIn(1)->isConstant());
|
||||
|
||||
duplist.push_back(copyRoot);
|
||||
vector<Varnode *> midlist;
|
||||
for(int4 i=offset.size()-1;i>=0;--i) {
|
||||
duplist.swap(midlist);
|
||||
duplist.clear();
|
||||
for(int4 j=0;j<midlist.size();++j) {
|
||||
Varnode *vn = midlist[j];
|
||||
list<PcodeOp *>::const_iterator iter = vn->beginDescend();
|
||||
while(iter != vn->endDescend()) {
|
||||
op = *iter;
|
||||
++iter;
|
||||
opc = op->code();
|
||||
if (opc != CPUI_PTRSUB && opc != CPUI_INT_ADD && opc != CPUI_PTRADD)
|
||||
continue;
|
||||
if (op->getIn(0) != vn || !op->getIn(1)->isConstant())
|
||||
continue;
|
||||
uintb off = op->getIn(1)->getOffset();
|
||||
if (opc == CPUI_PTRADD)
|
||||
off *= op->getIn(2)->getOffset();
|
||||
if (off != offset[i])
|
||||
continue;
|
||||
duplist.push_back(op->getOut());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Find STOREs with pointers derived from the \b basePointer and that are in the same
|
||||
/// basic block as the root STORE. The root STORE is \e not included in the resulting set.
|
||||
/// \param stores holds the collected STOREs
|
||||
void HeapSequence::findInitialStores(vector<PcodeOp *> &stores)
|
||||
|
||||
{
|
||||
Datatype *ptrType = rootOp->getIn(1)->getTypeReadFacing(rootOp);
|
||||
vector<Varnode *> ptradds;
|
||||
ptradds.push_back(basePointer);
|
||||
findDuplicateBases(ptradds);
|
||||
int4 pos = 0;
|
||||
int4 alignSize = charType->getAlignSize();
|
||||
while(pos < ptradds.size()) {
|
||||
@ -494,10 +556,14 @@ void HeapSequence::findInitialStores(vector<PcodeOp *> &stores)
|
||||
OpCode opc = op->code();
|
||||
if (opc == CPUI_PTRADD) {
|
||||
if (op->getIn(0) != vn) continue;
|
||||
if (op->getOut()->getTypeDefFacing() != ptrType) continue;
|
||||
// We only check array element size here, if we checked the data-type, we would
|
||||
// need to take into account different pointer styles to the same element data-type
|
||||
if (op->getIn(2)->getOffset() != alignSize) continue;
|
||||
ptradds.push_back(op->getOut());
|
||||
}
|
||||
else if (opc == CPUI_COPY) {
|
||||
ptradds.push_back(op->getOut());
|
||||
}
|
||||
else if (opc == CPUI_STORE && op->getParent() == block && op != rootOp) {
|
||||
if (op->getIn(1) != vn) continue;
|
||||
stores.push_back(op);
|
||||
@ -530,7 +596,7 @@ uint8 HeapSequence::calcAddElements(Varnode *vn,vector<Varnode *> &nonConst,int4
|
||||
|
||||
/// \brief Calculate the offset and any non-constant additive elements between the given Varnode and the \b basePointer
|
||||
///
|
||||
/// Walk backward from the given Varnode thru PTRADDs and ADDs, summing any offsets encountered.
|
||||
/// Walk backward from the given Varnode thru PTRADDs and COPYs, summing any offsets encountered.
|
||||
/// Any non-constant Varnodes encountered in the path, that are not themselves a pointer, are passed back in a list.
|
||||
/// \param vn is the given Varnode to trace back to the \b basePointer
|
||||
/// \param nonConst will hold the list of non-constant Varnodes being passed back
|
||||
@ -539,12 +605,23 @@ uint8 HeapSequence::calcPtraddOffset(Varnode *vn,vector<Varnode *> &nonConst)
|
||||
|
||||
{
|
||||
uint8 res = 0;
|
||||
while(vn != basePointer) {
|
||||
PcodeOp *ptradd = vn->getDef();
|
||||
uint8 off = calcAddElements(ptradd->getIn(1),nonConst,3);
|
||||
off *= (uint8)ptradd->getIn(2)->getOffset();
|
||||
res += off;
|
||||
vn = ptradd->getIn(0);
|
||||
while(vn->isWritten()) {
|
||||
PcodeOp *op = vn->getDef();
|
||||
OpCode opc = op->code();
|
||||
if (opc == CPUI_PTRADD) {
|
||||
uint8 mult = op->getIn(2)->getOffset();
|
||||
if (mult != charType->getAlignSize())
|
||||
break;
|
||||
uint8 off = calcAddElements(op->getIn(1),nonConst,3);
|
||||
off *= mult;
|
||||
res += off;
|
||||
vn = op->getIn(0);
|
||||
}
|
||||
else if (opc == CPUI_COPY) {
|
||||
vn = op->getIn(0);
|
||||
}
|
||||
else
|
||||
break;
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
@ -89,6 +89,7 @@ class HeapSequence : public ArraySequence {
|
||||
uint8 baseOffset; ///< Offset relative to pointer to root STORE
|
||||
vector<Varnode *> nonConstAdds; ///< non-constant Varnodes being added into pointer calculation
|
||||
void findBasePointer(Varnode *initPtr); ///< Find the base pointer for the sequence
|
||||
void findDuplicateBases(vector<Varnode *> &duplist); ///< Find any duplicates of \b basePointer
|
||||
void findInitialStores(vector<PcodeOp *> &stores);
|
||||
static uint8 calcAddElements(Varnode *vn,vector<Varnode *> &nonConst,int4 maxDepth);
|
||||
uint8 calcPtraddOffset(Varnode *vn,vector<Varnode *> &nonConst);
|
||||
|
@ -2729,7 +2729,7 @@ int4 ActionSetCasts::apply(Funcdata &data)
|
||||
data.opUndoPtradd(op,true);
|
||||
}
|
||||
else if (opc == CPUI_PTRSUB) { // Check for PTRSUB that no longer fits pointer
|
||||
if (!op->getIn(0)->getTypeReadFacing(op)->isPtrsubMatching(op->getIn(1)->getOffset(),0,1)) {
|
||||
if (!op->getIn(0)->getTypeReadFacing(op)->isPtrsubMatching(op->getIn(1)->getOffset(),0,0)) {
|
||||
if (op->getIn(1)->getOffset() == 0) {
|
||||
data.opRemoveInput(op, 1);
|
||||
data.opSetOpcode(op, CPUI_COPY);
|
||||
@ -3402,8 +3402,7 @@ int4 ActionMarkImplied::apply(Funcdata &data)
|
||||
{
|
||||
VarnodeLocSet::const_iterator viter;
|
||||
list<PcodeOp *>::const_iterator oiter;
|
||||
Varnode *vn,*vncur,*defvn,*outvn;
|
||||
PcodeOp *op;
|
||||
Varnode *vn,*vncur,*outvn;
|
||||
vector<DescTreeElement> varstack; // Depth first varnode traversal stack
|
||||
|
||||
for(viter=data.beginLoc();viter!=data.endLoc();++viter) {
|
||||
@ -3420,16 +3419,9 @@ int4 ActionMarkImplied::apply(Funcdata &data)
|
||||
if (!checkImpliedCover(data,vncur)) // Can this variable be implied
|
||||
vncur->setExplicit(); // if not, mark explicit
|
||||
else {
|
||||
vncur->setImplied(); // Mark as implied
|
||||
op = vncur->getDef();
|
||||
Merge::markImplied(vncur);
|
||||
// setting the implied type is now taken care of by ActionSetCasts
|
||||
// vn->updatetype(op->outputtype_token(),false,false); // implied must have parsed type
|
||||
// Back propagate varnode's cover to inputs of defining op
|
||||
for(int4 i=0;i<op->numInput();++i) {
|
||||
defvn = op->getIn(i);
|
||||
if (!defvn->hasCover()) continue;
|
||||
data.getMerge().inflate(defvn,vncur->getHigh());
|
||||
}
|
||||
}
|
||||
varstack.pop_back();
|
||||
}
|
||||
|
@ -4,9 +4,9 @@
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
@ -477,11 +477,22 @@ void Cover::merge(const Cover &op2)
|
||||
void Cover::rebuild(const Varnode *vn)
|
||||
|
||||
{
|
||||
list<PcodeOp *>::const_iterator iter;
|
||||
vector<const Varnode *> path(1,vn);
|
||||
int4 pos = 0;
|
||||
|
||||
addDefPoint(vn);
|
||||
for(iter=vn->beginDescend();iter!=vn->endDescend();++iter)
|
||||
addRefPoint(*iter,vn);
|
||||
do {
|
||||
const Varnode *curVn = path[pos];
|
||||
pos += 1;
|
||||
list<PcodeOp *>::const_iterator iter;
|
||||
for(iter=curVn->beginDescend();iter!=curVn->endDescend();++iter) {
|
||||
const PcodeOp *op = *iter;
|
||||
addRefPoint(op,vn);
|
||||
const Varnode *outVn = op->getOut();
|
||||
if (outVn != (Varnode *)0 && outVn->isImplied())
|
||||
path.push_back(outVn);
|
||||
}
|
||||
} while(pos < path.size());
|
||||
}
|
||||
|
||||
/// Any previous cover is removed. Calling this with an
|
||||
|
@ -1403,7 +1403,7 @@ void SplitVarnode::replaceCopyForce(Funcdata &data,const Address &addr,SplitVarn
|
||||
|
||||
{
|
||||
Varnode *inVn = in.getWhole();
|
||||
bool returnForm = copyhi->stopsCopyPropagation();
|
||||
bool returnForm = copyhi->isReturnCopy();
|
||||
if (returnForm && inVn->getAddr() != addr) {
|
||||
// Placeholder for global propagation past a RETURN needs an additional COPY
|
||||
PcodeOp *otherPoint1 = copyhi->getIn(0)->getDef();
|
||||
@ -1423,7 +1423,7 @@ void SplitVarnode::replaceCopyForce(Funcdata &data,const Address &addr,SplitVarn
|
||||
Varnode *outVn = data.newVarnodeOut(in.getSize(), addr, wholeCopy);
|
||||
outVn->setAddrForce();
|
||||
if (returnForm)
|
||||
wholeCopy->setStopCopyPropagation();
|
||||
data.markReturnCopy(wholeCopy);
|
||||
data.opSetInput(wholeCopy,inVn,0);
|
||||
data.opInsertBefore(wholeCopy, copyhi);
|
||||
data.opDestroy(copyhi); // Destroy the original COPYs. Outputs have no descendants.
|
||||
@ -3156,7 +3156,7 @@ bool CopyForceForm::verify(Varnode *h,Varnode *l,Varnode *w,PcodeOp *cpy)
|
||||
continue;
|
||||
if (!SplitVarnode::isAddrTiedContiguous(reslo, reshi, addrOut)) // Output MUST be contiguous addresses
|
||||
continue;
|
||||
if (copyhi->stopsCopyPropagation()) { // Special form has additional requirements
|
||||
if (copyhi->isReturnCopy()) { // Special form has additional requirements
|
||||
if (h->loneDescend() == (PcodeOp *)0)
|
||||
continue;
|
||||
if (l->loneDescend() == (PcodeOp *)0)
|
||||
|
@ -4,9 +4,9 @@
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
@ -172,6 +172,8 @@ void Funcdata::stopProcessing(void)
|
||||
{
|
||||
flags |= processing_complete;
|
||||
obank.destroyDead(); // Free up anything in the dead list
|
||||
if (!isJumptableRecoveryOn())
|
||||
issueDatatypeWarnings();
|
||||
#ifdef CPUI_STATISTICS
|
||||
glb->stats->process(*this);
|
||||
#endif
|
||||
@ -470,6 +472,15 @@ void Funcdata::clearCallSpecs(void)
|
||||
qlst.clear(); // Delete list of pointers
|
||||
}
|
||||
|
||||
void Funcdata::issueDatatypeWarnings(void)
|
||||
|
||||
{
|
||||
list<DatatypeWarning>::const_iterator iter;
|
||||
for(iter=glb->types->beginWarnings();iter!=glb->types->endWarnings();++iter) {
|
||||
warningHeader((*iter).getWarning());
|
||||
}
|
||||
}
|
||||
|
||||
FuncCallSpecs *Funcdata::getCallSpecs(const PcodeOp *op) const
|
||||
|
||||
{
|
||||
|
@ -122,16 +122,17 @@ class Funcdata {
|
||||
JumpTable::RecoveryMode stageJumpTable(Funcdata &partial,JumpTable *jt,PcodeOp *op,FlowInfo *flow);
|
||||
void switchOverJumpTables(const FlowInfo &flow); ///< Convert jump-table addresses to basic block indices
|
||||
void clearJumpTables(void); ///< Clear any jump-table information
|
||||
|
||||
void sortCallSpecs(void); ///< Sort calls using a dominance based order
|
||||
void deleteCallSpecs(PcodeOp *op); ///< Remove the specification for a particular call
|
||||
void clearCallSpecs(void); ///< Remove all call specifications
|
||||
|
||||
BlockBasic *nodeSplitBlockEdge(BlockBasic *b,int4 inedge);
|
||||
PcodeOp *nodeSplitCloneOp(PcodeOp *op);
|
||||
void nodeSplitCloneVarnode(PcodeOp *op,PcodeOp *newop);
|
||||
void nodeSplitRawDuplicate(BlockBasic *b,BlockBasic *bprime);
|
||||
void nodeSplitInputPatch(BlockBasic *b,BlockBasic *bprime,int4 inedge);
|
||||
|
||||
void sortCallSpecs(void); ///< Sort calls using a dominance based order
|
||||
void deleteCallSpecs(PcodeOp *op); ///< Remove the specification for a particular call
|
||||
void clearCallSpecs(void); ///< Remove all call specifications
|
||||
void issueDatatypeWarnings(void); ///< Add warning headers for any data-types that have been modified
|
||||
|
||||
static bool descendantsOutside(Varnode *vn);
|
||||
static void encodeVarnode(Encoder &encoder,VarnodeLocSet::const_iterator iter,VarnodeLocSet::const_iterator enditer);
|
||||
static bool checkIndirectUse(Varnode *vn);
|
||||
@ -445,6 +446,7 @@ public:
|
||||
PcodeOp *newIndirectOp(PcodeOp *indeffect,const Address &addr,int4 sz,uint4 extraFlags);
|
||||
PcodeOp *newIndirectCreation(PcodeOp *indeffect,const Address &addr,int4 sz,bool possibleout);
|
||||
void markIndirectCreation(PcodeOp *indop,bool possibleOutput); ///< Convert CPUI_INDIRECT into an \e indirect \e creation
|
||||
void markReturnCopy(PcodeOp *op) { op->flags |= PcodeOp::return_copy; } ///< Mark COPY as returning a global value
|
||||
PcodeOp *findOp(const SeqNum &sq) { return obank.findOp(sq); } ///< Find PcodeOp with given sequence number
|
||||
void opInsertBefore(PcodeOp *op,PcodeOp *follow); ///< Insert given PcodeOp before a specific op
|
||||
void opInsertAfter(PcodeOp *op,PcodeOp *prev); ///< Insert given PcodeOp after a specific op
|
||||
@ -486,6 +488,8 @@ public:
|
||||
Varnode *opStackLoad(AddrSpace *spc,uintb off,uint4 sz,PcodeOp *op,Varnode *stackptr,bool insertafter);
|
||||
PcodeOp *opStackStore(AddrSpace *spc,uintb off,PcodeOp *op,bool insertafter);
|
||||
void opUndoPtradd(PcodeOp *op,bool finalize); ///< Convert a CPUI_PTRADD back into a CPUI_INT_ADD
|
||||
static int4 opFlipInPlaceTest(PcodeOp *op,vector<PcodeOp *> &fliplist);
|
||||
void opFlipInPlaceExecute(vector<PcodeOp *> &fliplist);
|
||||
|
||||
/// \brief Start of PcodeOp objects with the given op-code
|
||||
list<PcodeOp *>::const_iterator beginOp(OpCode opc) const { return obank.begin(opc); }
|
||||
@ -561,6 +565,11 @@ public:
|
||||
bool replaceLessequal(PcodeOp *op); ///< Replace INT_LESSEQUAL and INT_SLESSEQUAL expressions
|
||||
bool distributeIntMultAdd(PcodeOp *op); ///< Distribute constant coefficient to additive input
|
||||
bool collapseIntMultMult(Varnode *vn); ///< Collapse constant coefficients for two chained CPUI_INT_MULT
|
||||
Varnode *buildCopyTemp(Varnode *vn,PcodeOp *point); ///< Create a COPY of given Varnode in a temporary register
|
||||
|
||||
static PcodeOp *cseFindInBlock(PcodeOp *op,Varnode *vn,BlockBasic *bl,PcodeOp *earliest);
|
||||
PcodeOp *cseElimination(PcodeOp *op1,PcodeOp *op2);
|
||||
void cseEliminateList(vector< pair<uintm,PcodeOp *> > &list,vector<Varnode *> &outlist);
|
||||
static bool compareCallspecs(const FuncCallSpecs *a,const FuncCallSpecs *b);
|
||||
|
||||
#ifdef OPACTION_DEBUG
|
||||
@ -686,14 +695,5 @@ public:
|
||||
bool execute(PcodeOp *op,int4 slot,ParamTrial *t,bool allowFail);
|
||||
};
|
||||
|
||||
extern int4 opFlipInPlaceTest(PcodeOp *op,vector<PcodeOp *> &fliplist);
|
||||
extern void opFlipInPlaceExecute(Funcdata &data,vector<PcodeOp *> &fliplist);
|
||||
|
||||
extern PcodeOp *earliestUseInBlock(Varnode *vn,BlockBasic *bl);
|
||||
extern PcodeOp *cseFindInBlock(PcodeOp *op,Varnode *vn,BlockBasic *bl,PcodeOp *earliest);
|
||||
extern PcodeOp *cseElimination(Funcdata &data,PcodeOp *op1,PcodeOp *op2);
|
||||
extern void cseEliminateList(Funcdata &data,vector< pair<uintm,PcodeOp *> > &list,
|
||||
vector<Varnode *> &outlist);
|
||||
|
||||
} // End namespace ghidra
|
||||
#endif
|
||||
|
@ -4,9 +4,9 @@
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
@ -1100,6 +1100,66 @@ bool Funcdata::collapseIntMultMult(Varnode *vn)
|
||||
return true;
|
||||
}
|
||||
|
||||
/// Return a Varnode in the \e unique space that is defined by a COPY op taking the given Varnode as input.
|
||||
/// If a COPY op to a \e unique already exists, it may be returned. If the preexisting COPY is not usable
|
||||
/// at the specified \b point, it is redefined at an earlier point in the control-flow so that it can be used.
|
||||
/// \param vn is the given Varnode to COPY
|
||||
/// \param point is the PcodeOp where the copy needs to be available
|
||||
/// \return the \e unique Varnode COPY
|
||||
Varnode *Funcdata::buildCopyTemp(Varnode *vn,PcodeOp *point)
|
||||
|
||||
{
|
||||
PcodeOp *otherOp = (PcodeOp *)0;
|
||||
PcodeOp *usedCopy = (PcodeOp *)0;
|
||||
list<PcodeOp *>::const_iterator iter;
|
||||
for(iter=vn->beginDescend();iter!=vn->endDescend();++iter) {
|
||||
PcodeOp *op = *iter;
|
||||
if (op->code() != CPUI_COPY) continue;
|
||||
Varnode *outvn = op->getOut();
|
||||
if (outvn->getSpace()->getType() == IPTR_INTERNAL) {
|
||||
if (outvn->isTypeLock())
|
||||
continue;
|
||||
otherOp = op;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (otherOp != (PcodeOp *)0) {
|
||||
if (point->getParent() == otherOp->getParent()) {
|
||||
if (point->getSeqNum().getOrder() < otherOp->getSeqNum().getOrder())
|
||||
usedCopy = (PcodeOp *)0;
|
||||
else
|
||||
usedCopy = otherOp;
|
||||
}
|
||||
else {
|
||||
BlockBasic *common;
|
||||
common = (BlockBasic *)FlowBlock::findCommonBlock(point->getParent(),otherOp->getParent());
|
||||
if (common == point->getParent())
|
||||
usedCopy = (PcodeOp *)0;
|
||||
else if (common == otherOp->getParent())
|
||||
usedCopy = otherOp;
|
||||
else { // Neither op is ancestor of the other
|
||||
usedCopy = newOp(1,common->getStop());
|
||||
opSetOpcode(usedCopy,CPUI_COPY);
|
||||
newUniqueOut(vn->getSize(),usedCopy);
|
||||
opSetInput(usedCopy,vn,0);
|
||||
opInsertEnd(usedCopy,common);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (usedCopy == (PcodeOp *)0) {
|
||||
usedCopy = newOp(1,point->getAddr());
|
||||
opSetOpcode(usedCopy, CPUI_COPY);
|
||||
newUniqueOut(vn->getSize(), usedCopy);
|
||||
opSetInput(usedCopy, vn, 0);
|
||||
opInsertBefore(usedCopy, point);
|
||||
}
|
||||
if (otherOp != (PcodeOp *)0 && otherOp != usedCopy) {
|
||||
totalReplace(otherOp->getOut(),usedCopy->getOut());
|
||||
opDestroy(otherOp);
|
||||
}
|
||||
return usedCopy->getOut();
|
||||
}
|
||||
|
||||
/// \brief Trace a boolean value to a set of PcodeOps that can be changed to flip the boolean value
|
||||
///
|
||||
/// The boolean Varnode is either the output of the given PcodeOp or the
|
||||
@ -1108,7 +1168,7 @@ bool Funcdata::collapseIntMultMult(Varnode *vn)
|
||||
/// \param op is the given PcodeOp
|
||||
/// \param fliplist is the array that will hold the ops to flip
|
||||
/// \return 0 if the change normalizes, 1 if the change is ambivalent, 2 if the change does not normalize
|
||||
int4 opFlipInPlaceTest(PcodeOp *op,vector<PcodeOp *> &fliplist)
|
||||
int4 Funcdata::opFlipInPlaceTest(PcodeOp *op,vector<PcodeOp *> &fliplist)
|
||||
|
||||
{
|
||||
Varnode *vn;
|
||||
@ -1168,7 +1228,7 @@ int4 opFlipInPlaceTest(PcodeOp *op,vector<PcodeOp *> &fliplist)
|
||||
/// facilitate the flip.
|
||||
/// \param data is the function being modified
|
||||
/// \param fliplist is the list of PcodeOps to modify
|
||||
void opFlipInPlaceExecute(Funcdata &data,vector<PcodeOp *> &fliplist)
|
||||
void Funcdata::opFlipInPlaceExecute(vector<PcodeOp *> &fliplist)
|
||||
|
||||
{
|
||||
Varnode *vn;
|
||||
@ -1180,53 +1240,29 @@ void opFlipInPlaceExecute(Funcdata &data,vector<PcodeOp *> &fliplist)
|
||||
vn = op->getIn(0);
|
||||
PcodeOp *otherop = op->getOut()->loneDescend(); // Must be a lone descendant
|
||||
int4 slot = otherop->getSlot(op->getOut());
|
||||
data.opSetInput(otherop,vn,slot); // Propagate -vn- into otherop
|
||||
data.opDestroy(op);
|
||||
opSetInput(otherop,vn,slot); // Propagate -vn- into otherop
|
||||
opDestroy(op);
|
||||
}
|
||||
else if (opc == CPUI_MAX) {
|
||||
if (op->code() == CPUI_BOOL_AND)
|
||||
data.opSetOpcode(op,CPUI_BOOL_OR);
|
||||
opSetOpcode(op,CPUI_BOOL_OR);
|
||||
else if (op->code() == CPUI_BOOL_OR)
|
||||
data.opSetOpcode(op,CPUI_BOOL_AND);
|
||||
opSetOpcode(op,CPUI_BOOL_AND);
|
||||
else
|
||||
throw LowlevelError("Bad flipInPlace op");
|
||||
}
|
||||
else {
|
||||
data.opSetOpcode(op,opc);
|
||||
opSetOpcode(op,opc);
|
||||
if (flipyes) {
|
||||
data.opSwapInput(op,0,1);
|
||||
opSwapInput(op,0,1);
|
||||
|
||||
if ((opc == CPUI_INT_LESSEQUAL)||(opc == CPUI_INT_SLESSEQUAL))
|
||||
data.replaceLessequal(op);
|
||||
replaceLessequal(op);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// \brief Get the earliest use/read of a Varnode in a specified basic block
|
||||
///
|
||||
/// \param vn is the Varnode to search for
|
||||
/// \param bl is the specified basic block in which to search
|
||||
/// \return the earliest PcodeOp reading the Varnode or NULL
|
||||
PcodeOp *earliestUseInBlock(Varnode *vn,BlockBasic *bl)
|
||||
|
||||
{
|
||||
list<PcodeOp *>::const_iterator iter;
|
||||
PcodeOp *res = (PcodeOp *)0;
|
||||
|
||||
for(iter=vn->beginDescend();iter!=vn->endDescend();++iter) {
|
||||
PcodeOp *op = *iter;
|
||||
if (op->getParent() != bl) continue;
|
||||
if (res == (PcodeOp *)0)
|
||||
res = op;
|
||||
else {
|
||||
if (op->getSeqNum().getOrder() < res->getSeqNum().getOrder())
|
||||
res = op;
|
||||
}
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
/// \brief Find a duplicate calculation of a given PcodeOp reading a specific Varnode
|
||||
///
|
||||
/// We only match 1 level of calculation. Additionally the duplicate must occur in the
|
||||
@ -1236,7 +1272,7 @@ PcodeOp *earliestUseInBlock(Varnode *vn,BlockBasic *bl)
|
||||
/// \param bl is the indicated basic block
|
||||
/// \param earliest is the specified op to be earlier than
|
||||
/// \return the discovered duplicate PcodeOp or NULL
|
||||
PcodeOp *cseFindInBlock(PcodeOp *op,Varnode *vn,BlockBasic *bl,PcodeOp *earliest)
|
||||
PcodeOp *Funcdata::cseFindInBlock(PcodeOp *op,Varnode *vn,BlockBasic *bl,PcodeOp *earliest)
|
||||
|
||||
{
|
||||
list<PcodeOp *>::const_iterator iter;
|
||||
@ -1265,11 +1301,10 @@ PcodeOp *cseFindInBlock(PcodeOp *op,Varnode *vn,BlockBasic *bl,PcodeOp *earliest
|
||||
/// (depth 1 functional equivalence) eliminate the redundancy. Return the remaining (dominating)
|
||||
/// PcodeOp. If neither op dominates the other, both are eliminated, and a new PcodeOp
|
||||
/// is built at a commonly accessible point.
|
||||
/// \param data is the function being modified
|
||||
/// \param op1 is the first of the given PcodeOps
|
||||
/// \param op2 is the second given PcodeOp
|
||||
/// \return the dominating PcodeOp
|
||||
PcodeOp *cseElimination(Funcdata &data,PcodeOp *op1,PcodeOp *op2)
|
||||
PcodeOp *Funcdata::cseElimination(PcodeOp *op1,PcodeOp *op2)
|
||||
|
||||
{
|
||||
PcodeOp *replace;
|
||||
@ -1288,25 +1323,25 @@ PcodeOp *cseElimination(Funcdata &data,PcodeOp *op1,PcodeOp *op2)
|
||||
else if (common == op2->getParent())
|
||||
replace = op2;
|
||||
else { // Neither op is ancestor of the other
|
||||
replace = data.newOp(op1->numInput(),common->getStop());
|
||||
data.opSetOpcode(replace,op1->code());
|
||||
data.newVarnodeOut(op1->getOut()->getSize(),op1->getOut()->getAddr(),replace);
|
||||
replace = newOp(op1->numInput(),common->getStop());
|
||||
opSetOpcode(replace,op1->code());
|
||||
newVarnodeOut(op1->getOut()->getSize(),op1->getOut()->getAddr(),replace);
|
||||
for(int4 i=0;i<op1->numInput();++i) {
|
||||
if (op1->getIn(i)->isConstant())
|
||||
data.opSetInput(replace,data.newConstant(op1->getIn(i)->getSize(),op1->getIn(i)->getOffset()),i);
|
||||
opSetInput(replace,newConstant(op1->getIn(i)->getSize(),op1->getIn(i)->getOffset()),i);
|
||||
else
|
||||
data.opSetInput(replace,op1->getIn(i),i);
|
||||
opSetInput(replace,op1->getIn(i),i);
|
||||
}
|
||||
data.opInsertEnd(replace,common);
|
||||
opInsertEnd(replace,common);
|
||||
}
|
||||
}
|
||||
if (replace != op1) {
|
||||
data.totalReplace(op1->getOut(),replace->getOut());
|
||||
data.opDestroy(op1);
|
||||
totalReplace(op1->getOut(),replace->getOut());
|
||||
opDestroy(op1);
|
||||
}
|
||||
if (replace != op2) {
|
||||
data.totalReplace(op2->getOut(),replace->getOut());
|
||||
data.opDestroy(op2);
|
||||
totalReplace(op2->getOut(),replace->getOut());
|
||||
opDestroy(op2);
|
||||
}
|
||||
return replace;
|
||||
}
|
||||
@ -1329,10 +1364,9 @@ static bool compareCseHash(const pair<uintm,PcodeOp *> &a,const pair<uintm,Pcode
|
||||
/// The hash serves as a primary test for duplicate calculations; if it doesn't match
|
||||
/// the PcodeOps aren't common subexpressions. This method searches for hash matches
|
||||
/// then does secondary testing and eliminates any redundancy it finds.
|
||||
/// \param data is the function being modified
|
||||
/// \param list is the list of (hash, PcodeOp) pairs
|
||||
/// \param outlist will hold Varnodes produced by duplicate calculations
|
||||
void cseEliminateList(Funcdata &data,vector< pair<uintm,PcodeOp *> > &list,vector<Varnode *> &outlist)
|
||||
void Funcdata::cseEliminateList(vector< pair<uintm,PcodeOp *> > &list,vector<Varnode *> &outlist)
|
||||
|
||||
{
|
||||
PcodeOp *op1,*op2,*resop;
|
||||
@ -1350,9 +1384,9 @@ void cseEliminateList(Funcdata &data,vector< pair<uintm,PcodeOp *> > &list,vecto
|
||||
if ((!op1->isDead())&&(!op2->isDead())&&op1->isCseMatch(op2)) {
|
||||
Varnode *outvn1 = op1->getOut();
|
||||
Varnode *outvn2 = op2->getOut();
|
||||
if ((outvn1 == (Varnode *)0)||data.isHeritaged(outvn1)) {
|
||||
if ((outvn2 == (Varnode *)0)||data.isHeritaged(outvn2)) {
|
||||
resop = cseElimination(data,op1,op2);
|
||||
if ((outvn1 == (Varnode *)0)||isHeritaged(outvn1)) {
|
||||
if ((outvn2 == (Varnode *)0)||isHeritaged(outvn2)) {
|
||||
resop = cseElimination(op1,op2);
|
||||
outlist.push_back(resop->getOut());
|
||||
}
|
||||
}
|
||||
|
@ -397,36 +397,45 @@ void Funcdata::combineInputVarnodes(Varnode *vnHi,Varnode *vnLo)
|
||||
if (!isContiguous)
|
||||
throw LowlevelError("Input varnodes being combined are not contiguous");
|
||||
vector<PcodeOp *> pieceList;
|
||||
bool otherOps = false;
|
||||
bool otherOpsHi = false;
|
||||
bool otherOpsLo = false;
|
||||
list<PcodeOp *>::const_iterator iter;
|
||||
for(iter=vnHi->beginDescend();iter!=vnHi->endDescend();++iter) {
|
||||
PcodeOp *op = *iter;
|
||||
if (op->code() == CPUI_PIECE && op->getIn(0) == vnHi && op->getIn(1) == vnLo)
|
||||
pieceList.push_back(op);
|
||||
else
|
||||
otherOps = true;
|
||||
otherOpsHi = true;
|
||||
}
|
||||
for(iter=vnLo->beginDescend();iter!=vnLo->endDescend();++iter) {
|
||||
PcodeOp *op = *iter;
|
||||
if (op->code() != CPUI_PIECE || op->getIn(0) != vnHi || op->getIn(1) != vnLo)
|
||||
otherOpsLo = true;
|
||||
}
|
||||
for(int4 i=0;i<pieceList.size();++i) {
|
||||
opRemoveInput(pieceList[i], 1);
|
||||
opUnsetInput(pieceList[i], 0);
|
||||
}
|
||||
// If there are other PcodeOps besides PIECEs that are directly combining vnHi and vnLo
|
||||
// create replacement Varnodes constructed as SUBPIECEs of the new combined Varnode
|
||||
PcodeOp *subHi = (PcodeOp *)0;
|
||||
PcodeOp *subLo = (PcodeOp *)0;
|
||||
if (otherOps) {
|
||||
// If there are other PcodeOps besides PIECEs that are directly combining vnHi and vnLo
|
||||
// create replacement Varnodes constructed as SUBPIECEs of the new combined Varnode
|
||||
if (otherOpsHi) {
|
||||
BlockBasic *bb = (BlockBasic *)bblocks.getBlock(0);
|
||||
subHi = newOp(2,bb->getStart());
|
||||
opSetOpcode(subHi, CPUI_SUBPIECE);
|
||||
opSetInput(subHi,newConstant(4, vnLo->getSize()),1);
|
||||
Varnode *newHi = newVarnodeOut(vnHi->getSize(),vnHi->getAddr(),subHi);
|
||||
opInsertBegin(subHi, bb);
|
||||
totalReplace(vnHi, newHi);
|
||||
}
|
||||
if (otherOpsLo) {
|
||||
BlockBasic *bb = (BlockBasic *)bblocks.getBlock(0);
|
||||
subLo = newOp(2,bb->getStart());
|
||||
opSetOpcode(subLo, CPUI_SUBPIECE);
|
||||
opSetInput(subLo,newConstant(4, 0),1);
|
||||
Varnode *newLo = newVarnodeOut(vnLo->getSize(),vnLo->getAddr(),subLo);
|
||||
opInsertBegin(subLo, bb);
|
||||
totalReplace(vnHi, newHi);
|
||||
totalReplace(vnLo, newLo);
|
||||
}
|
||||
int4 outSize = vnHi->getSize() + vnLo->getSize();
|
||||
@ -438,10 +447,10 @@ void Funcdata::combineInputVarnodes(Varnode *vnHi,Varnode *vnLo)
|
||||
opSetInput(pieceList[i],inVn,0);
|
||||
opSetOpcode(pieceList[i], CPUI_COPY);
|
||||
}
|
||||
if (otherOps) {
|
||||
if (otherOpsHi)
|
||||
opSetInput(subHi,inVn,0);
|
||||
if (otherOpsLo)
|
||||
opSetInput(subLo,inVn,0);
|
||||
}
|
||||
}
|
||||
|
||||
/// Construct a constant Varnode up to 128 bits, using INT_ZEXT and PIECE if necessary.
|
||||
|
@ -4,9 +4,9 @@
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
@ -2792,9 +2792,11 @@ Datatype *CParse::newStruct(const string &ident,vector<TypeDeclarator *> *declis
|
||||
sublist.emplace_back(0,-1,decl->getIdentifier(),decl->buildType(glb));
|
||||
}
|
||||
|
||||
TypeStruct::assignFieldOffsets(sublist);
|
||||
try {
|
||||
glb->types->setFields(sublist,res,-1,-1,0);
|
||||
int4 newSize;
|
||||
int4 newAlign;
|
||||
TypeStruct::assignFieldOffsets(sublist,newSize,newAlign);
|
||||
glb->types->setFields(sublist,res,newSize,newAlign,0);
|
||||
}
|
||||
catch (LowlevelError &err) {
|
||||
setError(err.explain);
|
||||
@ -2830,7 +2832,10 @@ Datatype *CParse::newUnion(const string &ident,vector<TypeDeclarator *> *declist
|
||||
}
|
||||
|
||||
try {
|
||||
glb->types->setFields(sublist,res,-1,-1,0);
|
||||
int4 newSize;
|
||||
int4 newAlign;
|
||||
TypeUnion::assignFieldOffsets(sublist,newSize,newAlign,res);
|
||||
glb->types->setFields(sublist,res,newSize,newAlign,0);
|
||||
}
|
||||
catch (LowlevelError &err) {
|
||||
setError(err.explain);
|
||||
@ -2886,8 +2891,13 @@ Datatype *CParse::newEnum(const string &ident,vector<Enumerator *> *vecenum)
|
||||
vallist.push_back(enumer->value);
|
||||
assignlist.push_back(enumer->constantassigned);
|
||||
}
|
||||
if (!glb->types->setEnumValues(namelist,vallist,assignlist,res)) {
|
||||
setError("Bad enumeration values");
|
||||
try {
|
||||
map<uintb,string> namemap;
|
||||
TypeEnum::assignValues(namemap,namelist,vallist,assignlist,res);
|
||||
glb->types->setEnumValues(namemap, res);
|
||||
}
|
||||
catch (LowlevelError &err) {
|
||||
setError(err.explain);
|
||||
glb->types->destroyType(res);
|
||||
return (Datatype *)0;
|
||||
}
|
||||
|
@ -4,9 +4,9 @@
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
@ -1046,9 +1046,11 @@ Datatype *CParse::newStruct(const string &ident,vector<TypeDeclarator *> *declis
|
||||
sublist.emplace_back(0,-1,decl->getIdentifier(),decl->buildType(glb));
|
||||
}
|
||||
|
||||
TypeStruct::assignFieldOffsets(sublist);
|
||||
try {
|
||||
glb->types->setFields(sublist,res,-1,-1,0);
|
||||
int4 newSize;
|
||||
int4 newAlign;
|
||||
TypeStruct::assignFieldOffsets(sublist,newSize,newAlign);
|
||||
glb->types->setFields(sublist,res,newSize,newAlign,0);
|
||||
}
|
||||
catch (LowlevelError &err) {
|
||||
setError(err.explain);
|
||||
@ -1084,7 +1086,10 @@ Datatype *CParse::newUnion(const string &ident,vector<TypeDeclarator *> *declist
|
||||
}
|
||||
|
||||
try {
|
||||
glb->types->setFields(sublist,res,-1,-1,0);
|
||||
int4 newSize;
|
||||
int4 newAlign;
|
||||
TypeUnion::assignFieldOffsets(sublist,newSize,newAlign,res);
|
||||
glb->types->setFields(sublist,res,newSize,newAlign,0);
|
||||
}
|
||||
catch (LowlevelError &err) {
|
||||
setError(err.explain);
|
||||
@ -1140,8 +1145,13 @@ Datatype *CParse::newEnum(const string &ident,vector<Enumerator *> *vecenum)
|
||||
vallist.push_back(enumer->value);
|
||||
assignlist.push_back(enumer->constantassigned);
|
||||
}
|
||||
if (!glb->types->setEnumValues(namelist,vallist,assignlist,res)) {
|
||||
setError("Bad enumeration values");
|
||||
try {
|
||||
map<uintb,string> namemap;
|
||||
TypeEnum::assignValues(namemap,namelist,vallist,assignlist,res);
|
||||
glb->types->setEnumValues(namemap, res);
|
||||
}
|
||||
catch (LowlevelError &err) {
|
||||
setError(err.explain);
|
||||
glb->types->destroyType(res);
|
||||
return (Datatype *)0;
|
||||
}
|
||||
|
@ -4,9 +4,9 @@
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
@ -98,6 +98,43 @@ int4 LocationMap::findPass(const Address &addr) const
|
||||
return -1;
|
||||
}
|
||||
|
||||
/// Addresses fed to this method must already be sorted. If the given range intersects the last range in
/// the list, the last range is extended to cover it. Otherwise the range is added as a new element to the
/// end of the list. The given boolean properties are associated with any new element. If an old element is
/// extended, any new boolean properties are ORed into the old element's properties.
/// \param addr is the starting address of the given range
/// \param size is the number of bytes in the given range
/// \param fl are the given boolean properties to associate with the range
void TaskList::add(Address addr,int4 size,uint4 fl)

{
  if (!tasklist.empty()) {
    MemRange &entry(tasklist.back());
    int4 over = addr.overlap(0,entry.addr,entry.size);
    if (over >= 0) {
      int4 relsize = size + over;
      if (relsize > entry.size)
        entry.size = relsize;
      entry.flags |= fl;
      return;
    }
  }
  tasklist.emplace_back(addr,size,fl);
}
|
||||
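// A minimal stand-alone sketch of the merge-or-append rule documented above;
// it assumes plain uint64_t offsets in place of Ghidra's Address class and a
// simplified struct in place of MemRange (all names here are illustrative only).
#include <cstdint>
#include <iostream>
#include <list>

struct ToyRange {
  uint64_t addr;        // starting offset of the range
  int size;             // number of bytes in the range
  unsigned flags;       // boolean properties associated with the range
};

// Ranges must arrive in address order; a range that starts inside the last
// element extends it (ORing in its properties), otherwise it is appended.
void addRange(std::list<ToyRange> &tasks,uint64_t addr,int size,unsigned fl)
{
  if (!tasks.empty()) {
    ToyRange &entry = tasks.back();
    if (addr >= entry.addr && addr < entry.addr + entry.size) {
      int over = (int)(addr - entry.addr);
      int relsize = size + over;        // size of the union, relative to entry.addr
      if (relsize > entry.size)
        entry.size = relsize;
      entry.flags |= fl;
      return;
    }
  }
  tasks.push_back({addr,size,fl});
}

int main()
{
  std::list<ToyRange> tasks;
  addRange(tasks,0x1000,4,1);   // first element
  addRange(tasks,0x1002,8,2);   // overlaps the last element: merged, flags become 3
  addRange(tasks,0x1010,4,1);   // disjoint: appended as a new element
  for (const ToyRange &r : tasks)
    std::cout << std::hex << r.addr << std::dec << " size=" << r.size
              << " flags=" << r.flags << "\n";
  return 0;                     // prints: 1000 size=10 flags=3 / 1010 size=4 flags=1
}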
|
||||
/// This can be used to add a range anywhere in the list, but the new range must already be disjoint
|
||||
/// from ranges in the list.
|
||||
/// \param pos is the position in the list before which the new range will be inserted
|
||||
/// \param addr is the starting address of the new range
|
||||
/// \param size is the number of bytes in the new range
|
||||
/// \param fl is the boolean properties to be associated with the new range
|
||||
/// \return an iterator to the new range
|
||||
TaskList::iterator TaskList::insert(iterator pos,Address addr,int4 size,uint4 fl)
|
||||
|
||||
{
|
||||
return tasklist.emplace(pos,addr,size,fl);
|
||||
}
|
||||
|
||||
/// Any basic blocks currently in \b this queue are removed. Space is
|
||||
/// reserved for a new set of prioritized stacks.
|
||||
/// \param maxdepth is the number of stacks to allocate
|
||||
@ -194,13 +231,13 @@ void Heritage::clearInfoList(void)
|
||||
(*iter).reset();
|
||||
}
|
||||
|
||||
/// \brief Remove deprecated CPUI_MULTIEQUAL or CPUI_INDIRECT ops, preparing to re-heritage
|
||||
/// \brief Remove deprecated CPUI_MULTIEQUAL, CPUI_INDIRECT, or CPUI_COPY ops, preparing to re-heritage
|
||||
///
|
||||
/// If a previous Varnode was heritaged through a MULTIEQUAL or INDIRECT op, but now
|
||||
/// a larger range containing the Varnode is being heritaged, we throw away the op,
|
||||
/// letting the data-flow for the new larger range determine the data-flow for the
|
||||
/// old Varnode. The original Varnode is redefined as the output of a SUBPIECE
|
||||
/// of a larger free Varnode.
|
||||
/// of a larger free Varnode. Return form COPYs are simply removed, in preparation for a larger COPY.
|
||||
/// \param remove is the list of Varnodes written by MULTIEQUAL or INDIRECT
|
||||
/// \param addr is the start of the larger range
|
||||
/// \param size is the size of the range
|
||||
@ -233,13 +270,18 @@ void Heritage::removeRevisitedMarkers(const vector<Varnode *> &remove,const Addr
|
||||
else
|
||||
pos = targetOp->getBasicIter();
|
||||
++pos; // Insert SUBPIECE after target of INDIRECT
|
||||
vn->clearAddrForce(); // Replacement INDIRECT will hold the address
|
||||
}
|
||||
else {
|
||||
else if (op->code() == CPUI_MULTIEQUAL) {
|
||||
pos = op->getBasicIter(); // Insert SUBPIECE after all MULTIEQUALs in block
|
||||
++pos;
|
||||
while(pos != bl->endOp() && (*pos)->code() == CPUI_MULTIEQUAL)
|
||||
++pos;
|
||||
}
|
||||
else { // Remove return form COPY
|
||||
fd->opUnlink(op);
|
||||
continue;
|
||||
}
|
||||
int4 offset = vn->overlap(addr,size);
|
||||
fd->opUninsert(op);
|
||||
newInputs.clear();
|
||||
@ -256,48 +298,50 @@ void Heritage::removeRevisitedMarkers(const vector<Varnode *> &remove,const Addr
|
||||
|
||||
/// \brief Collect free reads, writes, and inputs in the given address range
|
||||
///
|
||||
/// \param addr is the starting address of the range
|
||||
/// \param size is the number of bytes in the range
|
||||
/// \param memrange is the given address range
|
||||
/// \param read will hold any read Varnodes in the range
|
||||
/// \param write will hold any written Varnodes
|
||||
/// \param input will hold any input Varnodes
|
||||
/// \param remove will hold any Varnodes that need to be removed
|
||||
/// \return the maximum size of a write
|
||||
int4 Heritage::collect(Address addr,int4 size,
|
||||
vector<Varnode *> &read,vector<Varnode *> &write,
|
||||
vector<Varnode *> &input,vector<Varnode *> &remove) const
|
||||
|
||||
int4 Heritage::collect(MemRange &memrange,vector<Varnode *> &read,vector<Varnode *> &write,
|
||||
vector<Varnode *> &input,vector<Varnode *> &remove) const
|
||||
{
|
||||
Varnode *vn;
|
||||
VarnodeLocSet::const_iterator viter = fd->beginLoc(addr);
|
||||
read.clear();
|
||||
write.clear();
|
||||
input.clear();
|
||||
remove.clear();
|
||||
VarnodeLocSet::const_iterator enditer;
|
||||
uintb start = addr.getOffset();
|
||||
addr = addr + size;
|
||||
if (addr.getOffset() < start) { // Wraparound
|
||||
Address tmp(addr.getSpace(),addr.getSpace()->getHighest());
|
||||
uintb start = memrange.addr.getOffset();
|
||||
Address endaddr = memrange.addr + memrange.size;
|
||||
if (endaddr.getOffset() < start) { // Wraparound
|
||||
Address tmp(endaddr.getSpace(),endaddr.getSpace()->getHighest());
|
||||
enditer = fd->endLoc(tmp);
|
||||
}
|
||||
else
|
||||
enditer = fd->beginLoc(addr);
|
||||
enditer = fd->beginLoc(endaddr);
|
||||
int4 maxsize = 0;
|
||||
while( viter != enditer ) {
|
||||
vn = *viter;
|
||||
for(VarnodeLocSet::const_iterator viter = fd->beginLoc(memrange.addr); viter != enditer; ++viter) {
|
||||
Varnode *vn = *viter;
|
||||
if (!vn->isWriteMask()) {
|
||||
if (vn->isWritten()) {
|
||||
if (vn->getSize() < size && vn->getDef()->isMarker())
|
||||
remove.push_back(vn);
|
||||
else {
|
||||
if (vn->getSize() > maxsize) // Look for maximum write size
|
||||
maxsize = vn->getSize();
|
||||
write.push_back(vn);
|
||||
PcodeOp *op = vn->getDef();
|
||||
if (op->isMarker() || op->isReturnCopy()) { // Evidence of previous heritage in this range
|
||||
if (vn->getSize() < memrange.size) {
|
||||
remove.push_back(vn);
|
||||
continue;
|
||||
}
|
||||
memrange.clearProperty(MemRange::new_addresses); // Previous pass covered everything
|
||||
}
|
||||
if (vn->getSize() > maxsize) // Look for maximum write size
|
||||
maxsize = vn->getSize();
|
||||
write.push_back(vn);
|
||||
}
|
||||
else if ((!vn->isHeritageKnown())&&(!vn->hasNoDescend()))
|
||||
read.push_back(vn);
|
||||
else if (vn->isInput())
|
||||
input.push_back(vn);
|
||||
}
|
||||
++viter;
|
||||
}
|
||||
return maxsize;
|
||||
}
|
||||
@ -1101,15 +1145,15 @@ void Heritage::reprocessFreeStores(AddrSpace *spc,vector<PcodeOp *> &freeStores)
|
||||
/// The traditional phi-node placement and renaming algorithms don't expect
|
||||
/// variable pairs where there is partial overlap. For the given address range,
|
||||
/// we make all the free Varnode sizes look uniform by adding PIECE and SUBPIECE
|
||||
/// ops. We also add INDIRECT ops, so that we can ignore indirect effects
|
||||
/// of LOAD/STORE/CALL ops.
|
||||
/// ops. If enabled, we also add INDIRECT ops, so that renaming takes into account
|
||||
/// indirect effects of LOAD/STORE/CALL ops.
|
||||
/// \param addr is the starting address of the given range
|
||||
/// \param size is the number of bytes in the given range
|
||||
/// \param guardPerformed is true if a guard has been previously performed on the range
|
||||
/// \param addIndirects is true if INDIRECT ops should be added for the range
|
||||
/// \param read is the set of Varnode values reading from the range
|
||||
/// \param write is the set of written Varnodes in the range
|
||||
/// \param inputvars is the set of Varnodes in the range already marked as input
|
||||
void Heritage::guard(const Address &addr,int4 size,bool guardPerformed,
|
||||
void Heritage::guard(const Address &addr,int4 size,bool addIndirects,
|
||||
vector<Varnode *> &read,vector<Varnode *> &write,vector<Varnode *> &inputvars)
|
||||
|
||||
{
|
||||
@ -1141,7 +1185,7 @@ void Heritage::guard(const Address &addr,int4 size,bool guardPerformed,
|
||||
// free for an address that has already been guarded before.
|
||||
// Because INDIRECTs for a single CALL or STORE really issue simultaneously, having multiple INDIRECT guards
|
||||
// for the same address confuses the renaming algorithm, so we don't add guards if we've added them before.
|
||||
if (!guardPerformed) {
|
||||
if (addIndirects) {
|
||||
fl = 0;
|
||||
// Query for generic properties of address (use empty usepoint)
|
||||
fd->getScopeLocal()->queryProperties(addr,size,Address(),fl);
|
||||
@ -1639,7 +1683,7 @@ void Heritage::guardReturns(uint4 fl,const Address &addr,int4 size,vector<Varnod
|
||||
vn->setAddrForce();
|
||||
vn->setActiveHeritage();
|
||||
fd->opSetOpcode(copyop,CPUI_COPY);
|
||||
copyop->setStopCopyPropagation();
|
||||
fd->markReturnCopy(copyop);
|
||||
Varnode *invn = fd->newVarnode(size,addr);
|
||||
invn->setActiveHeritage();
|
||||
fd->opSetInput(copyop,invn,0);
|
||||
@ -1656,9 +1700,8 @@ void Heritage::guardReturns(uint4 fl,const Address &addr,int4 size,vector<Varnod
|
||||
/// Varnode.
|
||||
/// \param refine is the refinement array
|
||||
/// \param addr is the starting address of the given range
|
||||
/// \param size is the number of bytes in the range
|
||||
/// \param vnlist is the list of Varnodes to add to the array
|
||||
void Heritage::buildRefinement(vector<int4> &refine,const Address &addr,int4 size,const vector<Varnode *> &vnlist)
|
||||
void Heritage::buildRefinement(vector<int4> &refine,const Address &addr,const vector<Varnode *> &vnlist)
|
||||
|
||||
{
|
||||
for(uint4 i=0;i<vnlist.size();++i) {
|
||||
@ -1838,20 +1881,21 @@ void Heritage::remove13Refinement(vector<int4> &refine)
|
||||
/// \brief Find the common refinement of all reads and writes in the address range
|
||||
///
|
||||
/// Split the reads and writes so they match the refinement.
|
||||
/// \param addr is the first address in the range
|
||||
/// \param size is the number of bytes in the range
|
||||
/// \param memiter points to the address range to be refined
|
||||
/// \param readvars is all \e free Varnodes overlapping the address range
|
||||
/// \param writevars is all written Varnodes overlapping the address range
|
||||
/// \param inputvars is all known input Varnodes overlapping the address range
|
||||
/// \return \b true if there is a non-trivial refinement
|
||||
bool Heritage::refinement(const Address &addr,int4 size,const vector<Varnode *> &readvars,const vector<Varnode *> &writevars,const vector<Varnode *> &inputvars)
|
||||
|
||||
TaskList::iterator Heritage::refinement(TaskList::iterator memiter,const vector<Varnode *> &readvars,
|
||||
const vector<Varnode *> &writevars,const vector<Varnode *> &inputvars)
|
||||
{
|
||||
if (size > 1024) return false;
|
||||
int4 size = (*memiter).size;
|
||||
if (size > 1024) return disjoint.end();
|
||||
Address addr = (*memiter).addr;
|
||||
vector<int4> refine(size+1,0);
|
||||
buildRefinement(refine,addr,size,readvars);
|
||||
buildRefinement(refine,addr,size,writevars);
|
||||
buildRefinement(refine,addr,size,inputvars);
|
||||
buildRefinement(refine,addr,readvars);
|
||||
buildRefinement(refine,addr,writevars);
|
||||
buildRefinement(refine,addr,inputvars);
|
||||
int4 lastpos = 0;
|
||||
for(int4 curpos=1;curpos < size;++curpos) { // Convert boundary points to partition sizes
|
||||
if (refine[curpos] != 0) {
|
||||
@ -1859,7 +1903,7 @@ bool Heritage::refinement(const Address &addr,int4 size,const vector<Varnode *>
|
||||
lastpos = curpos;
|
||||
}
|
||||
}
|
||||
if (lastpos == 0) return false; // No non-trivial refinements
|
||||
if (lastpos == 0) return disjoint.end(); // No non-trivial refinements
|
||||
refine[lastpos] = size-lastpos;
|
||||
remove13Refinement(refine);
|
||||
vector<Varnode *> newvn;
|
||||
@ -1871,22 +1915,26 @@ bool Heritage::refinement(const Address &addr,int4 size,const vector<Varnode *>
|
||||
refineInput(inputvars[i],addr,refine,newvn);
|
||||
|
||||
// Alter the disjoint cover (both locally and globally) to reflect our refinement
|
||||
LocationMap::iterator iter = disjoint.find(addr);
|
||||
int4 addrPass = (*iter).second.pass;
|
||||
disjoint.erase(iter);
|
||||
iter = globaldisjoint.find(addr);
|
||||
uint4 flags = (*memiter).flags;
|
||||
memiter = disjoint.erase(memiter);
|
||||
LocationMap::iterator iter = globaldisjoint.find(addr);
|
||||
int4 curPass = (*iter).second.pass;
|
||||
globaldisjoint.erase(iter);
|
||||
Address curaddr = addr;
|
||||
int4 cut = 0;
|
||||
int4 sz = refine[cut];
|
||||
int4 intersect;
|
||||
TaskList::iterator resiter = disjoint.insert(memiter,addr,sz,flags);
|
||||
globaldisjoint.add(addr,sz,curPass,intersect);
|
||||
cut += sz;
|
||||
addr = addr + sz;
|
||||
while(cut < size) {
|
||||
int4 sz = refine[cut];
|
||||
disjoint.add(curaddr,sz,addrPass,intersect);
|
||||
globaldisjoint.add(curaddr,sz,addrPass,intersect);
|
||||
sz = refine[cut];
|
||||
disjoint.insert(memiter,addr,sz,flags);
|
||||
globaldisjoint.add(addr,sz,curPass,intersect);
|
||||
cut += sz;
|
||||
curaddr = curaddr + sz;
|
||||
addr = addr + sz;
|
||||
}
|
||||
return true;
|
||||
return resiter;
|
||||
}
|
||||
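// A minimal stand-alone sketch of the boundary-to-partition step performed by
// refinement() above, assuming the refine array has already been marked (as
// buildRefinement() would) wherever some Varnode starts or ends inside the range.
#include <iostream>
#include <vector>

int main()
{
  const int size = 8;                   // refining an 8-byte range
  std::vector<int> refine(size + 1,0);
  refine[4] = 1;                        // e.g. Varnodes seen at [0,4), [4,8) and [6,8)
  refine[6] = 1;

  int lastpos = 0;                      // convert boundary points to partition sizes
  for (int curpos = 1;curpos < size;++curpos) {
    if (refine[curpos] != 0) {
      refine[lastpos] = curpos - lastpos;
      lastpos = curpos;
    }
  }
  refine[lastpos] = size - lastpos;     // final piece runs to the end of the range

  for (int pos = 0;pos < size;pos += refine[pos])
    std::cout << "piece at +" << pos << " size " << refine[pos] << "\n";
  return 0;                             // pieces of size 4, 2 and 2
}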
|
||||
/// \brief Make sure existing inputs for the given range fill it entirely
|
||||
@ -2549,52 +2597,43 @@ void Heritage::rename(void)
|
||||
void Heritage::placeMultiequals(void)
|
||||
|
||||
{
|
||||
LocationMap::iterator iter;
|
||||
TaskList::iterator iter;
|
||||
vector<Varnode *> readvars;
|
||||
vector<Varnode *> writevars;
|
||||
vector<Varnode *> inputvars;
|
||||
vector<Varnode *> removevars;
|
||||
|
||||
for(iter=disjoint.begin();iter!=disjoint.end();++iter) {
|
||||
Address addr = (*iter).first;
|
||||
int4 size = (*iter).second.size;
|
||||
bool guardPerformed = (*iter).second.pass < pass;
|
||||
readvars.clear();
|
||||
writevars.clear();
|
||||
inputvars.clear();
|
||||
removevars.clear();
|
||||
int4 max = collect(addr,size,readvars,writevars,inputvars,removevars); // Collect reads/writes
|
||||
if ((size > 4)&&(max < size)) {
|
||||
if (refinement(addr,size,readvars,writevars,inputvars)) {
|
||||
iter = disjoint.find(addr);
|
||||
size =(*iter).second.size;
|
||||
readvars.clear();
|
||||
writevars.clear();
|
||||
inputvars.clear();
|
||||
removevars.clear();
|
||||
collect(addr,size,readvars,writevars,inputvars,removevars);
|
||||
int4 max = collect(*iter,readvars,writevars,inputvars,removevars); // Collect reads/writes
|
||||
if ((*iter).size > 4 && max < (*iter).size) {
|
||||
TaskList::iterator refiter = refinement(iter,readvars,writevars,inputvars);
|
||||
if (refiter != disjoint.end()) {
|
||||
iter = refiter;
|
||||
collect(*iter,readvars,writevars,inputvars,removevars);
|
||||
}
|
||||
}
|
||||
const MemRange &memrange(*iter);
|
||||
int4 size = memrange.size;
|
||||
if (readvars.empty()) {
|
||||
if (writevars.empty() && inputvars.empty()) {
|
||||
continue;
|
||||
}
|
||||
if (addr.getSpace()->getType() == IPTR_INTERNAL || guardPerformed)
|
||||
if (memrange.addr.getSpace()->getType() == IPTR_INTERNAL || memrange.oldAddresses())
|
||||
continue;
|
||||
}
|
||||
if (!removevars.empty())
|
||||
removeRevisitedMarkers(removevars, addr, size);
|
||||
guardInput(addr,size,inputvars);
|
||||
guard(addr,size,guardPerformed,readvars,writevars,inputvars);
|
||||
removeRevisitedMarkers(removevars, memrange.addr, size);
|
||||
guardInput(memrange.addr,size,inputvars);
|
||||
guard(memrange.addr,size,memrange.newAddresses(),readvars,writevars,inputvars);
|
||||
calcMultiequals(writevars); // Calculate where MULTIEQUALs go
|
||||
for(int4 i=0;i<merge.size();++i) {
|
||||
BlockBasic *bl = (BlockBasic *) merge[i];
|
||||
PcodeOp *multiop = fd->newOp(bl->sizeIn(),bl->getStart());
|
||||
Varnode *vnout = fd->newVarnodeOut(size,addr,multiop);
|
||||
Varnode *vnout = fd->newVarnodeOut(size,memrange.addr,multiop);
|
||||
vnout->setActiveHeritage();
|
||||
fd->opSetOpcode(multiop,CPUI_MULTIEQUAL); // Create each MULTIEQUAL
|
||||
for(int4 j=0;j<bl->sizeIn();++j) {
|
||||
Varnode *vnin = fd->newVarnode(size,addr);
|
||||
Varnode *vnin = fd->newVarnode(size,memrange.addr);
|
||||
fd->opSetInput(multiop,vnin,j);
|
||||
}
|
||||
fd->opInsertBegin(multiop,bl); // Insert at beginning of block
|
||||
@ -2666,7 +2705,7 @@ void Heritage::heritage(void)
|
||||
int4 prev = 0;
|
||||
LocationMap::iterator liter = globaldisjoint.add(vn->getAddr(),vn->getSize(),pass,prev);
|
||||
if (prev == 0) // All new location being heritaged, or intersecting with something new
|
||||
disjoint.add((*liter).first,(*liter).second.size,pass,prev);
|
||||
disjoint.add((*liter).first,(*liter).second.size,MemRange::new_addresses);
|
||||
else if (prev==2) { // If completely contained in range from previous pass
|
||||
if (vn->isHeritageKnown()) continue; // Don't heritage if we don't have to
|
||||
if (vn->hasNoDescend()) continue;
|
||||
@ -2675,10 +2714,10 @@ void Heritage::heritage(void)
|
||||
bumpDeadcodeDelay(vn->getSpace());
|
||||
warnvn = vn;
|
||||
}
|
||||
disjoint.add((*liter).first,(*liter).second.size,(*liter).second.pass,prev);
|
||||
disjoint.add((*liter).first,(*liter).second.size,MemRange::old_addresses);
|
||||
}
|
||||
else { // Partially contained in old range, but may contain new stuff
|
||||
disjoint.add((*liter).first,(*liter).second.size,(*liter).second.pass,prev);
|
||||
disjoint.add((*liter).first,(*liter).second.size,MemRange::old_addresses | MemRange::new_addresses);
|
||||
if ((!needwarning)&&(info->deadremoved>0)&&!fd->isJumptableRecoveryOn()) {
|
||||
// TODO: We should check if this varnode is tiled by previously heritaged ranges
|
||||
if (vn->isHeritageKnown()) continue; // Assume that it is tiled and produced by merging
|
||||
|
@ -5,9 +5,9 @@
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
@ -28,12 +28,6 @@ namespace ghidra {
|
||||
/// range (indexed by its initial address) maps to its own Varnode stack.
|
||||
typedef map<Address,vector<Varnode *> > VariableStack;
|
||||
|
||||
/// \brief Label for describing extent of address range that has been heritaged
|
||||
struct SizePass {
|
||||
int4 size; ///< Size of the range (in bytes)
|
||||
int4 pass; ///< Pass when the range was heritaged
|
||||
};
|
||||
|
||||
/// \brief Map object for keeping track of which address ranges have been heritaged
|
||||
///
|
||||
/// We keep track of a fairly fine grained description of when each address range
|
||||
@ -43,6 +37,11 @@ struct SizePass {
|
||||
/// that informs the caller whether the address has been heritaged and if so in which pass.
|
||||
class LocationMap {
|
||||
public:
|
||||
/// \brief Label for describing extent of address range that has been heritaged
|
||||
struct SizePass {
|
||||
int4 size; ///< Size of the range (in bytes)
|
||||
int4 pass; ///< Pass when the range was heritaged
|
||||
};
|
||||
/// Iterator into the main map
|
||||
typedef map<Address,SizePass>::iterator iterator;
|
||||
private:
|
||||
@ -51,12 +50,48 @@ public:
|
||||
iterator add(Address addr,int4 size,int4 pass,int4 &intersect); ///< Mark new address as \b heritaged
|
||||
iterator find(const Address &addr); ///< Look up if/how given address was heritaged
|
||||
int4 findPass(const Address &addr) const; ///< Look up if/how given address was heritaged
|
||||
void erase(iterator iter) { themap.erase(iter); } ///< Remove a particular entry from the map
|
||||
iterator erase(iterator iter) { return themap.erase(iter); } ///< Remove a particular entry from the map
|
||||
iterator begin(void) { return themap.begin(); } ///< Get starting iterator over heritaged ranges
|
||||
iterator end(void) { return themap.end(); } ///< Get ending iterator over heritaged ranges
|
||||
void clear(void) { themap.clear(); } ///< Clear the map of heritaged ranges
|
||||
};
|
||||
|
||||
/// \brief An address range to be processed
struct MemRange {
  /// Properties of a single address range
  enum {
    new_addresses = 1,	///< The range covers addresses that have not been seen in previous passes
    old_addresses = 2	///< The range covers addresses that were seen in previous passes
  };
  Address addr;		///< Starting address of the range
  int4 size;		///< Number of bytes in the range
  uint4 flags;		///< Property flags
  MemRange(const Address &ad,int4 sz,uint4 fl) : addr(ad) { size = sz; flags = fl; }	///< Constructor
  bool newAddresses(void) const { return ((flags & new_addresses)!=0); }	///< Does \b this range cover new addresses?
  bool oldAddresses(void) const { return ((flags & old_addresses)!=0); }	///< Does \b this range cover old addresses?
  void clearProperty(uint4 val) { flags &= ~val; }	///< Clear specific properties from the memory range
};
|
||||
|
||||
/// \brief A list of address ranges that need to be converted to SSA form
///
/// The disjoint list of ranges is built up and processed in a single pass. The container is fed a list
/// of ranges that may be overlapping but are already in address order. It constructs a disjoint list by taking
/// the union of overlapping ranges.
class TaskList {
public:
  /// Iterator in the list
  typedef list<MemRange>::iterator iterator;
private:
  list<MemRange> tasklist;	///< List of address ranges that need to be processed
public:
  void add(Address addr,int4 size,uint4 fl);	///< Add a range to the list
  iterator insert(iterator pos,Address addr,int4 size,uint4 fl);	///< Insert a disjoint range in the list
  iterator erase(iterator iter) { return tasklist.erase(iter); }	///< Remove a particular range
  iterator begin(void) { return tasklist.begin(); }	///< Get iterator to beginning of \b this list
  iterator end(void) { return tasklist.end(); }	///< Get iterator to end of \b this list
  void clear(void) { tasklist.clear(); }	///< Clear all ranges in the list
};
|
||||
|
||||
/// \brief Priority queue for the phi-node (MULTIEQUAL) placement algorithm
|
||||
///
|
||||
/// A \e work-list for basic blocks used during phi-node placement. Implemented as
|
||||
@ -202,7 +237,7 @@ class Heritage {
|
||||
|
||||
Funcdata *fd; ///< The function \b this is controlling SSA construction
|
||||
LocationMap globaldisjoint; ///< Disjoint cover of every heritaged memory location
|
||||
LocationMap disjoint; ///< Disjoint cover of memory locations currently being heritaged
|
||||
TaskList disjoint; ///< Disjoint cover of memory locations currently being heritaged
|
||||
vector<vector<FlowBlock *> > domchild; ///< Parent->child edges in dominator tree
|
||||
vector<vector<FlowBlock *> > augment; ///< Augmented edges
|
||||
vector<uint4> flags; ///< Block properties for phi-node placement algorithm
|
||||
@ -233,7 +268,7 @@ class Heritage {
|
||||
void processJoins(void);
|
||||
void buildADT(void); ///< Build the augmented dominator tree
|
||||
void removeRevisitedMarkers(const vector<Varnode *> &remove,const Address &addr,int4 size);
|
||||
int4 collect(Address addr,int4 size,vector<Varnode *> &read,vector<Varnode *> &write,vector<Varnode *> &input,vector<Varnode *> &remove) const;
|
||||
int4 collect(MemRange &memrange,vector<Varnode *> &read,vector<Varnode *> &write,vector<Varnode *> &input,vector<Varnode *> &remove) const;
|
||||
bool callOpIndirectEffect(const Address &addr,int4 size,PcodeOp *op) const;
|
||||
Varnode *normalizeReadSize(Varnode *vn,PcodeOp *op,const Address &addr,int4 size);
|
||||
Varnode *normalizeWriteSize(Varnode *vn,const Address &addr,int4 size);
|
||||
@ -264,13 +299,14 @@ class Heritage {
|
||||
void guardLoads(uint4 fl,const Address &addr,int4 size,vector<Varnode *> &write);
|
||||
void guardReturnsOverlapping(const Address &addr,int4 size);
|
||||
void guardReturns(uint4 fl,const Address &addr,int4 size,vector<Varnode *> &write);
|
||||
static void buildRefinement(vector<int4> &refine,const Address &addr,int4 size,const vector<Varnode *> &vnlist);
|
||||
static void buildRefinement(vector<int4> &refine,const Address &addr,const vector<Varnode *> &vnlist);
|
||||
void splitByRefinement(Varnode *vn,const Address &addr,const vector<int4> &refine,vector<Varnode *> &split);
|
||||
void refineRead(Varnode *vn,const Address &addr,const vector<int4> &refine,vector<Varnode *> &newvn);
|
||||
void refineWrite(Varnode *vn,const Address &addr,const vector<int4> &refine,vector<Varnode *> &newvn);
|
||||
void refineInput(Varnode *vn,const Address &addr,const vector<int4> &refine,vector<Varnode *> &newvn);
|
||||
void remove13Refinement(vector<int4> &refine);
|
||||
bool refinement(const Address &addr,int4 size,const vector<Varnode *> &readvars,const vector<Varnode *> &writevars,const vector<Varnode *> &inputvars);
|
||||
TaskList::iterator refinement(TaskList::iterator memiter,const vector<Varnode *> &readvars,
|
||||
const vector<Varnode *> &writevars,const vector<Varnode *> &inputvars);
|
||||
void visitIncr(FlowBlock *qnode,FlowBlock *vnode);
|
||||
void calcMultiequals(const vector<Varnode *> &write);
|
||||
void renameRecurse(BlockBasic *bl,VariableStack &varstack);
|
||||
|
@ -4,9 +4,9 @@
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
@ -1586,24 +1586,22 @@ void Merge::clear(void)
|
||||
stackAffectingOps.clear();
|
||||
}
|
||||
|
||||
/// \brief Inflate the Cover of a given Varnode with a HighVariable
|
||||
/// \brief Mark the given Varnode as \e implied
|
||||
///
|
||||
/// An expression involving a HighVariable can be propagated to all the read sites of the
|
||||
/// output Varnode of the expression if the Varnode Cover can be \b inflated to include the
|
||||
/// Cover of the HighVariable, even though the Varnode is not part of the HighVariable.
|
||||
/// This routine performs the inflation, assuming an intersection test is already performed.
|
||||
/// \param a is the given Varnode to inflate
|
||||
/// \param high is the HighVariable to inflate with
|
||||
void Merge::inflate(Varnode *a,HighVariable *high)
|
||||
/// The covers of the immediate Varnodes involved in the expression are marked as dirty.
|
||||
/// This assumes covers for the whole expression are ultimately marked dirty because all its internal Varnodes
|
||||
/// are passed to this method.
|
||||
/// \param vn is the given Varnode being marked as \e implied
|
||||
void Merge::markImplied(Varnode *vn)
|
||||
|
||||
{
|
||||
testCache.updateHigh(a->getHigh());
|
||||
testCache.updateHigh(high);
|
||||
for(int4 i=0;i<high->numInstances();++i) {
|
||||
Varnode *b = high->getInstance(i);
|
||||
a->cover->merge(*b->cover);
|
||||
vn->setImplied(); // Mark as implied
|
||||
PcodeOp *op = vn->getDef();
|
||||
for(int4 i=0;i<op->numInput();++i) {
|
||||
Varnode *defvn = op->getIn(i);
|
||||
if (!defvn->hasCover()) continue;
|
||||
defvn->setFlags(Varnode::coverdirty);
|
||||
}
|
||||
a->getHigh()->coverDirty();
|
||||
}
|
||||
|
||||
/// \brief Test if we can inflate the Cover of the given Varnode without incurring intersections
|
||||
|
@ -4,9 +4,9 @@
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
@ -117,8 +117,8 @@ class Merge {
|
||||
public:
|
||||
Merge(Funcdata &fd) : data(fd), stackAffectingOps(fd), testCache(stackAffectingOps) {} ///< Construct given a specific function
|
||||
void clear(void);
|
||||
static void markImplied(Varnode *vn);
|
||||
bool inflateTest(Varnode *a,HighVariable *high);
|
||||
void inflate(Varnode *a,HighVariable *high);
|
||||
bool mergeTest(HighVariable *high,vector<HighVariable *> &tmplist);
|
||||
|
||||
void mergeOpcode(OpCode opc);
|
||||
|
@ -279,7 +279,7 @@ void PcodeOp::setOpcode(TypeOp *t_op)
|
||||
flags &= ~(PcodeOp::branch | PcodeOp::call | PcodeOp::coderef | PcodeOp::commutative |
|
||||
PcodeOp::returns | PcodeOp::nocollapse | PcodeOp::marker | PcodeOp::booloutput |
|
||||
PcodeOp::unary | PcodeOp::binary | PcodeOp::ternary | PcodeOp::special |
|
||||
PcodeOp::has_callspec | PcodeOp::no_copy_propagation);
|
||||
PcodeOp::has_callspec | PcodeOp::return_copy);
|
||||
opcode = t_op;
|
||||
flags |= t_op->getFlags();
|
||||
}
|
||||
|
@ -4,9 +4,9 @@
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
@ -90,7 +90,7 @@ public:
|
||||
binary = 0x10000, ///< Evaluate as binary expression
|
||||
special = 0x20000, ///< Cannot be evaluated (without special processing)
|
||||
ternary = 0x40000, ///< Evaluate as ternary operator (or higher)
|
||||
no_copy_propagation = 0x80000, ///< Op does not allow COPY propagation through its inputs
|
||||
return_copy = 0x80000, ///< Special form of COPY op for holding global values to (past) the end of the function
|
||||
nonprinting = 0x100000, ///< Op should not be directly printed as source
|
||||
halt = 0x200000, ///< instruction causes processor or process to halt
|
||||
badinstruction = 0x400000, ///< placeholder for bad instruction data
|
||||
@ -218,8 +218,7 @@ public:
|
||||
void setHoldOutput(void) { addlflags |= hold_output; } ///< Prevent output from being removed as dead code
|
||||
bool isPartialRoot(void) const { return ((addlflags&concat_root)!=0); } ///< Output is root of CONCAT tree
|
||||
void setPartialRoot(void) { addlflags |= concat_root; } ///< Mark \b this as root of CONCAT tree
|
||||
bool stopsCopyPropagation(void) const { return ((flags&no_copy_propagation)!=0); } ///< Does \b this allow COPY propagation
|
||||
void setStopCopyPropagation(void) { flags |= no_copy_propagation; } ///< Stop COPY propagation through inputs
|
||||
bool isReturnCopy(void) const { return ((flags&return_copy)!=0); } ///< Is \b this a \e return form COPY
|
||||
bool noIndirectCollapse(void) const { return ((addlflags & no_indirect_collapse)!=0); } ///< Check if INDIRECT collapse is possible
|
||||
void setNoIndirectCollapse(void) { addlflags |= no_indirect_collapse; } ///< Prevent collapse of INDIRECT
|
||||
bool isStoreUnmapped(void) const { return ((addlflags & store_unmapped)!=0); } ///< Is STORE location supposed to be unmapped
|
||||
|
@ -204,7 +204,7 @@ int4 RuleSelectCse::applyOp(PcodeOp *op,Funcdata &data)
|
||||
list.push_back(pair<uintm,PcodeOp *>(hash,otherop));
|
||||
}
|
||||
if (list.size()<=1) return 0;
|
||||
cseEliminateList(data,list,vlist);
|
||||
data.cseEliminateList(list,vlist);
|
||||
if (vlist.empty()) return 0;
|
||||
return 1;
|
||||
}
|
||||
@ -1048,7 +1048,7 @@ PcodeOp *RulePushMulti::findSubstitute(Varnode *in1,Varnode *in2,BlockBasic *bb,
|
||||
Varnode *vn = op1->getIn(i);
|
||||
if (vn->isConstant()) continue;
|
||||
if (vn == op2->getIn(i)) // Find matching inputs to op1 and op2,
|
||||
return cseFindInBlock(op1,vn,bb,earliest); // search for cse of op1 in bb
|
||||
return Funcdata::cseFindInBlock(op1,vn,bb,earliest); // search for cse of op1 in bb
|
||||
}
|
||||
|
||||
return (PcodeOp *)0;
|
||||
@ -1087,7 +1087,7 @@ int4 RulePushMulti::applyOp(PcodeOp *op,Funcdata &data)
|
||||
if (op1->code() == CPUI_SUBPIECE) return 0; // SUBPIECE is pulled not pushed
|
||||
|
||||
BlockBasic *bl = op->getParent();
|
||||
PcodeOp *earliest = earliestUseInBlock(op->getOut(),bl);
|
||||
PcodeOp *earliest = bl->earliestUse(op->getOut());
|
||||
if (op1->code() == CPUI_COPY) { // Special case of MERGE of 2 shadowing varnodes
|
||||
if (res==0) return 0;
|
||||
PcodeOp *substitute = findSubstitute(buf1[0],buf2[0],bl,earliest);
|
||||
@ -3036,13 +3036,13 @@ int4 RuleMultiCollapse::applyOp(PcodeOp *op,Funcdata &data)
|
||||
copyr->clearMark();
|
||||
op = copyr->getDef();
|
||||
if (func_eq) { // We have only functional equality
|
||||
PcodeOp *earliest = earliestUseInBlock(op->getOut(),op->getParent());
|
||||
PcodeOp *earliest = op->getParent()->earliestUse(op->getOut());
|
||||
newop = defcopyr->getDef(); // We must copy newop (defcopyr)
|
||||
PcodeOp *substitute = (PcodeOp *)0;
|
||||
for(int4 i=0;i<newop->numInput();++i) {
|
||||
Varnode *invn = newop->getIn(i);
|
||||
if (!invn->isConstant()) {
|
||||
substitute = cseFindInBlock(newop,invn,op->getParent(),earliest); // Has newop already been copied in this block
|
||||
substitute = Funcdata::cseFindInBlock(newop,invn,op->getParent(),earliest); // Has newop already been copied in this block
|
||||
break;
|
||||
}
|
||||
}
|
||||
@ -3606,7 +3606,7 @@ int4 RulePropagateCopy::applyOp(PcodeOp *op,Funcdata &data)
|
||||
PcodeOp *copyop;
|
||||
Varnode *vn,*invn;
|
||||
|
||||
if (op->stopsCopyPropagation()) return 0;
|
||||
if (op->isReturnCopy()) return 0;
|
||||
for(i=0;i<op->numInput();++i) {
|
||||
vn = op->getIn(i);
|
||||
if (!vn->isWritten()) continue; // Varnode must be written to
|
||||
@ -6627,13 +6627,13 @@ void RulePtrsubUndo::getOpList(vector<uint4> &oplist) const
|
||||
/// the sum of all the constants. Additionally pass back the biggest constant coefficient, for any term
|
||||
/// formed with INT_MULT.
|
||||
/// \param vn is the given root Varnode of the additive tree
|
||||
/// \param multiplier will hold the biggest constant coefficient
|
||||
/// \param multiplier will hold the biggest constant multiplier or 0, if no multiplier is present
|
||||
/// \param maxLevel is the maximum depth to search in the tree
|
||||
/// \return the sum of all constants in the additive expression
|
||||
int8 RulePtrsubUndo::getConstOffsetBack(Varnode *vn,int8 &multiplier,int4 maxLevel)
|
||||
|
||||
{
|
||||
multiplier = 1;
|
||||
multiplier = 0;
|
||||
int8 submultiplier;
|
||||
if (vn->isConstant())
|
||||
return vn->getOffset();
|
||||
@ -6658,7 +6658,8 @@ int8 RulePtrsubUndo::getConstOffsetBack(Varnode *vn,int8 &multiplier,int4 maxLev
|
||||
if (!cvn->isConstant()) return 0;
|
||||
multiplier = cvn->getOffset();
|
||||
getConstOffsetBack(op->getIn(0), submultiplier, maxLevel);
|
||||
multiplier *= submultiplier; // Only contribute to the multiplier
|
||||
if (submultiplier > 0)
|
||||
multiplier *= submultiplier; // Only contribute to the multiplier
|
||||
}
|
||||
return retval;
|
||||
}
|
||||
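// A stand-alone sketch of the additive-tree walk described above, assuming a toy
// expression node in place of Varnode/PcodeOp: ADD terms contribute to the
// constant sum, while MULT-by-constant terms only contribute to the multiplier.
#include <algorithm>
#include <cstdint>
#include <iostream>

struct Node {
  enum Kind { CONST, ADD, MUL, OTHER } kind;
  int64_t value;            // constant value (CONST) or coefficient (MUL)
  const Node *lhs, *rhs;    // children for ADD; lhs only for MUL
};

// Return the sum of constants in the tree; track the largest constant coefficient.
int64_t constOffsetBack(const Node *n,int64_t &multiplier,int depth)
{
  multiplier = 0;
  if (n->kind == Node::CONST) return n->value;
  if (depth == 0 || n->kind == Node::OTHER) return 0;
  int64_t submult = 0;
  int64_t retval = 0;
  if (n->kind == Node::ADD) {
    retval = constOffsetBack(n->lhs,submult,depth - 1);
    multiplier = submult;
    retval += constOffsetBack(n->rhs,submult,depth - 1);
    multiplier = std::max(multiplier,submult);
  }
  else {                      // MUL by a constant coefficient
    multiplier = n->value;
    constOffsetBack(n->lhs,submult,depth - 1);
    if (submult > 0)
      multiplier *= submult;  // nested coefficients compound, never add to the sum
  }
  return retval;
}

int main()
{
  // Model of (x * 0x10) + 8: constant offset is 8, biggest multiplier is 0x10.
  Node x{Node::OTHER,0,nullptr,nullptr};
  Node mul{Node::MUL,0x10,&x,nullptr};
  Node eight{Node::CONST,8,nullptr,nullptr};
  Node add{Node::ADD,0,&mul,&eight};
  int64_t mult = 0;
  int64_t off = constOffsetBack(&add,mult,4);
  std::cout << "offset=" << off << " multiplier=" << mult << "\n";  // offset=8 multiplier=16
  return 0;
}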
@ -6669,12 +6670,12 @@ int8 RulePtrsubUndo::getConstOffsetBack(Varnode *vn,int8 &multiplier,int4 maxLev
|
||||
/// constant value being added to the PTRSUB. Additionally pass back the biggest constant coefficient of any
|
||||
/// multiplicative term in the expression.
|
||||
/// \param op is the given PTRSUB
|
||||
/// \param multiplier will hold the biggest multiplicative coefficient
|
||||
/// \param multiplier will hold the biggest multiplicative coefficient or 0, if no INT_MULT or PTRADD is present.
|
||||
int8 RulePtrsubUndo::getExtraOffset(PcodeOp *op,int8 &multiplier)
|
||||
|
||||
{
|
||||
int8 extra = 0;
|
||||
multiplier = 1;
|
||||
multiplier = 0;
|
||||
int8 submultiplier;
|
||||
Varnode *outvn = op->getOut();
|
||||
op = outvn->loneDescend();
|
||||
@ -6696,9 +6697,10 @@ int8 RulePtrsubUndo::getExtraOffset(PcodeOp *op,int8 &multiplier)
|
||||
if (invn->isConstant()) // Only contribute to the extra
|
||||
extra += ptraddmult * (int8)invn->getOffset(); // if the index is constant
|
||||
getConstOffsetBack(invn,submultiplier,DEPTH_LIMIT); // otherwise just contribute to multiplier
|
||||
submultiplier *= ptraddmult;
|
||||
if (submultiplier > multiplier)
|
||||
multiplier = submultiplier;
|
||||
if (submultiplier != 0)
|
||||
ptraddmult *= submultiplier;
|
||||
if (ptraddmult > multiplier)
|
||||
multiplier = ptraddmult;
|
||||
}
|
||||
else {
|
||||
break;
|
||||
|
@ -1797,21 +1797,33 @@ bool SplitFlow::doTrace(void)
|
||||
return true;
|
||||
}
|
||||
|
||||
/// If \b pointer Varnode is written by an INT_ADD, PTRSUB, or PTRADD from a another pointer
|
||||
/// to a structure or array, update \b pointer Varnode, \b baseOffset, and \b ptrType to this.
|
||||
/// If \b pointer Varnode is written by a COPY, INT_ADD, PTRSUB, or PTRADD from another pointer to a
|
||||
/// - structure
|
||||
/// - array OR
|
||||
/// - to an implied array with the given base type
|
||||
///
|
||||
/// then update \b pointer Varnode, \b baseOffset, and \b ptrType to this.
|
||||
/// \param impliedBase if non-null is the allowed element data-type for an implied array
|
||||
/// \return \b true if \b pointer was successfully updated
|
||||
bool SplitDatatype::RootPointer::backUpPointer(Datatype *impliedBase)
|
||||
|
||||
{
|
||||
if (!pointer->isWritten())
|
||||
return false;
|
||||
int4 off;
|
||||
PcodeOp *addOp = pointer->getDef();
|
||||
OpCode opc = addOp->code();
|
||||
if (opc != CPUI_PTRSUB && opc != CPUI_INT_ADD && opc != CPUI_PTRADD)
|
||||
return false;
|
||||
Varnode *cvn = addOp->getIn(1);
|
||||
if (!cvn->isConstant())
|
||||
if (opc == CPUI_PTRSUB || opc == CPUI_INT_ADD || opc == CPUI_PTRADD) {
|
||||
Varnode *cvn = addOp->getIn(1);
|
||||
if (!cvn->isConstant())
|
||||
return false;
|
||||
off = (int4)cvn->getOffset();
|
||||
}
|
||||
else if (opc == CPUI_COPY)
|
||||
off = 0;
|
||||
else {
|
||||
return false;
|
||||
}
|
||||
Varnode *tmpPointer = addOp->getIn(0);
|
||||
Datatype *ct = tmpPointer->getTypeReadFacing(addOp);
|
||||
if (ct->getMetatype() != TYPE_PTR)
|
||||
@ -1819,11 +1831,10 @@ bool SplitDatatype::RootPointer::backUpPointer(Datatype *impliedBase)
|
||||
Datatype *parent = ((TypePointer *)ct)->getPtrTo();
|
||||
type_metatype meta = parent->getMetatype();
|
||||
if (meta != TYPE_STRUCT && meta != TYPE_ARRAY) {
|
||||
if (opc != CPUI_PTRADD || parent != impliedBase)
|
||||
if ((opc != CPUI_PTRADD && opc != CPUI_COPY) || parent != impliedBase)
|
||||
return false;
|
||||
}
|
||||
ptrType = (TypePointer *)ct;
|
||||
int4 off = (int4)cvn->getOffset();
|
||||
if (opc == CPUI_PTRADD)
|
||||
off *= (int4)addOp->getIn(2)->getOffset();
|
||||
off = AddrSpace::addressToByteInt(off, ptrType->getWordSize());
|
||||
@ -1832,10 +1843,11 @@ bool SplitDatatype::RootPointer::backUpPointer(Datatype *impliedBase)
|
||||
return true;
|
||||
}
|
||||
|
||||
/// The LOAD or STORE pointer Varnode is examined. If it is a pointer to the given data-type, the
|
||||
/// root \b pointer is returned. If not, we try to recursively walk back through either PTRSUB or INT_ADD instructions,
|
||||
/// until a pointer Varnode matching the data-type is found. Any accumulated offset, relative to the original
|
||||
/// LOAD or STORE pointer is recorded in the \b baseOffset. If a matching pointer is not found, \b false is returned.
|
||||
/// We search for a pointer to the specified data-type starting with the LOAD/STORE. If we don't immediately
|
||||
/// find it, we back up one level (through a PTRSUB, PTRADD, or INT_ADD). If it isn't found after 1 hop,
|
||||
/// \b false is returned. Once this pointer is found, we back up through any single path of nested TYPE_STRUCT
|
||||
/// and TYPE_ARRAY offsets to establish the final root \b pointer, and \b true is returned. Any accumulated offset,
|
||||
/// relative to the original LOAD or STORE pointer is recorded in the \b baseOffset.
|
||||
/// \param op is the LOAD or STORE
|
||||
/// \param valueType is the specific data-type to match
|
||||
/// \return \b true if the root pointer is found
|
||||
@ -1843,11 +1855,11 @@ bool SplitDatatype::RootPointer::find(PcodeOp *op,Datatype *valueType)
|
||||
|
||||
{
|
||||
Datatype *impliedBase = (Datatype *)0;
|
||||
if (valueType->getMetatype() == TYPE_PARTIALSTRUCT)
|
||||
if (valueType->getMetatype() == TYPE_PARTIALSTRUCT) // Strip off partial to get containing struct or array
|
||||
valueType = ((TypePartialStruct *)valueType)->getParent();
|
||||
else if (valueType->getMetatype() == TYPE_ARRAY) {
|
||||
if (valueType->getMetatype() == TYPE_ARRAY) { // If the data-type is an array
|
||||
valueType = ((TypeArray *)valueType)->getBase();
|
||||
impliedBase = valueType;
|
||||
impliedBase = valueType; // we allow an implied array (pointer to element) as a match
|
||||
}
|
||||
loadStore = op;
|
||||
baseOffset = 0;
|
||||
@ -1864,6 +1876,7 @@ bool SplitDatatype::RootPointer::find(PcodeOp *op,Datatype *valueType)
|
||||
if (ptrType->getPtrTo() != valueType)
|
||||
return false;
|
||||
}
|
||||
// The required pointer is found. We try to back up to pointers to containing structures or arrays
|
||||
for(int4 i=0;i<3;++i) {
|
||||
if (pointer->isAddrTied() || pointer->loneDescend() == (PcodeOp *)0) break;
|
||||
if (!backUpPointer(impliedBase))
|
||||
@ -1872,6 +1885,19 @@ bool SplitDatatype::RootPointer::find(PcodeOp *op,Datatype *valueType)
|
||||
return true;
|
||||
}
|
||||
|
||||
/// Add a COPY op from the \b pointer Varnode to temporary register and make it the new root \b pointer.
|
||||
/// This guarantees that the \b pointer Varnode will not be modified by subsequent STOREs and
|
||||
/// can be implicit in the expressions.
|
||||
/// \param data is the containing function
|
||||
/// \param followOp is the point where the COPY should be inserted
|
||||
void SplitDatatype::RootPointer::duplicateToTemp(Funcdata &data,PcodeOp *followOp)
|
||||
|
||||
{
|
||||
Varnode *newRoot = data.buildCopyTemp(pointer, followOp);
|
||||
newRoot->updateType(ptrType, false, false);
|
||||
pointer = newRoot;
|
||||
}
|
||||
|
||||
/// If the pointer Varnode is no longer used, recursively check and remove the op producing it,
|
||||
/// which will be either an INT_ADD or PTRSUB, until the root \b pointer is reached or
|
||||
/// a Varnode still being used is encountered.
|
||||
@ -1920,8 +1946,9 @@ Datatype *SplitDatatype::getComponent(Datatype *ct,int4 offset,bool &isHole)
|
||||
|
||||
/// For the given data-type, taking into account configuration options, return:
|
||||
/// - -1 for not splittable
|
||||
/// - 0 for data-type that needs to be split
|
||||
/// - 1 for data-type that can be split multiple ways
|
||||
/// - 0 for struct based data-type that needs to be split
|
||||
/// - 1 for array based data-type that needs to be split
|
||||
/// - 2 for primitive data-type that can be split multiple ways
|
||||
/// \param ct is the given data-type
|
||||
/// \return the categorization
|
||||
int4 SplitDatatype::categorizeDatatype(Datatype *ct)
|
||||
@ -1933,18 +1960,18 @@ int4 SplitDatatype::categorizeDatatype(Datatype *ct)
|
||||
if (!splitArrays) break;
|
||||
subType = ((TypeArray *)ct)->getBase();
|
||||
if (subType->getMetatype() != TYPE_UNKNOWN || subType->getSize() != 1)
|
||||
return 0;
|
||||
return 1;
|
||||
else
|
||||
return 1; // unknown1 array does not need splitting and acts as (large) primitive
|
||||
return 2; // unknown1 array does not need splitting and acts as (large) primitive
|
||||
case TYPE_PARTIALSTRUCT:
|
||||
subType = ((TypePartialStruct *)ct)->getParent();
|
||||
if (subType->getMetatype() == TYPE_ARRAY) {
|
||||
if (!splitArrays) break;
|
||||
subType = ((TypeArray *)subType)->getBase();
|
||||
if (subType->getMetatype() != TYPE_UNKNOWN || subType->getSize() != 1)
|
||||
return 0;
|
||||
return 1;
|
||||
else
|
||||
return 1; // unknown1 array does not need splitting and acts as (large) primitive
|
||||
return 2; // unknown1 array does not need splitting and acts as (large) primitive
|
||||
}
|
||||
else if (subType->getMetatype() == TYPE_STRUCT) {
|
||||
if (!splitStructures) break;
|
||||
@ -1959,7 +1986,7 @@ int4 SplitDatatype::categorizeDatatype(Datatype *ct)
|
||||
case TYPE_INT:
|
||||
case TYPE_UINT:
|
||||
case TYPE_UNKNOWN:
|
||||
return 1;
|
||||
return 2;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
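// A small sketch of the three-way categorization described above, assuming a toy
// tag in place of Datatype; the return codes follow the comment block: -1 not
// splittable, 0 struct-based, 1 array-based, 2 primitive (splittable multiple ways).
#include <iostream>

enum class ToyKind { STRUCT_TYPE, INT_ARRAY, UNKNOWN1_ARRAY, INT_PRIMITIVE, FLOAT_PRIMITIVE };

static int categorizeExample(ToyKind k)
{
  switch (k) {
    case ToyKind::STRUCT_TYPE:    return 0;   // struct: split field by field
    case ToyKind::INT_ARRAY:      return 1;   // array: split element by element
    case ToyKind::UNKNOWN1_ARRAY: return 2;   // unknown1 array acts as a (large) primitive
    case ToyKind::INT_PRIMITIVE:  return 2;   // primitive: can be split multiple ways
    default:                      return -1;  // e.g. float: not splittable
  }
}

int main()
{
  std::cout << categorizeExample(ToyKind::STRUCT_TYPE) << " "       // 0
            << categorizeExample(ToyKind::INT_ARRAY) << " "         // 1
            << categorizeExample(ToyKind::FLOAT_PRIMITIVE) << "\n"; // -1
  return 0;
}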
@ -1985,22 +2012,21 @@ bool SplitDatatype::testDatatypeCompatibility(Datatype *inBase,Datatype *outBase
|
||||
int4 outCategory = categorizeDatatype(outBase);
|
||||
if (outCategory < 0)
|
||||
return false;
|
||||
if (outCategory != 0 && inCategory != 0)
|
||||
if (outCategory == 2 && inCategory == 2)
|
||||
return false;
|
||||
if (!inConstant && inBase == outBase && inBase->getMetatype() == TYPE_STRUCT)
|
||||
return false; // Don't split a whole structure unless it is getting initialized from a constant
|
||||
if (isLoadStore && outCategory == 1 && inBase->getMetatype() == TYPE_ARRAY)
|
||||
if (isLoadStore && outCategory == 2 && inCategory == 1)
|
||||
return false; // Don't split array pointer writing into primitive
|
||||
if (isLoadStore && inCategory == 1 && !inConstant && outBase->getMetatype() == TYPE_ARRAY)
|
||||
if (isLoadStore && inCategory == 2 && !inConstant && outCategory == 1)
|
||||
return false; // Don't split primitive into an array pointer, TODO: We could check if primitive is defined by PIECE
|
||||
if (isLoadStore && inCategory == 0 && outCategory == 0 && !inConstant &&
|
||||
inBase->getMetatype() == TYPE_ARRAY && outBase->getMetatype() == TYPE_ARRAY)
|
||||
if (isLoadStore && inCategory == 1 && outCategory == 1 && !inConstant)
|
||||
return false; // Don't split copies between arrays
|
||||
bool inHole;
|
||||
bool outHole;
|
||||
int4 curOff = 0;
|
||||
int4 sizeLeft = inBase->getSize();
|
||||
if (inCategory == 1) {
|
||||
if (inCategory == 2) { // If input is primitive
|
||||
while(sizeLeft > 0) {
|
||||
Datatype *curOut = getComponent(outBase,curOff,outHole);
|
||||
if (curOut == (Datatype *)0) return false;
|
||||
@ -2017,7 +2043,7 @@ bool SplitDatatype::testDatatypeCompatibility(Datatype *inBase,Datatype *outBase
|
||||
}
|
||||
}
|
||||
}
|
||||
else if (outCategory == 1) {
|
||||
else if (outCategory == 2) { // If output is primitive
|
||||
while(sizeLeft > 0) {
|
||||
Datatype *curIn = getComponent(inBase,curOff,inHole);
|
||||
if (curIn == (Datatype *)0) return false;
|
||||
@ -2555,6 +2581,8 @@ bool SplitDatatype::splitStore(PcodeOp *storeOp,Datatype *outType)
|
||||
buildInSubpieces(inVn,storeOp,inVarnodes);
|
||||
|
||||
vector<Varnode *> storePtrs;
|
||||
if (storeRoot.pointer->isAddrTied())
|
||||
storeRoot.duplicateToTemp(data, storeOp);
|
||||
buildPointers(storeRoot.pointer, storeRoot.ptrType, storeRoot.baseOffset, storeOp, storePtrs, false);
|
||||
// Preserve original STORE object, so that INDIRECT references are still valid
|
||||
// but convert it into the first of the smaller STOREs
|
||||
|
@ -178,6 +178,7 @@ class SplitDatatype {
|
||||
bool backUpPointer(Datatype *impliedBase); ///< Follow flow of \b pointer back thru INT_ADD or PTRSUB
|
||||
public:
|
||||
bool find(PcodeOp *op,Datatype *valueType); ///< Locate root pointer for underlying LOAD or STORE
|
||||
void duplicateToTemp(Funcdata &data,PcodeOp *followOp); ///< COPY the root varnode into a temp register
|
||||
void freePointerChain(Funcdata &data); ///< Remove unused pointer calculations
|
||||
};
|
||||
Funcdata &data; ///< The containing function
|
||||
|
@ -513,16 +513,17 @@ void Datatype::encodeTypedef(Encoder &encoder) const
|
||||
encoder.closeElement(ELEM_DEF);
|
||||
}
|
||||
|
||||
/// Calculate \b size rounded up to be a multiple of \b alignment.
|
||||
/// This value is returned by getAlignSize().
|
||||
void Datatype::calcAlignSize(void)
|
||||
/// Calculate size rounded up to be a multiple of \b align.
|
||||
/// \param sz is the number of bytes in the data-type before padding
|
||||
/// \param align is the alignment of the data-type
|
||||
/// \return the aligned size
|
||||
int4 Datatype::calcAlignSize(int4 sz,int4 align)
|
||||
|
||||
{
|
||||
int4 mod = size % alignment;
|
||||
int4 mod = sz % align;
|
||||
if (mod != 0)
|
||||
alignSize = size + (alignment - mod);
|
||||
else
|
||||
alignSize = size;
|
||||
return sz + (align - mod);
|
||||
return sz;
|
||||
}
|
||||
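// A worked example of the padding rule above, as a stand-alone sketch (the helper
// here mirrors the new static calcAlignSize() but is not the Ghidra API itself).
#include <iostream>

static int calcAlignSizeExample(int sz,int align)
{
  int mod = sz % align;
  if (mod != 0)
    return sz + (align - mod);
  return sz;
}

int main()
{
  // A structure holding an int4 at offset 0 and an int1 at offset 4 has a raw
  // size of 5; with 4-byte alignment its padded (align) size becomes 8.
  std::cout << calcAlignSizeExample(5,4) << "\n";   // prints 8
  std::cout << calcAlignSizeExample(8,4) << "\n";   // already aligned: prints 8
  return 0;
}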
|
||||
/// A CPUI_PTRSUB must act on a pointer data-type where the given offset addresses a component.
|
||||
@ -1485,13 +1486,15 @@ void TypeEnum::encode(Encoder &encoder) const
|
||||
/// Parse a \<type> element with children describing each specific enumeration value.
|
||||
/// \param decoder is the stream decoder
|
||||
/// \param typegrp is the factory owning \b this data-type
|
||||
void TypeEnum::decode(Decoder &decoder,TypeFactory &typegrp)
|
||||
/// \return any warning associated with the enum
|
||||
string TypeEnum::decode(Decoder &decoder,TypeFactory &typegrp)
|
||||
|
||||
{
|
||||
// uint4 elemId = decoder.openElement();
|
||||
decodeBasic(decoder);
|
||||
submeta = (metatype == TYPE_INT) ? SUB_INT_ENUM : SUB_UINT_ENUM;
|
||||
map<uintb,string> nmap;
|
||||
string warning;
|
||||
|
||||
for(;;) {
|
||||
uint4 childId = decoder.openElement();
|
||||
@ -1510,11 +1513,59 @@ void TypeEnum::decode(Decoder &decoder,TypeFactory &typegrp)
|
||||
}
|
||||
if (nm.size() == 0)
|
||||
throw LowlevelError(name + ": TypeEnum field missing name attribute");
|
||||
nmap[val] = nm;
|
||||
if (nmap.find(val) != nmap.end()) {
|
||||
if (warning.empty())
|
||||
warning = "Enum \"" + name + "\": Some values do not have unique names";
|
||||
}
|
||||
else
|
||||
nmap[val] = nm;
|
||||
decoder.closeElement(childId);
|
||||
}
|
||||
setNameMap(nmap);
|
||||
// decoder.closeElement(elemId);
|
||||
return warning;
|
||||
}
|
||||
|
||||
/// Establish unique enumeration values for a TypeEnum.
|
||||
/// Fill in any values for any names that weren't explicitly assigned and check for duplicates.
|
||||
/// \param nmap will contain the map from values to names
|
||||
/// \param namelist is the list of names in the enumeration
|
||||
/// \param vallist is the corresponding list of values assigned to names in namelist
|
||||
/// \param assignlist is true if the corresponding name in namelist has an assigned value
|
||||
/// \param te is the TypeEnum that will eventually hold the enumeration values
|
||||
void TypeEnum::assignValues(map<uintb,string> &nmap,const vector<string> &namelist,vector<uintb> &vallist,
|
||||
const vector<bool> &assignlist,const TypeEnum *te)
|
||||
{
|
||||
map<uintb,string>::iterator mapiter;
|
||||
|
||||
uintb mask = calc_mask(te->getSize());
|
||||
uintb maxval = 0;
|
||||
for(uint4 i=0;i<namelist.size();++i) {
|
||||
uintb val;
|
||||
if (assignlist[i]) { // Did the user explicitly set value
|
||||
val = vallist[i];
|
||||
if (val > maxval)
|
||||
maxval = val;
|
||||
val &= mask;
|
||||
mapiter = nmap.find(val);
|
||||
if (mapiter != nmap.end()) {
|
||||
throw LowlevelError("Enum \""+te->name+"\": \""+namelist[i]+"\" is a duplicate value");
|
||||
}
|
||||
nmap[val] = namelist[i];
|
||||
}
|
||||
}
|
||||
for(uint4 i=0;i<namelist.size();++i) {
|
||||
uintb val;
|
||||
if (!assignlist[i]) {
|
||||
do {
|
||||
maxval += 1;
|
||||
val = maxval;
|
||||
val &= mask;
|
||||
mapiter = nmap.find(val);
|
||||
} while(mapiter != nmap.end());
|
||||
nmap[val] = namelist[i];
|
||||
}
|
||||
}
|
||||
}
|
||||
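// A stand-alone sketch of the value-assignment rule above, assuming a plain
// std::map and a fixed 4-byte mask in place of TypeEnum and calc_mask():
// explicitly assigned values are placed first, then unassigned names count up
// from the largest value seen so far, skipping any collisions.
#include <cstdint>
#include <iostream>
#include <map>
#include <string>
#include <vector>

int main()
{
  std::vector<std::string> names = {"RED", "GREEN", "BLUE"};
  std::vector<uint64_t> vals     = {0,      5,       0};
  std::vector<bool> assigned     = {false,  true,    false};
  const uint64_t mask = 0xffffffff;             // 4-byte enumeration

  std::map<uint64_t,std::string> nmap;
  uint64_t maxval = 0;
  // First pass: place explicitly assigned values (a collision would be an error).
  for (size_t i = 0;i < names.size();++i) {
    if (!assigned[i]) continue;
    uint64_t val = vals[i];
    if (val > maxval) maxval = val;
    val &= mask;
    if (nmap.count(val)) { std::cerr << "duplicate value\n"; return 1; }
    nmap[val] = names[i];
  }
  // Second pass: auto-assign the remaining names.
  for (size_t i = 0;i < names.size();++i) {
    if (assigned[i]) continue;
    uint64_t val;
    do {
      maxval += 1;
      val = maxval & mask;
    } while (nmap.count(val));
    nmap[val] = names[i];
  }
  for (const auto &p : nmap)
    std::cout << p.second << " = " << p.first << "\n";  // GREEN = 5, RED = 6, BLUE = 7
  return 0;
}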
|
||||
TypeStruct::TypeStruct(const TypeStruct &op)
|
||||
@ -1524,47 +1575,22 @@ TypeStruct::TypeStruct(const TypeStruct &op)
|
||||
alignSize = op.alignSize;
|
||||
}
|
||||
|
||||
/// Copy a list of fields into this structure, establishing its size.
|
||||
/// Copy a list of fields into this structure, establishing its size and alignment.
|
||||
/// Should only be called once when constructing the type.
|
||||
/// Size is calculated from the fields unless a \b fixedSize (>0) is passed in.
|
||||
/// Alignment is calculated from fields unless a \b fixedAlign (>0) is passed in.
|
||||
/// \param fd is the list of fields to copy in
|
||||
/// \param fixedSize (if > 0) indicates an overriding size in bytes
|
||||
/// \param fixedAlign (if > 0) indicates an overriding alignment in bytes
|
||||
void TypeStruct::setFields(const vector<TypeField> &fd,int4 fixedSize,int4 fixedAlign)
|
||||
/// \param newSize is the final size of the structure in bytes
|
||||
/// \param newAlign is the final alignment of the structure
|
||||
void TypeStruct::setFields(const vector<TypeField> &fd,int4 newSize,int4 newAlign)
|
||||
|
||||
{
|
||||
vector<TypeField>::const_iterator iter;
|
||||
int4 end;
|
||||
// Need to calculate size and alignment
|
||||
int4 calcSize = 0;
|
||||
int4 calcAlign = 1;
|
||||
for(iter=fd.begin();iter!=fd.end();++iter) {
|
||||
field.push_back(*iter);
|
||||
Datatype *fieldType = (*iter).type;
|
||||
end = (*iter).offset + fieldType->getSize();
|
||||
if (end > calcSize)
|
||||
calcSize = end;
|
||||
int4 curAlign = fieldType->getAlignment();
|
||||
if (curAlign > calcAlign)
|
||||
calcAlign = curAlign;
|
||||
}
|
||||
field = fd;
|
||||
size = newSize;
|
||||
alignment = newAlign;
|
||||
if (field.size() == 1) { // A single field
|
||||
if (field[0].type->getSize() == calcSize) // that fills the whole structure
|
||||
if (field[0].type->getSize() == size) // that fills the whole structure
|
||||
flags |= needs_resolution; // needs special attention
|
||||
}
|
||||
if (fixedSize > 0) { // Try to force a size
|
||||
if (fixedSize < calcSize) // If the forced size is smaller, this is an error
|
||||
throw LowlevelError("Trying to force too small a size on "+name);
|
||||
size = fixedSize;
|
||||
}
|
||||
else
|
||||
size = calcSize;
|
||||
alignment = (fixedAlign < 1) ? calcAlign : fixedAlign;
|
||||
calcAlignSize();
|
||||
if (fixedSize <= 0) { // Unless specifically overridden
|
||||
size = alignSize; // pad out structure to with alignment bytes
|
||||
}
|
||||
alignSize = calcAlignSize(size,alignment);
|
||||
}
|
||||
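// --- Editorial sketch (not part of this patch) ----------------------------------
// The alignSize written by calcAlignSize(size,alignment) above is, from the usage
// visible in this patch, the structure size rounded up to the next multiple of its
// alignment, which is where the trailing padding comes from.  A standalone version
// of that rounding (assumed semantics, helper name invented):
static int4 roundUpToAlignmentSketch(int4 sz,int4 align)
{
  int4 rem = sz % align;
  return (rem == 0) ? sz : sz + (align - rem);
}
// e.g. a 10-byte structure with 4-byte alignment pads out to 12 bytes:
//   roundUpToAlignmentSketch(10,4) == 12
// ---------------------------------------------------------------------------------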
|
||||
/// Find the proper subfield given an offset. Return the index of that field
|
||||
@ -1813,25 +1839,49 @@ void TypeStruct::encode(Encoder &encoder) const
|
||||
encoder.closeElement(ELEM_TYPE);
|
||||
}
|
||||
|
||||
/// Children of the structure element describe each field.
|
||||
/// Read children of the structure element describing each field. Alignment is calculated from fields unless
|
||||
/// the \b alignment field is already >0. The fields must be in order, fit within the \b size field, have a
|
||||
/// valid name, and have a valid data-type, or an exception is thrown. Any fields that overlap their previous
|
||||
/// field are thrown out and a warning message is returned.
|
||||
/// \param decoder is the stream decoder
|
||||
/// \param typegrp is the factory owning the new structure
|
||||
void TypeStruct::decodeFields(Decoder &decoder,TypeFactory &typegrp)
|
||||
/// \return any warning associated with the structure
|
||||
string TypeStruct::decodeFields(Decoder &decoder,TypeFactory &typegrp)
|
||||
|
||||
{
|
||||
int4 calcAlign = 1;
|
||||
int4 maxoffset = 0;
|
||||
int4 calcSize = 0;
|
||||
int4 lastOff = -1;
|
||||
string warning;
|
||||
while(decoder.peekElement() != 0) {
|
||||
field.emplace_back(decoder,typegrp);
|
||||
int4 trialmax = field.back().offset + field.back().type->getSize();
|
||||
if (trialmax > maxoffset)
|
||||
maxoffset = trialmax;
|
||||
if (maxoffset > size) {
|
||||
TypeField &curField(field.back());
|
||||
if (curField.type == (Datatype *)0 || curField.type->getMetatype() == TYPE_VOID)
|
||||
throw LowlevelError("Bad field data-type for structure: "+getName());
|
||||
if (curField.name.size() == 0)
|
||||
throw LowlevelError("Bad field name for structure: "+getName());
|
||||
if (curField.offset < lastOff)
|
||||
throw LowlevelError("Fields are out of order");
|
||||
lastOff = curField.offset;
|
||||
if (curField.offset < calcSize) {
|
||||
ostringstream s;
|
||||
s << "Field " << field.back().name << " does not fit in structure " + name;
|
||||
if (warning.empty()) {
|
||||
s << "Struct \"" << name << "\": ignoring overlapping field \"" << curField.name << "\"";
|
||||
}
|
||||
else {
|
||||
s << "Struct \"" << name << "\": ignoring multiple overlapping fields";
|
||||
}
|
||||
warning = s.str();
|
||||
field.pop_back(); // Throw out the overlapping field
|
||||
continue;
|
||||
}
|
||||
calcSize = curField.offset + curField.type->getSize();
|
||||
if (calcSize > size) {
|
||||
ostringstream s;
|
||||
s << "Field " << curField.name << " does not fit in structure " + name;
|
||||
throw LowlevelError(s.str());
|
||||
}
|
||||
int4 curAlign = field.back().type->getAlignment();
|
||||
int4 curAlign = curField.type->getAlignment();
|
||||
if (curAlign > calcAlign)
|
||||
calcAlign = curAlign;
|
||||
}
|
||||
@ -1845,7 +1895,8 @@ void TypeStruct::decodeFields(Decoder &decoder,TypeFactory &typegrp)
|
||||
}
|
||||
if (alignment < 1)
|
||||
alignment = calcAlign;
|
||||
calcAlignSize();
|
||||
alignSize = calcAlignSize(size, alignment);
|
||||
return warning;
|
||||
}
|
||||
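// --- Editorial note (not part of this patch) -------------------------------------
// Worked example of the overlap handling in TypeStruct::decodeFields above: with a
// 4-byte field "a" at offset 0 already decoded (calcSize == 4), a 2-byte field "b"
// at offset 2 satisfies curField.offset < calcSize, so "b" is popped from the field
// list and the returned warning reads
//   Struct "<name>": ignoring overlapping field "b"
// (or the "ignoring multiple overlapping fields" variant if a warning was already
// pending); the structure itself still decodes.
// ---------------------------------------------------------------------------------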
|
||||
/// If this method is called, the given data-type has a single component that fills it entirely
|
||||
@ -1932,21 +1983,30 @@ int4 TypeStruct::findCompatibleResolve(Datatype *ct) const

/// Assign an offset to fields in order so that each field starts at an aligned offset within the structure
/// \param list is the list of fields
void TypeStruct::assignFieldOffsets(vector<TypeField> &list)
/// \param newSize passes back the calculated size of the structure
/// \param newAlign passes back the calculated alignment
void TypeStruct::assignFieldOffsets(vector<TypeField> &list,int4 &newSize,int4 &newAlign)

{
  int4 offset = 0;
  newAlign = 1;
  vector<TypeField>::iterator iter;
  for(iter=list.begin();iter!=list.end();++iter) {
    if ((*iter).type->getMetatype() == TYPE_VOID)
      throw LowlevelError("Illegal field data-type: void");
    if ((*iter).offset != -1) continue;
    int4 cursize = (*iter).type->getAlignSize();
    int4 align = (*iter).type->getAlignment() - 1;
    int4 align = (*iter).type->getAlignment();
    if (align > newAlign)
      newAlign = align;
    align -= 1;
    if (align > 0 && (offset & align)!=0)
      offset = (offset-(offset & align) + (align+1));
    (*iter).offset = offset;
    (*iter).ident = offset;
    offset += cursize;
  }
  newSize = calcAlignSize(offset, newAlign);
}

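// --- Editorial sketch (not part of this patch) ----------------------------------
// Worked example of the rounding performed in TypeStruct::assignFieldOffsets above.
// With align = fieldAlignment - 1 (alignment assumed to be a power of two),
//   offset = offset - (offset & align) + (align + 1)
// bumps a misaligned offset up to the next multiple of the field alignment,
// e.g. offset = 5 with a 4-byte alignment (align = 3): 5 - (5 & 3) + 4 = 8.
static int4 nextAlignedOffsetSketch(int4 offset,int4 fieldAlignment)
{
  int4 align = fieldAlignment - 1;
  if (align > 0 && (offset & align) != 0)
    offset = offset - (offset & align) + (align + 1);
  return offset;		// already aligned offsets are left untouched
}
// ---------------------------------------------------------------------------------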
/// Copy a list of fields into this union, establishing its size.
|
||||
@ -1954,34 +2014,15 @@ void TypeStruct::assignFieldOffsets(vector<TypeField> &list)
|
||||
/// Size is calculated from the fields unless a \b fixedSize (>0) is passed in.
|
||||
/// Alignment is calculated from fields unless a \b fixedAlign (>0) is passed in.
|
||||
/// \param fd is the list of fields to copy in
|
||||
/// \param fixedSize (if > 0) indicates an overriding size in bytes
|
||||
/// \param fixedAlign (if > 0) indicates an overriding alignment in bytes
|
||||
void TypeUnion::setFields(const vector<TypeField> &fd,int4 fixedSize,int4 fixedAlign)
|
||||
/// \param newSize is new size in bytes of the union
|
||||
/// \param newAlign is the new alignment
|
||||
void TypeUnion::setFields(const vector<TypeField> &fd,int4 newSize,int4 newAlign)
|
||||
|
||||
{
|
||||
vector<TypeField>::const_iterator iter;
|
||||
// Need to calculate size and alignment
|
||||
int4 calcSize = 0;
|
||||
int4 calcAlign = 1;
|
||||
for(iter=fd.begin();iter!=fd.end();++iter) {
|
||||
field.push_back(*iter);
|
||||
Datatype *fieldType = field.back().type;
|
||||
int4 end = fieldType->getSize();
|
||||
if (end > calcSize)
|
||||
calcSize = end;
|
||||
int4 curAlign = fieldType->getAlignment();
|
||||
if (curAlign > calcAlign)
|
||||
calcAlign = curAlign;
|
||||
}
|
||||
if (fixedSize > 0) { // If the caller is trying to force a size
|
||||
if (fixedSize < calcSize) // If the forced size is smaller, this is an error
|
||||
throw LowlevelError("Trying to force too small a size on "+name);
|
||||
size = fixedSize;
|
||||
}
|
||||
else
|
||||
size = calcSize;
|
||||
alignment = (fixedAlign < 1) ? calcAlign : fixedAlign;
|
||||
calcAlignSize();
|
||||
field = fd;
|
||||
size = newSize;
|
||||
alignment = newAlign;
|
||||
alignSize = calcAlignSize(size,alignment);
|
||||
}
|
||||
|
||||
/// Parse children of the \<type> element describing each field.
|
||||
@ -2008,7 +2049,7 @@ void TypeUnion::decodeFields(Decoder &decoder,TypeFactory &typegrp)
|
||||
markComplete(); // Otherwise the union is complete
|
||||
if (alignment < 1)
|
||||
alignment = calcAlign;
|
||||
calcAlignSize();
|
||||
alignSize = calcAlignSize(size,alignment);
|
||||
}
|
||||
|
||||
TypeUnion::TypeUnion(const TypeUnion &op)
|
||||
@ -2196,6 +2237,30 @@ int4 TypeUnion::findCompatibleResolve(Datatype *ct) const
|
||||
return -1;
|
||||
}
|
||||
|
||||
void TypeUnion::assignFieldOffsets(vector<TypeField> &list,int4 &newSize,int4 &newAlign,TypeUnion *tu)
|
||||
|
||||
{
|
||||
vector<TypeField>::iterator iter;
|
||||
|
||||
newSize = 0;
|
||||
newAlign = 1;
|
||||
for(iter=list.begin();iter!=list.end();++iter) {
|
||||
Datatype *ct = (*iter).type;
|
||||
// Do some sanity checks on the field
|
||||
if (ct == (Datatype *)0 || ct->getMetatype() == TYPE_VOID)
|
||||
throw LowlevelError("Bad field data-type for union: "+tu->getName());
|
||||
else if ((*iter).name.size() == 0)
|
||||
throw LowlevelError("Bad field name for union: "+tu->getName());
|
||||
(*iter).offset = 0;
|
||||
int4 end = ct->getSize();
|
||||
if (end > newSize)
|
||||
newSize = end;
|
||||
int4 curAlign = ct->getAlignment();
|
||||
if (curAlign > newAlign)
|
||||
newAlign = curAlign;
|
||||
}
|
||||
}
|
||||
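// --- Editorial sketch (not part of this patch) ----------------------------------
// The union layout computed by TypeUnion::assignFieldOffsets above reduces to:
// every field sits at offset 0, the raw size is the largest field size, and the
// alignment is the largest field alignment.  Condensed (FieldDims is invented):
//
//   struct FieldDims { int4 size; int4 alignment; };
//   newSize = 0;  newAlign = 1;
//   for (const FieldDims &f : fields) {
//     newSize = std::max(newSize, f.size);
//     newAlign = std::max(newAlign, f.alignment);
//   }
// ---------------------------------------------------------------------------------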
|
||||
TypePartialStruct::TypePartialStruct(const TypePartialStruct &op)
|
||||
: Datatype(op)
|
||||
{
|
||||
@ -2209,7 +2274,7 @@ TypePartialStruct::TypePartialStruct(Datatype *contain,int4 off,int4 sz,Datatype
|
||||
{
|
||||
#ifdef CPUI_DEBUG
|
||||
if (contain->getMetatype() != TYPE_STRUCT && contain->getMetatype() != TYPE_ARRAY)
|
||||
throw LowlevelError("Parent of partial struct is not a struture or array");
|
||||
throw LowlevelError("Parent of partial struct is not a structure or array");
|
||||
#endif
|
||||
flags |= has_stripped;
|
||||
stripped = strip;
|
||||
@ -2217,6 +2282,19 @@ TypePartialStruct::TypePartialStruct(Datatype *contain,int4 off,int4 sz,Datatype
|
||||
offset = off;
|
||||
}
|
||||
|
||||
/// If the parent is an array, return the element data-type. Otherwise return the \b stripped data-type.
|
||||
/// \return the array element data-type or the \b stripped data-type.
|
||||
Datatype *TypePartialStruct::getComponentForPtr(void) const
|
||||
|
||||
{
|
||||
if (container->getMetatype() == TYPE_ARRAY) {
|
||||
Datatype *eltype = ((TypeArray *)container)->getBase();
|
||||
if (eltype->getMetatype() != TYPE_UNKNOWN && (offset % eltype->getAlignSize()) == 0)
|
||||
return eltype;
|
||||
}
|
||||
return stripped;
|
||||
}
|
||||
|
||||
void TypePartialStruct::printRaw(ostream &s) const
|
||||
|
||||
{
|
||||
@ -3118,6 +3196,7 @@ void TypeFactory::clear(void)
|
||||
tree.clear();
|
||||
nametree.clear();
|
||||
clearCache();
|
||||
warnings.clear();
|
||||
}
|
||||
|
||||
/// Delete anything that isn't a core type
|
||||
@ -3138,6 +3217,7 @@ void TypeFactory::clearNoncore(void)
|
||||
tree.erase(iter++);
|
||||
delete ct;
|
||||
}
|
||||
warnings.clear();
|
||||
}
|
||||
|
||||
TypeFactory::~TypeFactory(void)
|
||||
@ -3325,44 +3405,21 @@ void TypeFactory::setDisplayFormat(Datatype *ct,uint4 format)
|
||||
ct->setDisplayFormat(format);
|
||||
}
|
||||
|
||||
/// Make sure all the offsets are fully established then set fields of the structure
|
||||
/// If \b fixedsize is greater than 0, force the final structure to have that size.
|
||||
/// Set fields on a structure data-type, establishing its size, alignment, and other properties.
|
||||
/// This method should only be used on an incomplete structure. It will mark the structure as complete.
|
||||
/// \param fd is the list of fields to set
|
||||
/// \param ot is the TypeStruct object to modify
|
||||
/// \param fixedsize is -1 or the forced size of the structure
|
||||
/// \param fixedalign is -1 or the forced alignment for the structure
|
||||
/// \param newSize is the new size of the structure in bytes
|
||||
/// \param newAlign is the new alignment of the structure
|
||||
/// \param flags are other flags to set on the structure
|
||||
void TypeFactory::setFields(vector<TypeField> &fd,TypeStruct *ot,int4 fixedsize,int4 fixedalign,uint4 flags)
|
||||
void TypeFactory::setFields(const vector<TypeField> &fd,TypeStruct *ot,int4 newSize,int4 newAlign,uint4 flags)
|
||||
|
||||
{
|
||||
if (!ot->isIncomplete())
|
||||
throw LowlevelError("Can only set fields on an incomplete structure");
|
||||
int4 offset = 0;
|
||||
vector<TypeField>::iterator iter;
|
||||
|
||||
// Find the maximum offset, from the explicitly set offsets
|
||||
for(iter=fd.begin();iter!=fd.end();++iter) {
|
||||
Datatype *ct = (*iter).type;
|
||||
// Do some sanity checks on the field
|
||||
if (ct == (Datatype *)0 || ct->getMetatype() == TYPE_VOID)
|
||||
throw LowlevelError("Bad field data-type for structure: "+ot->getName());
|
||||
else if ((*iter).name.size() == 0)
|
||||
throw LowlevelError("Bad field name for structure: "+ot->getName());
|
||||
|
||||
if ((*iter).offset != -1) {
|
||||
int4 end = (*iter).offset + ct->getSize();
|
||||
if (end > offset)
|
||||
offset = end;
|
||||
}
|
||||
}
|
||||
|
||||
sort(fd.begin(),fd.end()); // Sort fields by offset
|
||||
|
||||
// We could check field overlapping here
|
||||
|
||||
tree.erase(ot);
|
||||
ot->setFields(fd,fixedsize,fixedalign);
|
||||
ot->setFields(fd,newSize,newAlign);
|
||||
ot->flags &= ~(uint4)Datatype::type_incomplete;
|
||||
ot->flags |= (flags & (Datatype::opaque_string | Datatype::variable_length | Datatype::type_incomplete));
|
||||
tree.insert(ot);
|
||||
@ -3370,33 +3427,20 @@ void TypeFactory::setFields(vector<TypeField> &fd,TypeStruct *ot,int4 fixedsize,
|
||||
recalcPointerSubmeta(ot, SUB_PTR_STRUCT);
|
||||
}
|
||||
|
||||
/// If \b fixedsize is greater than 0, force the final union to have that size.
|
||||
/// This method should only be used on an incomplete union. It will mark the union as complete.
|
||||
/// \param fd is the list of fields to set
|
||||
/// \param ot is the TypeUnion object to modify
|
||||
/// \param fixedsize is -1 or the forced size of the union
|
||||
/// \param fixedalign is -1 or the forced alignment for the union
|
||||
/// \param newSize is the size to associate with the union in bytes
|
||||
/// \param newAlign is the alignment to set
|
||||
/// \param flags are other flags to set on the union
|
||||
void TypeFactory::setFields(vector<TypeField> &fd,TypeUnion *ot,int4 fixedsize,int4 fixedalign,uint4 flags)
|
||||
void TypeFactory::setFields(const vector<TypeField> &fd,TypeUnion *ot,int4 newSize,int4 newAlign,uint4 flags)
|
||||
|
||||
{
|
||||
if (!ot->isIncomplete())
|
||||
throw LowlevelError("Can only set fields on an incomplete union");
|
||||
vector<TypeField>::iterator iter;
|
||||
|
||||
for(iter=fd.begin();iter!=fd.end();++iter) {
|
||||
Datatype *ct = (*iter).type;
|
||||
// Do some sanity checks on the field
|
||||
if (ct == (Datatype *)0 || ct->getMetatype() == TYPE_VOID)
|
||||
throw LowlevelError("Bad field data-type for union: "+ot->getName());
|
||||
else if ((*iter).offset != 0)
|
||||
throw LowlevelError("Non-zero field offset for union: "+ot->getName());
|
||||
else if ((*iter).name.size() == 0)
|
||||
throw LowlevelError("Bad field name for union: "+ot->getName());
|
||||
}
|
||||
|
||||
tree.erase(ot);
|
||||
ot->setFields(fd,fixedsize,fixedalign);
|
||||
ot->setFields(fd,newSize,newAlign);
|
||||
ot->flags &= ~(uint4)Datatype::type_incomplete;
|
||||
ot->flags |= (flags & (Datatype::variable_length | Datatype::type_incomplete));
|
||||
tree.insert(ot);
|
||||
@ -3419,52 +3463,14 @@ void TypeFactory::setPrototype(const FuncProto *fp,TypeCode *newCode,uint4 flags
|
||||
tree.insert(newCode);
|
||||
}
|
||||
|
||||
/// Set the list of enumeration values and identifiers for a TypeEnum
|
||||
/// Fill in any values for any names that weren't explicitly assigned
|
||||
/// and check for duplicates.
|
||||
/// \param namelist is the list of names in the enumeration
|
||||
/// \param vallist is the corresponding list of values assigned to names in namelist
|
||||
/// \param assignlist is true if the corresponding name in namelist has an assigned value
|
||||
/// \param te is the enumeration object to modify
|
||||
/// \return true if the modification is successful (no duplicate names)
|
||||
bool TypeFactory::setEnumValues(const vector<string> &namelist,
|
||||
const vector<uintb> &vallist,
|
||||
const vector<bool> &assignlist,
|
||||
TypeEnum *te)
|
||||
/// \param nmap is the mapping from integer value to name string
|
||||
/// \param te is the enumeration whose values/names are set
|
||||
void TypeFactory::setEnumValues(const map<uintb,string> &nmap,TypeEnum *te)
|
||||
|
||||
{
|
||||
map<uintb,string> nmap;
|
||||
map<uintb,string>::iterator mapiter;
|
||||
|
||||
uintb mask = calc_mask(te->getSize());
|
||||
uintb maxval = 0;
|
||||
for(uint4 i=0;i<namelist.size();++i) {
|
||||
uintb val;
|
||||
if (assignlist[i]) { // Did the user explicitly set value
|
||||
val = vallist[i];
|
||||
if (val > maxval)
|
||||
maxval = val;
|
||||
val &= mask;
|
||||
mapiter = nmap.find(val);
|
||||
if (mapiter != nmap.end()) return false; // Duplicate value
|
||||
nmap[val] = namelist[i];
|
||||
}
|
||||
}
|
||||
for(uint4 i=0;i<namelist.size();++i) {
|
||||
uintb val;
|
||||
if (!assignlist[i]) {
|
||||
val = maxval;
|
||||
maxval += 1;
|
||||
val &= mask;
|
||||
mapiter = nmap.find(val);
|
||||
if (mapiter != nmap.end()) return false;
|
||||
nmap[val] = namelist[i];
|
||||
}
|
||||
}
|
||||
|
||||
tree.erase(te);
|
||||
te->setNameMap(nmap);
|
||||
tree.insert(te);
|
||||
return true;
|
||||
}
|
||||
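// --- Editorial sketch (not part of this patch) ----------------------------------
// Hedged usage sketch of the reworked API above: callers that previously handed
// name/value/assigned lists to TypeFactory::setEnumValues now build the
// value -> name map themselves (letting the static TypeEnum::assignValues throw
// LowlevelError on duplicates) and pass the finished map to the factory:
//
//   map<uintb,string> nmap;
//   TypeEnum::assignValues(nmap, namelist, vallist, assignlist, te);
//   typegrp.setEnumValues(nmap, te);	// typegrp is the owning TypeFactory
// ---------------------------------------------------------------------------------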
|
||||
/// Recursively write out all the components of a data-type in dependency order
|
||||
@ -3673,6 +3679,34 @@ void TypeFactory::recalcPointerSubmeta(Datatype *base,sub_metatype sub)
|
||||
}
|
||||
}
|
||||
|
||||
/// Add the data-type and string to the \b warnings container.
|
||||
/// \param dt is the data-type associated with the warning
|
||||
/// \param warn is the warning string to be displayed to the user
|
||||
void TypeFactory::insertWarning(Datatype *dt,string warn)
|
||||
|
||||
{
|
||||
if (dt->getId() == 0)
|
||||
throw LowlevelError("Can only issue warnings for named data-types");
|
||||
dt->flags |= Datatype::warning_issued;
|
||||
warnings.emplace_back(dt,warn);
|
||||
}
|
||||
|
||||
/// Run through the \b warnings and delete any matching the given data-type
|
||||
/// \param dt is the given data-type
|
||||
void TypeFactory::removeWarning(Datatype *dt)
|
||||
|
||||
{
|
||||
list<DatatypeWarning>::iterator iter = warnings.begin();
|
||||
while(iter != warnings.end()) {
|
||||
if ((*iter).dataType->getId() == dt->getId() && (*iter).dataType->getName() == dt->getName()) {
|
||||
iter = warnings.erase(iter);
|
||||
}
|
||||
else {
|
||||
++iter;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Find or create a data-type identical to the given data-type except for its name and id.
|
||||
/// If the name and id already describe an incompatible data-type, an exception is thrown.
|
||||
/// \param ct is the given data-type to clone
|
||||
@ -3759,21 +3793,6 @@ TypePointer *TypeFactory::getTypePointer(int4 s,Datatype *pt,uint4 ws,const stri
|
||||
return res;
|
||||
}
|
||||
|
||||
/// Don't create more than a depth of 1, i.e. ptr->ptr
|
||||
/// \param s is the size of the pointer
|
||||
/// \param pt is the pointed-to data-type
|
||||
/// \param ws is the wordsize associated with the pointer
|
||||
/// \return the TypePointer object
|
||||
TypePointer *TypeFactory::getTypePointerNoDepth(int4 s,Datatype *pt,uint4 ws)
|
||||
|
||||
{
|
||||
if (pt->getMetatype()==TYPE_PTR) {
|
||||
// Make sure that at least we return a pointer to something the size of -pt-
|
||||
pt = getBase(pt->getSize(),TYPE_UNKNOWN); // Pass back unknown *
|
||||
}
|
||||
return getTypePointer(s,pt,ws);
|
||||
}
|
||||
|
||||
/// \param as is the number of elements in the desired array
|
||||
/// \param ao is the data-type of the array element
|
||||
/// \return the TypeArray object
|
||||
@ -3977,6 +3996,8 @@ void TypeFactory::destroyType(Datatype *ct)
|
||||
{
|
||||
if (ct->isCoreType())
|
||||
throw LowlevelError("Cannot destroy core type");
|
||||
if (ct->hasWarning())
|
||||
removeWarning(ct);
|
||||
nametree.erase(ct);
|
||||
tree.erase(ct);
|
||||
delete ct;
|
||||
@ -4163,6 +4184,22 @@ Datatype *TypeFactory::decodeTypedef(Decoder &decoder)
|
||||
return getTypedef(defedType, nm, id, format);
|
||||
}
|
||||
|
||||
/// \param decoder is the stream decoder
|
||||
/// \param forcecore is \b true if the data-type is considered core
|
||||
/// \return the newly minted enumeration data-type
|
||||
Datatype *TypeFactory::decodeEnum(Decoder &decoder,bool forcecore)
|
||||
|
||||
{
|
||||
TypeEnum te(1,TYPE_INT); // size and metatype are replaced
|
||||
string warning = te.decode(decoder,*this);
|
||||
if (forcecore)
|
||||
te.flags |= Datatype::coretype;
|
||||
Datatype *res = findAdd(te);
|
||||
if (!warning.empty())
|
||||
insertWarning(res, warning);
|
||||
return res;
|
||||
}
|
||||
|
||||
/// If necessary create a stub object before parsing the field descriptions, to deal with recursive definitions
|
||||
/// \param decoder is the stream decoder
|
||||
/// \param forcecore is \b true if the data-type is considered core
|
||||
@ -4181,7 +4218,7 @@ Datatype* TypeFactory::decodeStruct(Decoder &decoder,bool forcecore)
|
||||
}
|
||||
else if (ct->getMetatype() != TYPE_STRUCT)
|
||||
throw LowlevelError("Trying to redefine type: " + ts.name);
|
||||
ts.decodeFields(decoder,*this);
|
||||
string warning = ts.decodeFields(decoder,*this);
|
||||
if (!ct->isIncomplete()) { // Structure of this name was already present
|
||||
if (0 != ct->compareDependency(ts))
|
||||
throw LowlevelError("Redefinition of structure: " + ts.name);
|
||||
@ -4189,6 +4226,8 @@ Datatype* TypeFactory::decodeStruct(Decoder &decoder,bool forcecore)
|
||||
else { // If structure is a placeholder stub
|
||||
setFields(ts.field,(TypeStruct*)ct,ts.size,ts.alignment,ts.flags); // Define structure now by copying fields
|
||||
}
|
||||
if (!warning.empty())
|
||||
insertWarning(ct, warning);
|
||||
// decoder.closeElement(elemId);
|
||||
return ct;
|
||||
}
|
||||
@ -4349,12 +4388,8 @@ Datatype *TypeFactory::decodeTypeNoRef(Decoder &decoder,bool forcecore)
|
||||
return ct;
|
||||
}
|
||||
else if (attribId == ATTRIB_ENUM && decoder.readBool()) {
|
||||
TypeEnum te(1,TYPE_INT); // size and metatype are replaced
|
||||
decoder.rewindAttributes();
|
||||
te.decode(decoder,*this);
|
||||
if (forcecore)
|
||||
te.flags |= Datatype::coretype;
|
||||
ct = findAdd(te);
|
||||
ct = decodeEnum(decoder, forcecore);
|
||||
decoder.closeElement(elemId);
|
||||
return ct;
|
||||
}
|
||||
|
@ -4,9 +4,9 @@
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
@ -177,7 +177,8 @@ protected:
|
||||
needs_resolution = 0x800, ///< Datatype (union, pointer to union) needs resolution before propagation
|
||||
force_format = 0x7000, ///< 3-bits encoding display format, 0=none, 1=hex, 2=dec, 3=oct, 4=bin, 5=char
|
||||
truncate_bigendian = 0x8000, ///< Pointer can be truncated and is big endian
|
||||
pointer_to_array = 0x10000 ///< Data-type is a pointer to an array
|
||||
pointer_to_array = 0x10000, ///< Data-type is a pointer to an array
|
||||
warning_issued = 0x20000 ///< Data-type has an associated \e warning string
|
||||
};
|
||||
friend class TypeFactory;
|
||||
friend struct DatatypeCompare;
|
||||
@ -196,10 +197,11 @@ protected:
|
||||
void encodeTypedef(Encoder &encoder) const; ///< Encode \b this as a \e typedef element to a stream
|
||||
void markComplete(void) { flags &= ~(uint4)type_incomplete; } ///< Mark \b this data-type as completely defined
|
||||
void setDisplayFormat(uint4 format); ///< Set a specific display format
|
||||
void calcAlignSize(void); ///< Calculate aligned size, assuming alignment is known
|
||||
virtual Datatype *clone(void) const=0; ///< Clone the data-type
|
||||
static uint8 hashName(const string &nm); ///< Produce a data-type id by hashing the type name
|
||||
static uint8 hashSize(uint8 id,int4 size); ///< Reversibly hash size into id
|
||||
protected:
|
||||
static int4 calcAlignSize(int4 sz,int4 align); ///< Calculate aligned size, given size and alignment of data-type
|
||||
public:
|
||||
/// Construct the base data-type copying low-level properties of another
|
||||
Datatype(const Datatype &op) { size = op.size; name=op.name; displayName=op.displayName; metatype=op.metatype;
|
||||
@ -224,6 +226,7 @@ public:
|
||||
bool hasStripped(void) const { return (flags & has_stripped)!=0; } ///< Return \b true if \b this has a stripped form
|
||||
bool isIncomplete(void) const { return (flags & type_incomplete)!=0; } ///< Is \b this an incompletely defined data-type
|
||||
bool needsResolution(void) const { return (flags & needs_resolution)!=0; } ///< Is \b this a union or a pointer to union
|
||||
bool hasWarning(void) const { return (flags & warning_issued)!=0; } ///< Has a \e warning been issued about \b this data-type
|
||||
uint4 getInheritable(void) const { return (flags & coretype); } ///< Get properties pointers inherit
|
||||
uint4 getDisplayFormat(void) const; ///< Get the display format for constants with \b this data-type
|
||||
type_metatype getMetatype(void) const { return metatype; } ///< Get the type \b meta-type
|
||||
@ -468,7 +471,7 @@ protected:
|
||||
map<uintb,string> namemap; ///< Map from integer to name
|
||||
vector<uintb> masklist; ///< Masks for each bitfield within the enum
|
||||
void setNameMap(const map<uintb,string> &nmap); ///< Establish the value -> name map
|
||||
void decode(Decoder &decoder,TypeFactory &typegrp); ///< Restore \b this enum data-type from a stream
|
||||
string decode(Decoder &decoder,TypeFactory &typegrp); ///< Restore \b this enum data-type from a stream
|
||||
public:
|
||||
/// Construct from another TypeEnum
|
||||
TypeEnum(const TypeEnum &op);
|
||||
@ -485,6 +488,8 @@ public:
|
||||
virtual int4 compareDependency(const Datatype &op) const;
|
||||
virtual Datatype *clone(void) const { return new TypeEnum(*this); }
|
||||
virtual void encode(Encoder &encoder) const;
|
||||
static void assignValues(map<uintb,string> &nmap,const vector<string> &namelist,vector<uintb> &vallist,
|
||||
const vector<bool> &assignlist,const TypeEnum *te);
|
||||
};
|
||||
|
||||
/// \brief A composite Datatype object: A \b structure with component \b fields
|
||||
@ -495,7 +500,7 @@ protected:
|
||||
void setFields(const vector<TypeField> &fd,int4 fixedSize,int4 fixedAlign); ///< Establish fields for \b this
|
||||
int4 getFieldIter(int4 off) const; ///< Get index into field list
|
||||
int4 getLowerBoundField(int4 off) const; ///< Get index of last field before or equal to given offset
|
||||
void decodeFields(Decoder &decoder,TypeFactory &typegrp); ///< Restore fields from a stream
|
||||
string decodeFields(Decoder &decoder,TypeFactory &typegrp); ///< Restore fields from a stream
|
||||
public:
|
||||
TypeStruct(const TypeStruct &op); ///< Construct from another TypeStruct
|
||||
TypeStruct(void) : Datatype(0,-1,TYPE_STRUCT) { flags |= type_incomplete; } ///< Construct incomplete/empty TypeStruct
|
||||
@ -515,7 +520,7 @@ public:
|
||||
virtual Datatype *resolveInFlow(PcodeOp *op,int4 slot);
|
||||
virtual Datatype* findResolve(const PcodeOp *op,int4 slot);
|
||||
virtual int4 findCompatibleResolve(Datatype *ct) const;
|
||||
static void assignFieldOffsets(vector<TypeField> &list); ///< Assign field offsets
|
||||
static void assignFieldOffsets(vector<TypeField> &list,int4 &newSize,int4 &newAlign); ///< Assign field offsets
|
||||
static int4 scoreSingleComponent(Datatype *parent,PcodeOp *op,int4 slot); ///< Determine best type fit for given PcodeOp use
|
||||
};
|
||||
|
||||
@ -527,7 +532,7 @@ class TypeUnion : public Datatype {
|
||||
protected:
|
||||
friend class TypeFactory;
|
||||
vector<TypeField> field; ///< The list of fields
|
||||
void setFields(const vector<TypeField> &fd,int4 fixedSize,int4 fixedAlign); ///< Establish fields for \b this
|
||||
void setFields(const vector<TypeField> &fd,int4 newSize,int4 newAlign); ///< Establish fields for \b this
|
||||
void decodeFields(Decoder &decoder,TypeFactory &typegrp); ///< Restore fields from a stream
|
||||
public:
|
||||
TypeUnion(const TypeUnion &op); ///< Construct from another TypeUnion
|
||||
@ -545,6 +550,7 @@ public:
|
||||
virtual Datatype* findResolve(const PcodeOp *op,int4 slot);
|
||||
virtual int4 findCompatibleResolve(Datatype *ct) const;
|
||||
virtual const TypeField *resolveTruncation(int8 offset,PcodeOp *op,int4 slot,int8 &newoff);
|
||||
static void assignFieldOffsets(vector<TypeField> &list,int4 &newSize,int4 &newAlign,TypeUnion *tu); ///< Assign field offsets
|
||||
};
|
||||
|
||||
/// \brief A data-type that holds \e part of a TypeStruct or TypeArray
|
||||
@ -558,6 +564,7 @@ public:
|
||||
TypePartialStruct(Datatype *contain,int4 off,int4 sz,Datatype *strip); ///< Constructor
|
||||
int4 getOffset(void) const { return offset; } ///< Get the byte offset into the containing data-type
|
||||
Datatype *getParent(void) const { return container; } ///< Get the data-type containing \b this piece
|
||||
Datatype *getComponentForPtr(void) const; ///< Get (initial) component of array represented by \b this
|
||||
virtual void printRaw(ostream &s) const;
|
||||
virtual Datatype *getSubType(int8 off,int8 *newoff) const;
|
||||
virtual int4 getHoleSize(int4 off) const;
|
||||
@ -698,6 +705,19 @@ public:
|
||||
virtual void encode(Encoder &encoder) const;
|
||||
};
|
||||
|
||||
/// \brief A data-type associated with a \e warning string
|
||||
///
|
||||
/// The warning should be presented to the user whenever the data-type is used. A warning is typically
|
||||
/// issued for ill-formed data-types that have been modified to facilitate decompiler analysis.
|
||||
class DatatypeWarning {
|
||||
friend class TypeFactory;
|
||||
Datatype *dataType; ///< Data-type associated with the warning
|
||||
string warning; ///< An explanatory string which should be displayed to the user as a warning
|
||||
public:
|
||||
DatatypeWarning(Datatype *dt,string warn) { dataType = dt; warning = warn; } ///< Constructor
|
||||
const string &getWarning(void) const { return warning; } ///< Get the warning string
|
||||
};
|
||||
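// --- Editorial sketch (not part of this patch) ----------------------------------
// Hedged usage sketch: client code can surface the recorded DatatypeWarning
// entries through the iterator accessors declared on TypeFactory below
// (beginWarnings/endWarnings):
//
//   for (list<DatatypeWarning>::const_iterator iter = typegrp.beginWarnings();
//        iter != typegrp.endWarnings(); ++iter)
//     cout << (*iter).getWarning() << endl;	// typegrp is a TypeFactory
// ---------------------------------------------------------------------------------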
|
||||
/// \brief Container class for all Datatype objects in an Architecture
|
||||
class TypeFactory {
|
||||
int4 sizeOfInt; ///< Size of the core "int" data-type
|
||||
@ -716,6 +736,7 @@ class TypeFactory {
|
||||
Datatype *typecache16; ///< Specially cached 16-byte float type
|
||||
Datatype *type_nochar; ///< Same dimensions as char but acts and displays as an INT
|
||||
Datatype *charcache[5]; ///< Cached character data-types
|
||||
list<DatatypeWarning> warnings; ///< Warnings for the user about data-types in \b this factory
|
||||
Datatype *findNoName(Datatype &ct); ///< Find data-type (in this container) by function
|
||||
void insert(Datatype *newtype); ///< Insert pointer into the cross-reference sets
|
||||
Datatype *findAdd(Datatype &ct); ///< Find data-type in this container or add it
|
||||
@ -723,6 +744,7 @@ class TypeFactory {
|
||||
void decodeAlignmentMap(Decoder &decoder); ///< Parse a \<size_alignment_map> element
|
||||
void setDefaultAlignmentMap(void); ///< Provide default alignments for data-types
|
||||
Datatype *decodeTypedef(Decoder &decoder); ///< Restore a \<def> element describing a typedef
|
||||
Datatype *decodeEnum(Decoder &decoder,bool forcecore); ///< Restore a \<type> element describing an enumeration
|
||||
Datatype *decodeStruct(Decoder &decoder,bool forcecore); ///< Restore a \<type> element describing a structure
|
||||
Datatype *decodeUnion(Decoder &decoder,bool forcecore); ///< Restore a \<type> element describing a union
|
||||
Datatype *decodeCode(Decoder &decoder,bool isConstructor,bool isDestructor,bool forcecore); ///< Restore an element describing a code object
|
||||
@ -732,6 +754,8 @@ class TypeFactory {
|
||||
TypeUnicode *getTypeUnicode(const string &nm,int4 sz,type_metatype m); ///< Create a default "unicode" type
|
||||
TypeCode *getTypeCode(const string &n); ///< Create a default "code" type
|
||||
void recalcPointerSubmeta(Datatype *base,sub_metatype sub); ///< Recalculate submeta for pointers to given base data-type
|
||||
void insertWarning(Datatype *dt,string warn); ///< Register a new data-type warning with \b this factory
|
||||
void removeWarning(Datatype *dt); ///< Remove the warning associated with the given data-type
|
||||
protected:
|
||||
Architecture *glb; ///< The Architecture object that owns this TypeFactory
|
||||
Datatype *findByIdLocal(const string &nm,uint8 id) const; ///< Search locally by name and id
|
||||
@ -754,13 +778,10 @@ public:
|
||||
Datatype *findByName(const string &n); ///< Return type of given name
|
||||
Datatype *setName(Datatype *ct,const string &n); ///< Set the given types name
|
||||
void setDisplayFormat(Datatype *ct,uint4 format); ///< Set the display format associated with the given data-type
|
||||
void setFields(vector<TypeField> &fd,TypeStruct *ot,int4 fixedsize,int4 fixedalign,uint4 flags); ///< Set fields on a TypeStruct
|
||||
void setFields(vector<TypeField> &fd,TypeUnion *ot,int4 fixedsize,int4 fixedalign,uint4 flags); ///< Set fields on a TypeUnion
|
||||
void setFields(const vector<TypeField> &fd,TypeStruct *ot,int4 newSize,int4 newAlign,uint4 flags); ///< Set fields on a TypeStruct
|
||||
void setFields(const vector<TypeField> &fd,TypeUnion *ot,int4 newSize,int4 newAlign,uint4 flags); ///< Set fields on a TypeUnion
|
||||
void setPrototype(const FuncProto *fp,TypeCode *newCode,uint4 flags); ///< Set the prototype on a TypeCode
|
||||
bool setEnumValues(const vector<string> &namelist,
|
||||
const vector<uintb> &vallist,
|
||||
const vector<bool> &assignlist,
|
||||
TypeEnum *te); ///< Set named values for an enumeration
|
||||
void setEnumValues(const map<uintb,string> &nmap,TypeEnum *te); ///< Set named values for an enumeration
|
||||
Datatype *decodeType(Decoder &decoder); ///< Restore Datatype from a stream
|
||||
Datatype *decodeTypeWithCodeFlags(Decoder &decoder,bool isConstructor,bool isDestructor);
|
||||
TypeVoid *getTypeVoid(void); ///< Get the "void" data-type
|
||||
@ -772,7 +793,6 @@ public:
|
||||
TypePointer *getTypePointerStripArray(int4 s,Datatype *pt,uint4 ws); ///< Construct a pointer data-type, stripping an ARRAY level
|
||||
TypePointer *getTypePointer(int4 s,Datatype *pt,uint4 ws); ///< Construct an absolute pointer data-type
|
||||
TypePointer *getTypePointer(int4 s,Datatype *pt,uint4 ws,const string &n); ///< Construct a named pointer data-type
|
||||
TypePointer *getTypePointerNoDepth(int4 s,Datatype *pt,uint4 ws); ///< Construct a depth limited pointer data-type
|
||||
TypeArray *getTypeArray(int4 as,Datatype *ao); ///< Construct an array data-type
|
||||
TypeStruct *getTypeStruct(const string &n); ///< Create an (empty) structure
|
||||
TypePartialStruct *getTypePartialStruct(Datatype *contain,int4 off,int4 sz); ///< Create a partial structure
|
||||
@ -798,6 +818,8 @@ public:
|
||||
void parseEnumConfig(Decoder &decoder); ///< Parse the \<enum> tag
|
||||
void setCoreType(const string &name,int4 size,type_metatype meta,bool chartp); ///< Create a core data-type
|
||||
void cacheCoreTypes(void); ///< Cache common types
|
||||
list<DatatypeWarning>::const_iterator beginWarnings(void) const { return warnings.begin(); } ///< Start of data-type warnings
|
||||
list<DatatypeWarning>::const_iterator endWarnings(void) const { return warnings.end(); } ///< End of data-type warnings
|
||||
};
|
||||
|
||||
/// The display format for the data-type is changed based on the given format. A value of
|
||||
|
@ -175,6 +175,27 @@ OpCode TypeOp::floatSignManipulation(PcodeOp *op)
|
||||
return CPUI_MAX;
|
||||
}
|
||||
|
||||
/// \brief Propagate a dereferenced data-type up to its pointer data-type
///
/// Don't create more than a depth of 1, i.e. ptr->ptr
/// \param t is the TypeFactory used to construct the pointer data-type
/// \param dt is the pointed-to data-type
/// \param sz is the size of the pointer
/// \param wordsz is the wordsize associated with the pointer
/// \return the TypePointer object
Datatype *TypeOp::propagateToPointer(TypeFactory *t,Datatype *dt,int4 sz,int4 wordsz)
|
||||
|
||||
{
|
||||
type_metatype meta = dt->getMetatype();
|
||||
if (meta==TYPE_PTR) {
|
||||
// Make sure that at least we return a pointer to something the size of -pt-
|
||||
dt = t->getBase(dt->getSize(),TYPE_UNKNOWN); // Pass back unknown *
|
||||
}
|
||||
else if (meta == TYPE_PARTIALSTRUCT) {
|
||||
dt = ((TypePartialStruct *)dt)->getComponentForPtr();
|
||||
}
|
||||
return t->getTypePointer(sz,dt,wordsz);
|
||||
}
|
||||
|
||||
/// \param t is the TypeFactory used to construct data-types
|
||||
/// \param opc is the op-code value the new object will represent
|
||||
/// \param n is the display name that will represent the op-code
|
||||
@ -440,7 +461,7 @@ Datatype *TypeOpLoad::propagateType(Datatype *alttype,PcodeOp *op,Varnode *invn,
|
||||
Datatype *newtype;
|
||||
if (inslot == -1) { // Propagating output to input (value to ptr)
|
||||
AddrSpace *spc = op->getIn(0)->getSpaceFromConst();
|
||||
newtype = tlst->getTypePointerNoDepth(outvn->getSize(),alttype,spc->getWordSize());
|
||||
newtype = propagateToPointer(tlst,alttype,outvn->getSize(),spc->getWordSize());
|
||||
}
|
||||
else if (alttype->getMetatype()==TYPE_PTR) {
|
||||
newtype = ((TypePointer *)alttype)->getPtrTo();
|
||||
@ -515,7 +536,7 @@ Datatype *TypeOpStore::propagateType(Datatype *alttype,PcodeOp *op,Varnode *invn
|
||||
Datatype *newtype;
|
||||
if (inslot==2) { // Propagating value to ptr
|
||||
AddrSpace *spc = op->getIn(0)->getSpaceFromConst();
|
||||
newtype = tlst->getTypePointerNoDepth(outvn->getSize(),alttype,spc->getWordSize());
|
||||
newtype = propagateToPointer(tlst,alttype,outvn->getSize(),spc->getWordSize());
|
||||
}
|
||||
else if (alttype->getMetatype()==TYPE_PTR) {
|
||||
newtype = ((TypePointer *)alttype)->getPtrTo();
|
||||
@ -816,7 +837,7 @@ Datatype *TypeOpCallother::getOutputLocal(const PcodeOp *op) const
|
||||
TypeOpReturn::TypeOpReturn(TypeFactory *t) : TypeOp(t,CPUI_RETURN,"return")
|
||||
|
||||
{
|
||||
opflags = PcodeOp::special|PcodeOp::returns|PcodeOp::nocollapse|PcodeOp::no_copy_propagation;
|
||||
opflags = PcodeOp::special|PcodeOp::returns|PcodeOp::nocollapse|PcodeOp::return_copy;
|
||||
behave = new OpBehavior(CPUI_RETURN,false,true); // Dummy behavior
|
||||
}
|
||||
|
||||
|
@ -180,6 +180,7 @@ public:
|
||||
|
||||
/// \brief Return the floating-point operation associated with the \e sign bit manipulation by the given PcodeOp
|
||||
static OpCode floatSignManipulation(PcodeOp *op);
|
||||
static Datatype *propagateToPointer(TypeFactory *t,Datatype *dt,int4 sz,int4 wordsz);
|
||||
};
|
||||
|
||||
// Major classes of operations
|
||||
|
@ -0,0 +1,31 @@
|
||||
<decompilertest>
|
||||
<binaryimage arch="x86:LE:64:default:gcc">
|
||||
<!--
|
||||
Function makes two changes to the same global address but with different sized accesses.
|
||||
One access is through an alias whose recovery is delayed, forcing the SSA form for the
|
||||
address to be revisited.
|
||||
-->
|
||||
<bytechunk space="ram" offset="0x100000" readonly="true">
|
||||
488d1d6d00000048891d52000000488b
|
||||
0d4b0000008b0183c00a890166e84800
|
||||
668b054d000000660564006689054200
|
||||
0000c3
|
||||
</bytechunk>
|
||||
<symbol space="ram" offset="0x100000" name="revisit"/>
|
||||
</binaryimage>
|
||||
<script>
|
||||
<com>lo fu revisit</com>
|
||||
<com>decompile</com>
|
||||
<com>print raw</com>
|
||||
<com>quit</com>
|
||||
</script>
|
||||
<stringmatch name="Revisit SSA #1" min="1" max="1">EAX\(0x00100017:.*\) = r0x00100074:4\(i\) \+ #0xa</stringmatch>
|
||||
<stringmatch name="Revisit SSA #2" min="1" max="1">r0x00100074:4\(0x0010001c:.*\) = EAX\(0x00100017:.*\) \[\] i0x0010001c:</stringmatch>
|
||||
<stringmatch name="Revisit SSA #3" min="1" max="1">r0x00100074:2\(0x0010001c:.*\) = SUB42\(r0x00100074:4\(0x0010001c:.*\),#0x0:4\)</stringmatch>
|
||||
<stringmatch name="Revisit SSA #4" min="1" max="1">AX\(0x00100027:.*\) = r0x00100074:2\(0x0010001c:.*\) \+ #0x64:2</stringmatch>
|
||||
<stringmatch name="Revisit SSA #5" min="1" max="1">r0x00100076:2\(0x0010002b:.*\) = SUB42\(r0x00100074:4\(0x0010001c:.*\),#0x2\)</stringmatch>
|
||||
<stringmatch name="Revisit SSA #6" min="1" max="1">r0x00100074:4\(0x0010002b:.*\) = CONCAT22\(r0x00100076:2\(0x0010002b:.*\),AX\(0x00100027:.*\)\)</stringmatch>
|
||||
<stringmatch name="Revisit SSA #7" min="1" max="1">r0x00100074:4\(0x00100032:.*\) = r0x00100074:4\(0x0010002b:.*\)</stringmatch>
|
||||
<stringmatch name="Revisit SSA #8" min="0" max="0">r0x00100074:2\(.*\) = .* \[\] i0x</stringmatch>
|
||||
<stringmatch name="Revisit SSA #9" min="0" max="0">r0x00100074:2\(0x00100032:.*\) = r0x00100074:2.*</stringmatch>
|
||||
</decompilertest>
|
@ -4,9 +4,9 @@
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
@ -46,11 +46,11 @@ import ghidra.util.data.DataTypeParser.AllowedDataTypes;
|
||||
*/
|
||||
public abstract class AbstractDecompilerAction extends DockingAction {
|
||||
|
||||
AbstractDecompilerAction(String name) {
|
||||
protected AbstractDecompilerAction(String name) {
|
||||
super(name, DecompilePlugin.class.getSimpleName());
|
||||
}
|
||||
|
||||
AbstractDecompilerAction(String name, KeyBindingType kbType) {
|
||||
protected AbstractDecompilerAction(String name, KeyBindingType kbType) {
|
||||
super(name, DecompilePlugin.class.getSimpleName(), kbType);
|
||||
}
|
||||
|
||||
|
@ -4,9 +4,9 @@
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
@ -464,17 +464,17 @@ public class VxWorksSymTab_Finder extends GhidraScript {
|
||||
|
||||
private boolean isValidSymType(byte symType) {
|
||||
switch (symType) {
|
||||
case 0: // Undefined Symbol
|
||||
case 0: // Undefined Symbol
|
||||
return false;
|
||||
case 2: // Local Absolute
|
||||
case 3: // Global Absolute
|
||||
case 6: // Local Data
|
||||
case 7: // Global Data
|
||||
case 8: // Local BSS
|
||||
case 9: // Global BSS
|
||||
case 4: // Local .text
|
||||
case 5: // Global .text
|
||||
case 0x11: // External ref -- ignore
|
||||
case 2: // Local Absolute
|
||||
case 3: // Global Absolute
|
||||
case 6: // Local Data
|
||||
case 7: // Global Data
|
||||
case 8: // Local BSS
|
||||
case 9: // Global BSS
|
||||
case 4: // Local .text
|
||||
case 5: // Global .text
|
||||
case 0x11: // External ref
|
||||
return true;
|
||||
default:
|
||||
return false;
|
||||
@ -815,18 +815,21 @@ public class VxWorksSymTab_Finder extends GhidraScript {
|
||||
println("NULL symType!");
|
||||
break;
|
||||
|
||||
case 2: // Local Absolute
|
||||
case 3: // Global Absolute
|
||||
case 6: // Local Data
|
||||
case 7: // Global Data
|
||||
case 8: // Local BSS
|
||||
case 9: // Global BSS
|
||||
case 2: // Local Absolute
|
||||
case 3: // Global Absolute
|
||||
case 6: // Local Data
|
||||
case 7: // Global Data
|
||||
case 8: // Local BSS
|
||||
case 9: // Global BSS
|
||||
case 0x11: // External ref
|
||||
|
||||
createLabel(symLoc, symName, true);
|
||||
applyDemangled(symLoc, symName, symDemangledName);
|
||||
break;
|
||||
|
||||
case 4: // Local .text
|
||||
case 5: // Global .text
|
||||
|
||||
doLocalDisassemble(symLoc);
|
||||
createFunction(symLoc, symName);
|
||||
if (getFunctionAt(symLoc) != null) {
|
||||
@ -840,11 +843,8 @@ public class VxWorksSymTab_Finder extends GhidraScript {
|
||||
}
|
||||
break;
|
||||
|
||||
case 0x11: // External ref -- ignore
|
||||
break;
|
||||
|
||||
default:
|
||||
println("Invalid symType!");
|
||||
println("Invalid symType " + symType + " !");
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -78,16 +78,21 @@ public class RttiUtil {
|
||||
if (matchingSymbol != null) {
|
||||
return false;
|
||||
}
|
||||
// Don't create it if a similar symbol already exists at the address of the data.
|
||||
SymbolIterator symbols = symbolTable.getSymbolsAsIterator(rttiAddress);
|
||||
for (Symbol symbol : symbols) {
|
||||
String name = symbol.getName();
|
||||
if (name.contains(rttiSuffix)) {
|
||||
return false; // Similar symbol already exists.
|
||||
}
|
||||
}
|
||||
try {
|
||||
|
||||
// NOTE: This code was originally put here to skip applying labels the pdb put down if the
|
||||
// above check failed but symbols were similar. This check has been removed because of
|
||||
// cases where this check stopped the full namespace path from being created. The code is
|
||||
// here commented out because we might want to use this to do extra checking and possibly
|
||||
// remove the similar symbol instead of leaving it as a secondary symbol.
|
||||
// Don't create it if a similar symbol already exists at the address of the data.
|
||||
// SymbolIterator symbols = symbolTable.getSymbolsAsIterator(rttiAddress);
|
||||
// for (Symbol symbol : symbols) {
|
||||
// String name = symbol.getName();
|
||||
// if (name.contains(rttiSuffix)) {
|
||||
// return false; // Similar symbol already exists.
|
||||
// }
|
||||
// }
|
||||
try {
|
||||
// Ignore imported mangled symbol because demangling would add tick marks into the name.
|
||||
// The name created here is better. Set the symbol to be primary so that the demangler
|
||||
// won't demangle.
|
||||
|
@ -4,9 +4,9 @@
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
@ -21,6 +21,7 @@ import ghidra.app.util.bin.format.pdb2.pdbreader.symbol.AbstractMsSymbol;
|
||||
import ghidra.app.util.bin.format.pdb2.pdbreader.symbol.AbstractUserDefinedTypeMsSymbol;
|
||||
import ghidra.app.util.bin.format.pdb2.pdbreader.type.AbstractComplexMsType;
|
||||
import ghidra.app.util.bin.format.pdb2.pdbreader.type.AbstractMsType;
|
||||
import ghidra.app.util.pdb.PdbNamespaceUtils;
|
||||
import ghidra.program.model.data.*;
|
||||
import ghidra.util.exception.AssertException;
|
||||
import ghidra.util.exception.CancelledException;
|
||||
@ -116,6 +117,7 @@ public class TypedefSymbolApplier extends MsSymbolApplier
|
||||
}
|
||||
|
||||
SymbolPath symbolPath = new SymbolPath(name);
|
||||
symbolPath = PdbNamespaceUtils.convertToGhidraPathName(symbolPath);
|
||||
CategoryPath categoryPath =
|
||||
applicator.getTypedefsCategory(applicator.getCurrentModuleNumber(), symbolPath);
|
||||
DataType typedef = new TypedefDataType(categoryPath.getParent(), categoryPath.getName(),
|
||||
|
@ -4,9 +4,9 @@
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
@ -400,6 +400,7 @@ public class DefaultPcodeThread<T> implements PcodeThread<T> {
|
||||
|
||||
@Override
|
||||
public void stepInstruction() {
|
||||
assertCompletedInstruction();
|
||||
PcodeProgram inj = getInject(counter);
|
||||
if (inj != null) {
|
||||
instruction = null;
|
||||
@ -576,7 +577,6 @@ public class DefaultPcodeThread<T> implements PcodeThread<T> {
|
||||
|
||||
@Override
|
||||
public void executeInstruction() {
|
||||
assertCompletedInstruction();
|
||||
instruction = decoder.decodeInstruction(counter, context);
|
||||
PcodeProgram insProg = PcodeProgram.fromInstruction(instruction);
|
||||
preExecuteInstruction();
|
||||
|
@ -4,9 +4,9 @@
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
@ -159,7 +159,7 @@ public class PcodeFrame {
|
||||
}
|
||||
|
||||
/**
|
||||
* The number of p-code ops executed
|
||||
* Get and reset the number of p-code ops executed
|
||||
*
|
||||
* <p>
|
||||
* Contrast this to {@link #index()}, which marks the next op to be executed. This counts the
|
||||
@ -167,7 +167,9 @@ public class PcodeFrame {
|
||||
*
|
||||
* @return the count
|
||||
*/
|
||||
public int count() {
|
||||
public int resetCount() {
|
||||
int count = this.count;
|
||||
this.count = 0;
|
||||
return count;
|
||||
}
|
||||
|
||||
|
@ -4,9 +4,9 @@
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
@ -288,12 +288,12 @@ public class FileDataTypeManager extends StandAloneDataTypeManager
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() {
|
||||
public synchronized void close() {
|
||||
if (packedDB != null) {
|
||||
super.close();
|
||||
packedDB.dispose();
|
||||
packedDB = null;
|
||||
}
|
||||
super.close();
|
||||
}
|
||||
|
||||
public boolean isClosed() {
|
||||
|
@ -542,14 +542,14 @@ public class PcodeDataTypeManager {
|
||||
encoder.openElement(ELEM_TYPE);
|
||||
encodeNameIdAttributes(encoder, type);
|
||||
String metatype = type.isSigned() ? "int" : "uint";
|
||||
long[] keys = type.getValues();
|
||||
String[] names = type.getNames();
|
||||
encoder.writeString(ATTRIB_METATYPE, metatype);
|
||||
encoder.writeSignedInteger(ATTRIB_SIZE, type.getLength());
|
||||
encoder.writeBool(ATTRIB_ENUM, true);
|
||||
for (long key : keys) {
|
||||
for (String name : names) {
|
||||
encoder.openElement(ELEM_VAL);
|
||||
encoder.writeString(ATTRIB_NAME, type.getName(key));
|
||||
encoder.writeSignedInteger(ATTRIB_VALUE, key);
|
||||
encoder.writeString(ATTRIB_NAME, name);
|
||||
encoder.writeSignedInteger(ATTRIB_VALUE, type.getValue(name));
|
||||
encoder.closeElement(ELEM_VAL);
|
||||
}
|
||||
encoder.closeElement(ELEM_TYPE);
|
||||
|
@ -4,9 +4,9 @@
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
@ -20,7 +20,7 @@ import junit.framework.Test;
|
||||
|
||||
public class ARM_v8m_O0_EmulatorTest extends ProcessorEmulatorTestAdapter {
|
||||
|
||||
private static final String LANGUAGE_ID = "ARM:LE:32:Cortex8m";
|
||||
private static final String LANGUAGE_ID = "ARM:LE:32:v8-m";
|
||||
private static final String COMPILER_SPEC_ID = "default";
|
||||
|
||||
private static final String[] REG_DUMP_SET = new String[] {};
|
||||
|
@ -4,9 +4,9 @@
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
@ -20,7 +20,7 @@ import junit.framework.Test;
|
||||
|
||||
public class ARM_v8m_O3_EmulatorTest extends ProcessorEmulatorTestAdapter {
|
||||
|
||||
private static final String LANGUAGE_ID = "ARM:LE:32:Cortex8m";
|
||||
private static final String LANGUAGE_ID = "ARM:LE:32:v8-m";
|
||||
private static final String COMPILER_SPEC_ID = "default";
|
||||
|
||||
private static final String[] REG_DUMP_SET = new String[] {};
|
||||
|
@ -1,5 +1,5 @@
|
||||
application.name=Ghidra
|
||||
application.version=11.2
|
||||
application.version=11.2.1
|
||||
application.release.name=DEV
|
||||
application.layout.version=3
|
||||
application.gradle.min=8.5
|
||||
|