GP-1753 - Fixed the Memory Search Dialog button enablement when closing a long running search results window

This commit is contained in:
dragonmacher 2022-02-22 17:36:38 -05:00
parent 026fad27ab
commit a51ea1c2de
9 changed files with 249 additions and 274 deletions

View File

@ -171,7 +171,7 @@ class MemSearchDialog extends DialogComponentProvider {
super.executeProgressTask(task, delay);
}
void updateSearchButtonEnablement() {
private void updateSearchButtonEnablement() {
nextButton.setEnabled(searchEnabled && !isSearching && hasValidSearchData);
previousButton.setEnabled(searchEnabled && !isSearching && hasValidSearchData &&
currentFormat.supportsBackwardsSearch());
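
For reference, the enablement pattern above (a single method that recomputes every button from the current state flags) can be sketched in isolation. This is a hypothetical, stand-alone Swing example, not the dialog's actual code:

    import javax.swing.JButton;

    public class SearchButtonEnablementSketch {
        private final JButton nextButton = new JButton("Next");
        private final JButton previousButton = new JButton("Previous");
        private boolean searchEnabled = true;   // hypothetical flags mirroring the dialog's state
        private boolean isSearching;
        private boolean hasValidSearchData;

        // Every state change funnels through one place that recomputes both buttons
        private void updateSearchButtonEnablement() {
            nextButton.setEnabled(searchEnabled && !isSearching && hasValidSearchData);
            previousButton.setEnabled(searchEnabled && !isSearching && hasValidSearchData);
        }

        void searchStarted() {
            isSearching = true;
            updateSearchButtonEnablement();
        }

        void searchCompleted() {
            isSearching = false;
            updateSearchButtonEnablement();
        }
    }
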
@ -376,7 +376,6 @@ class MemSearchDialog extends DialogComponentProvider {
private Container createSeparatorPanel() {
JPanel panel = new JPanel(new GridLayout(1, 1));
panel.setBorder(BorderFactory.createEmptyBorder(10, 0, 10, 10));
panel.add(new JSeparator(SwingConstants.VERTICAL));
return panel;
}
@ -386,22 +385,14 @@ class MemSearchDialog extends DialogComponentProvider {
panel.setBorder(BorderFactory.createEmptyBorder(0, 0, 10, 10));
panel.add(createSeparatorPanel(), BorderLayout.WEST);
panel.add(buildAdvancedPanelContents());
return panel;
}
private Container buildAdvancedPanelContents() {
JPanel panel = new JPanel(new VerticalLayout(5));
// endieness
panel.add(buildEndienessPanel());
// defined/undefined data
panel.add(buildCodeUnitTypesPanel());
// alignment
panel.add(buildAlignmentPanel());
return panel;
}
@ -574,7 +565,6 @@ class MemSearchDialog extends DialogComponentProvider {
optionsPanel.setBorder(BorderFactory.createEmptyBorder(0, 10, 20, 10));
optionsPanel.setLayout(new BoxLayout(optionsPanel, BoxLayout.Y_AXIS));
optionsPanel.add(northPanel);
// optionsPanel.add( southPanel );
optionsPanel.add(advancedButtonPanel);
return optionsPanel;
@ -664,9 +654,6 @@ class MemSearchDialog extends DialogComponentProvider {
return false;
}
/* (non Javadoc)
* @see ghidra.util.bean.GhidraDialog#getTaskScheduler()
*/
@Override
protected TaskScheduler getTaskScheduler() {
return super.getTaskScheduler();
@ -813,16 +800,16 @@ class MemSearchDialog extends DialogComponentProvider {
}
}
public void setSearchEnabled(boolean b) {
void setSearchEnabled(boolean b) {
searchEnabled = b;
}
public void searchCompleted() {
void searchCompleted() {
isSearching = false;
updateSearchButtonEnablement();
}
public String getSearchText() {
String getSearchText() {
return valueComboBox.getText();
}

View File

@ -174,6 +174,7 @@ public class MemSearchPlugin extends Plugin implements OptionsChangeListener,
if (program == null) {
return false;
}
searchAgainAction.setEnabled(true);
if (localSearchInfo.isSearchAll()) {
@ -363,6 +364,11 @@ public class MemSearchPlugin extends Plugin implements OptionsChangeListener,
setNavigatable(context.getNavigatable());
performSearch(searchInfo);
}
@Override
protected boolean isEnabledForContext(NavigatableActionContext context) {
return searchInfo != null && super.isEnabledForContext(context);
}
};
searchAgainAction
.setHelpLocation(new HelpLocation(HelpTopics.SEARCH, searchAgainAction.getName()));
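
The new isEnabledForContext() override is the heart of the fix: "Search Again" stays disabled until a search has actually produced a searchInfo. A minimal sketch of that idea, with hypothetical names and without the real Ghidra action classes:

    import java.util.Objects;

    public class SearchAgainEnablementSketch {

        // Hypothetical stand-in for the plugin's remembered search parameters
        static final class SearchInfo {
            final String searchText;
            SearchInfo(String searchText) { this.searchText = Objects.requireNonNull(searchText); }
        }

        private SearchInfo searchInfo;  // null until the first search runs

        // Mirrors the override above: the action is usable only when a prior search exists
        boolean isSearchAgainEnabled(boolean contextIsValid) {
            return searchInfo != null && contextIsValid;
        }

        void rememberSearch(SearchInfo info) {
            this.searchInfo = info;
        }
    }
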
@ -501,7 +507,6 @@ public class MemSearchPlugin extends Plugin implements OptionsChangeListener,
TableService tableService) {
String searchString = searchDialog.getSearchText();
String title = "Search Memory - \"" + searchString + "\"";
String type = "Search";
if (navigatable.supportsMarkers()) {
@ -556,7 +561,7 @@ public class MemSearchPlugin extends Plugin implements OptionsChangeListener,
// Inner Classes
//==================================================================================================
class TableLoadingListener implements ThreadedTableModelListener {
private class TableLoadingListener implements ThreadedTableModelListener {
private ThreadedTableModel<MemSearchResult, ?> model;

View File

@ -53,6 +53,7 @@ public class MemSearcherAlgorithm implements MemorySearchAlgorithm {
int progressCount = 0;
while (addressRanges.hasNext() && !monitor.isCancelled()) {
AddressRange range = addressRanges.next();
searchRange(accumulator, range, monitor, progressCount);
progressCount += range.getLength();
@ -73,6 +74,7 @@ public class MemSearcherAlgorithm implements MemorySearchAlgorithm {
while (startAddress != null && !monitor.isCancelled()) {
Address matchAddress = mem.findBytes(startAddress, endAddress, searchData.getBytes(),
searchData.getMask(), forwardSearch, monitor);
if (isMatchingAddress(matchAddress)) {
MemSearchResult result = new MemSearchResult(matchAddress, length);
accumulator.add(result);
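
The loop above repeatedly asks Memory.findBytes() for the next hit and accumulates results until the monitor is cancelled. A simplified sketch of that loop, assuming the standard Ghidra Memory/Address APIs (this is not the class's actual implementation):

    import ghidra.program.model.address.Address;
    import ghidra.program.model.address.AddressRange;
    import ghidra.program.model.mem.Memory;
    import ghidra.util.datastruct.Accumulator;
    import ghidra.util.task.TaskMonitor;

    public class FindBytesLoopSketch {

        // Scan one address range, adding every match start address to the accumulator.
        // 'bytes' and 'mask' correspond to searchData.getBytes()/getMask() above.
        void searchRange(Memory mem, AddressRange range, byte[] bytes, byte[] mask,
                Accumulator<Address> accumulator, TaskMonitor monitor) {

            Address start = range.getMinAddress();
            Address end = range.getMaxAddress();

            while (start != null && !monitor.isCancelled()) {
                Address match = mem.findBytes(start, end, bytes, mask, true /*forward*/, monitor);
                if (match == null) {
                    return; // no more hits in this range
                }
                accumulator.add(match);
                start = match.next(); // continue just past the last hit
            }
        }
    }
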

View File

@ -1,6 +1,5 @@
/* ###
* IP: GHIDRA
* REVIEWED: YES
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -20,12 +19,7 @@ import ghidra.util.task.Task;
import ghidra.util.task.TaskMonitor;
/**
* Calls a method on the Ghidra Dialog to get a TaskManager.
* The dialog shows a progress bar; this class schedules a task to run;
* when the task completes notify the dialog to hide the progress bar.
*
*
*
* Schedules tasks to be run in the {@link DialogComponentProvider}.
*/
public class TaskScheduler implements Runnable {
@ -37,17 +31,15 @@ public class TaskScheduler implements Runnable {
private Thread taskThread;
/**
* Constructs a new TaskScheduler
* @param comp the
* Constructor
* @param comp the dialog provider.
*/
TaskScheduler(DialogComponentProvider comp) {
this.comp = comp;
}
/**
* Set the next task to run; does not affect a currently running
* task.
* Set the next task to run; does not affect a currently running task.
* @param task the next task to run.
* @param delay time in milliseconds to delay showing progress or activity.
*/
@ -61,9 +53,6 @@ public class TaskScheduler implements Runnable {
}
}
/**
* @see java.lang.Runnable#run()
*/
@Override
public void run() {
@ -84,7 +73,6 @@ public class TaskScheduler implements Runnable {
/**
* Blocks until the current task completes.
*
*/
public void waitForCurrentTask() {
Thread t = getCurrentThread();
@ -100,17 +88,14 @@ public class TaskScheduler implements Runnable {
/**
* Clear the scheduled task; does not affect the currently running task.
*
*/
synchronized void clearScheduledTask() {
scheduledTask = null;
}
/**
* Returns true if this task scheduler is in the process of running a task or has a pending
* task.
* @return true if this task scheduler is in the process of running a task or has a pending
* task.
* Returns true if this task scheduler is running a task or has a pending task.
* @return true if this task scheduler is running a task or has a pending task.
*/
public synchronized boolean isBusy() {
return taskThread != null || scheduledTask != null;
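
The TaskScheduler javadoc above describes "one running task, at most one pending task" semantics. A compact, hypothetical stand-in showing just those semantics (not the Ghidra implementation):

    public class MiniTaskScheduler implements Runnable {

        private Runnable scheduledTask;   // the next task; replacing it does not affect a running task
        private Thread taskThread;        // non-null while a task is executing

        public synchronized void setNextTask(Runnable task) {
            scheduledTask = task;
            if (taskThread == null) {
                taskThread = new Thread(this, "Mini Task Scheduler");
                taskThread.start();
            }
        }

        public synchronized boolean isBusy() {
            return taskThread != null || scheduledTask != null;
        }

        @Override
        public void run() {
            while (true) {
                Runnable task;
                synchronized (this) {
                    task = scheduledTask;
                    scheduledTask = null;
                    if (task == null) {
                        taskThread = null;   // nothing left to do; let the thread die
                        return;
                    }
                }
                task.run();
            }
        }
    }
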

View File

@ -20,10 +20,8 @@ import static org.apache.commons.lang3.exception.ExceptionUtils.*;
import java.util.Collection;
import java.util.concurrent.CountDownLatch;
import javax.swing.SwingUtilities;
import ghidra.util.Msg;
import ghidra.util.SystemUtilities;
import ghidra.util.Swing;
import ghidra.util.datastruct.SynchronizedListAccumulator;
import ghidra.util.exception.CancelledException;
import ghidra.util.task.SwingUpdateManager;
@ -35,7 +33,7 @@ public class IncrementalLoadJob<ROW_OBJECT> extends Job implements ThreadedTable
private final ThreadedTableModel<ROW_OBJECT, ?> threadedModel;
private final ThreadedTableModelUpdateMgr<ROW_OBJECT> updateManager;
/**
/**
* Used to signal that the updateManager has finished loading the final contents gathered
* by this job.
*/
@ -67,8 +65,9 @@ public class IncrementalLoadJob<ROW_OBJECT> extends Job implements ThreadedTable
// InterruptedException handled via notification below
if (!hasCause(e, InterruptedException.class)) {
// TODO: is there a better way to handle exceptions? If we don't grab it and show
// it here, then it will be handled by the Worker, which just prints it to the console.
// Plus, handling it here gives us a chance to notify that the process is complete.
// it here, then it will be handled by the Worker, which just prints it to the
// console. Plus, handling it here gives us a chance to notify that the process is
// complete.
String name = threadedModel.getName();
Msg.showError(this, null, "Unexpected Exception",
"Unexpected exception loading table model \"" + name + "\"", e);
@ -103,7 +102,7 @@ public class IncrementalLoadJob<ROW_OBJECT> extends Job implements ThreadedTable
}
catch (InterruptedException e) {
// This implies the user has cancelled the job by starting a new one or that we have
// been disposed. Whatever the cause, we want to let the control flow continue as
// been disposed. Whatever the cause, we want to let the control flow continue as
// normal.
Thread.currentThread().interrupt(); // preserve the interrupt status
}
@ -121,14 +120,14 @@ public class IncrementalLoadJob<ROW_OBJECT> extends Job implements ThreadedTable
//
// ...Add a listener to know when we can tell our listeners that loading is finished.
//
// (Add the listener later so that we don't get notified of any pending events from
// the update manager that may have already been posted by the time we have
// (Add the listener later so that we don't get notified of any pending events from
// the update manager that may have already been posted by the time we have
// acquired the lock above.)
//
// Also, we are guaranteed that the flush() call will not call to the Swing thread
// before we add our listener due to the fact that we currently hold a lock on the
// updateManager, which is required to notify the listeners. The basic sequence is:
//
//
// -Flush starts a job thread (or uses the running one)
// -We have the update manager's lock, so it blocks on jobDone() if that happens before
// this code can return
@ -137,21 +136,20 @@ public class IncrementalLoadJob<ROW_OBJECT> extends Job implements ThreadedTable
// -A block on jobDone() can now complete as we release the lock
// -jobDone() will notify listeners in an invokeLater(), which puts it behind ours
//
SwingUtilities.invokeLater(
Swing.runLater(
() -> updateManager.addThreadedTableListener(IncrementalLoadJob.this));
}
}
private void notifyStarted(TaskMonitor monitor) {
if (listener != null) {
SystemUtilities.runIfSwingOrPostSwingLater(() -> listener.loadingStarted());
Swing.runIfSwingOrRunLater(() -> listener.loadingStarted());
}
}
private void notifyCompleted(final boolean wasCancelled) {
private void notifyCompleted(boolean wasCancelled) {
if (listener != null) {
SystemUtilities.runIfSwingOrPostSwingLater(
() -> listener.loadingFinished(wasCancelled));
Swing.runIfSwingOrRunLater(() -> listener.loadingFinished(wasCancelled));
}
updateManager.removeThreadedTableListener(this);
@ -178,7 +176,7 @@ public class IncrementalLoadJob<ROW_OBJECT> extends Job implements ThreadedTable
@Override
public void loadingFinished(boolean wasCancelled) {
// At this point we are in a normal completed callback from the update manager. This
// is when we need the latch to be counted down. In the abnormal failure/cancel case,
// is when we need the latch to be counted down. In the abnormal failure/cancel case,
// we will not block on the latch and thus it doesn't need to be counted down elsewhere.
completedCallbackLatch.countDown();
}
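
The latch usage described here (the job blocks until the update manager's completed callback counts the latch down) can be reduced to a small JDK-only sketch with hypothetical names:

    import java.util.concurrent.CountDownLatch;

    public class CompletionLatchSketch {

        private final CountDownLatch completedCallbackLatch = new CountDownLatch(1);

        // Called from the job thread after handing results to the update manager
        void waitForCompletion() {
            try {
                completedCallbackLatch.await();
            }
            catch (InterruptedException e) {
                // cancelled or disposed; let control flow continue, but preserve the interrupt status
                Thread.currentThread().interrupt();
            }
        }

        // Called back (e.g., by the update manager) when the final contents are in place
        void loadingFinished() {
            completedCallbackLatch.countDown();
        }
    }
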
@ -188,7 +186,7 @@ public class IncrementalLoadJob<ROW_OBJECT> extends Job implements ThreadedTable
//==================================================================================================
/**
* An accumulator that will essentially periodically update the table with the data that
* An accumulator that will essentially periodically update the table with the data that
* is being provided to the accumulator.
*/
private class IncrementalUpdatingAccumulator extends SynchronizedListAccumulator<ROW_OBJECT> {

View File

@ -19,11 +19,8 @@ import static docking.widgets.table.threaded.TableUpdateJob.JobState.*;
import java.util.*;
import javax.swing.SwingUtilities;
import docking.widgets.table.*;
import ghidra.util.Msg;
import ghidra.util.SystemUtilities;
import ghidra.util.*;
import ghidra.util.datastruct.Algorithms;
import ghidra.util.exception.CancelledException;
import ghidra.util.task.TaskMonitor;
@ -46,8 +43,8 @@ import ghidra.util.task.TaskMonitor;
* are no add/removes in the list, then that step does nothing.
* <p>
* Before the job completes, new calls to sort and filter can be called. If the job is past the
* stage of the new call, the <code>monitor</code> is cancelled, causing the current stage to abort.
* The next state of this job is set to the appropriate state for the call, the monitor is
* stage of the new call, the <code>monitor</code> is cancelled, causing the current stage to abort.
* The next state of this job is set to the appropriate state for the call, the monitor is
* reset, and the job begins executing the next stage, based upon the new call.
*
* @param <T> the type of data that each row in the table represents.
@ -56,12 +53,12 @@ public class TableUpdateJob<T> {
//@formatter:off
static enum JobState {
NOT_RUNNING,
LOADING,
FILTERING,
ADD_REMOVING,
SORTING,
APPLYING,
NOT_RUNNING,
LOADING,
FILTERING,
ADD_REMOVING,
SORTING,
APPLYING,
DONE
}
//@formatter:on
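
The hunks above show this job as a small state machine over JobState. A loose, hypothetical sketch of the transition idea (the real job also cancels and resets the monitor when jumping back to an earlier state):

    public class JobStateSketch {

        enum JobState {
            NOT_RUNNING, LOADING, FILTERING, ADD_REMOVING, SORTING, APPLYING, DONE
        }

        private JobState currentState = JobState.NOT_RUNNING;
        private JobState pendingRequestedState;   // set when a sort/filter request arrives mid-run

        // Advance to the next state, honoring a recovery request (e.g., jump back to SORTING)
        synchronized void gotoNextState() {
            if (pendingRequestedState != null) {
                currentState = pendingRequestedState;
                pendingRequestedState = null;
                return;
            }
            JobState[] order = JobState.values();
            int next = Math.min(currentState.ordinal() + 1, JobState.DONE.ordinal());
            currentState = order[next];
        }

        // A late sort request only succeeds if the job has not already finished
        synchronized boolean requestSort() {
            if (currentState == JobState.DONE) {
                return false;
            }
            pendingRequestedState = JobState.SORTING;
            return true;
        }
    }
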
@ -76,16 +73,19 @@ public class TableUpdateJob<T> {
private volatile boolean reloadData;
private volatile boolean doForceSort;
private volatile boolean doForceFilter = true; // always refilter, unless told not to (like when sorting)
// always refilter, unless told not to (like when sorting)
private volatile boolean doForceFilter = true;
private volatile TableSortingContext<T> newSortContext; // sort info to use for the next sort
private TableSortingContext<T> lastSortContext; // sort info that was used for the last complete sort
private TableSortingContext<T> lastSortContext; // sort info used for the last complete sort
protected List<AddRemoveListItem<T>> addRemoveList = new ArrayList<>();
private volatile JobState currentState;
private volatile JobState pendingRequestedState;
// Note: we leave this debug code here because debugging job flow is so difficult, that it
// is nice to not have to re-invent this when it is needed.
// Note: we leave this debug code here because debugging job flow is so difficult, that it is
// nice to not have to re-invent this when it is needed.
private List<JobState> debugStateHistory = new ArrayList<>();
/** a flag to signal that this job is no longer being used and can die a horrible death */
@ -98,8 +98,8 @@ public class TableUpdateJob<T> {
}
/**
* Meant to be called by subclasses, not clients. This method will trigger this job not
* to load data, but rather to use the given data.
* Meant to be called by subclasses, not clients. This method will trigger this job not to
* load data, but rather to use the given data.
*
* @param data The data to process.
*/
@ -108,9 +108,9 @@ public class TableUpdateJob<T> {
}
/**
* Allows the precise disabling of the filter operation. For example, when the user sorts,
* no filtering is needed. If the filter has changed, then a filter will take place,
* regardless of the state of this variable.
* Allows the precise disabling of the filter operation. For example, when the user sorts, no
* filtering is needed. If the filter has changed, then a filter will take place, regardless
* of the state of this variable.
*
* @param force false to reuse the current filter, if possible.
*/
@ -143,9 +143,9 @@ public class TableUpdateJob<T> {
}
/**
* Forces this job to completely reload the data, instead of copying from
* the model's current data. This call is not allowed on the currently running job and is only
* appropriate for a pending job.
* Forces this job to completely reload the data, instead of copying from the model's current
* data. This call is not allowed on the currently running job and is only appropriate for a
* pending job.
*/
public synchronized void reload() {
if (currentState != NOT_RUNNING) {
@ -163,11 +163,11 @@ public class TableUpdateJob<T> {
/**
* Adds the Add/Remove item to the list of items to be processed in the add/remove phase. This
* call is not allowed on running jobs, only pending jobs.
*
* @param item the add/remove item to add to the list of items to be processed in the add/remove
* phase of this job.
* @param maxAddRemoveCount the maximum number of add/remove jobs to queue before performing
* a full reload
*
* @param item the add/remove item to add to the list of items to be processed in the
* add/remove phase of this job.
* @param maxAddRemoveCount the maximum number of add/remove jobs to queue before performing a
* full reload
*/
public synchronized void addRemove(AddRemoveListItem<T> item, int maxAddRemoveCount) {
if (currentState != NOT_RUNNING) {
@ -187,22 +187,22 @@ public class TableUpdateJob<T> {
/**
* Sets the TableColumnComparator to use for sorting the data. This method can be called on
* the currently running job as well as the pending job. If called on the running job, the effect
* depends on the running job's state:
* the currently running job as well as the pending job. If called on the running job, the
* effect depends on the running job's state:
* <ul>
* <li>If the sort state hasn't happened yet, all it does is set the comparator for when
* the sort occurs.
* <li>If the sort state hasn't happened yet, all it does is set the comparator for when
* the sort occurs.
* <li>If the sort state has already been started or completed, then this method attempts
* to stop the current process phase and cause the state
* machine to return to the sort phase.
* <li>If the current job has already entered the DONE state, then the sort cannot
* take effect in this job and a false value is returned to indicate the
* to stop the current process phase and cause the state machine to return to the sort
* phase.
* <li>If the current job has already entered the DONE state, then the sort cannot take
* effect in this job and a false value is returned to indicate the
* sort was not handled by this job.
* </ul>
* </ul>
* @param newSortingContext the TableColumnComparator to use to sort the data.
* @param forceSort True signals to re-sort, even if this is already sorted
* @return true if the sort can be processed by this job, false if this job is essentially already
* completed and therefor cannot perform the sort job.
* @return true if the sort can be processed by this job, false if this job is essentially
* already completed and therefore cannot perform the sort job.
*/
public synchronized boolean requestSort(TableSortingContext<T> newSortingContext,
boolean forceSort) {
@ -212,7 +212,7 @@ public class TableUpdateJob<T> {
this.doForceSort = forceSort;
this.newSortContext = newSortingContext;
if (hasSorted()) {
// the user has requested a new sort, and we've already sorted, so we need to sort again
// a new sort has been requested, and we've already sorted, so we need to sort again
monitor.cancel();
pendingRequestedState = SORTING;
}
@ -220,21 +220,21 @@ public class TableUpdateJob<T> {
}
/**
* Tells the job that the filter criteria has changed. This method can be called on
* the currently running job as well as the pending job. If called on the running job, the
* effect depends on the running job's state:
* Tells the job that the filter criteria has changed. This method can be called on the
* currently running job as well as the pending job. If called on the running job, the effect
* depends on the running job's state:
* <ul>
* <li>If the filter state hasn't happened yet, then nothing needs to be done as this job
* will filter later anyway.
* <li>If the filter state has already been started or completed, then this method
* attempts to stop the current process phase and cause the state machine to
* return to the filter phase.
* <li>If the filter state hasn't happened yet, then nothing needs to be done as this job
* will filter later anyway.
* <li>If the filter state has already been started or completed, then this method
* attempts to stop the current process phase and cause the state machine to return to
* the filter phase.
* <li>If the current job has already entered the DONE state, then the filter cannot take
* effect in this job and a false value is returned to indicate the filter was
* not handled by this job.
* </ul>
* @return true if the filter can be processed by this job, false if this job is essentially already
* completed and therefor cannot perform the filter job.
* effect in this job and a false value is returned to indicate the filter was not
* handled by this job.
* </ul>
* @return true if the filter can be processed by this job, false if this job is essentially
* already completed and therefore cannot perform the filter job.
*/
public synchronized boolean requestFilter() {
if (currentState == DONE) {
@ -265,12 +265,12 @@ public class TableUpdateJob<T> {
}
/**
* Transitions to the next state of this state machine. Handles the special case if the
* monitor has been cancelled by a call to sort() or filter(). In either of these cases,
* the recover state would have been set and indicates that the monitor should be reset and
* the state machine should transition to the recover state instead of the next scheduled
* state. If the monitor has been cancelled, and no recover state has been set, then the
* job was cancelled by the user and the job will end.
* Transitions to the next state of this state machine. Handles the special case if the monitor
* has been cancelled by a call to sort() or filter(). In either of these cases, the recover
* state would have been set and indicates that the monitor should be reset and the state
* machine should transition to the recover state instead of the next scheduled state. If the
* monitor has been cancelled, and no recover state has been set, then the job was cancelled by
* the user and the job will end.
*/
private synchronized void gotoNextState() {
if (monitor.isCancelled()) {
@ -368,19 +368,19 @@ public class TableUpdateJob<T> {
}
/**
* Picks the table data to use for all future states (e.g., filtering, sorting, etc). Data
* can be reused if its filter is a superset of the pending filter. Likewise, if the
* pending filter is itself a superset of the current filter, then this code will walk
* backwards, starting at the current table data, until it finds either the root dataset or
* a child of the root whose filter is a superset of the pending filter.
* Picks the table data to use for all future states (e.g., filtering, sorting, etc). Data can
* be reused if its filter is a superset of the pending filter. Likewise, if the pending
* filter is itself a superset of the current filter, then this code will walk backwards,
* starting at the current table data, until it finds either the root dataset or a child of the
* root whose filter is a superset of the pending filter.
* <p>
* Reusing table data in this way has the potential to consume too much memory (in the case
* where the initial dataset is large and each subsequent filter is a subset of the
* previous filter, where each filter does't significantly reduce the newly filtered dataset.
* where the initial dataset is large and each subsequent filter is a subset of the previous
* filter, where each filter doesn't significantly reduce the newly filtered dataset).
* <p>
* Since much memory could be consumed, we provide an option in the tool to disable this
* reuse of filtered data. When not in use, each filter change will perform a full refilter.
* This is not an issue for tables with moderate to small-sized datasets.
* Since much memory could be consumed, we provide an option in the tool to disable this reuse
* of filtered data. When not in use, each filter change will perform a full refilter. This
* is not an issue for tables with moderate to small-sized datasets.
*
* @return the initial data to use for future filter and sort operations.
*/
@ -399,10 +399,10 @@ public class TableUpdateJob<T> {
return copy;
}
/**
/**
* Gets any existing data that matches the current filter, if any.
* @return data that should be the start point for the next filter state; null if there
* is no filter set or if the current data's filter does not match the pending filter
* @return data that should be the start point for the next filter state; null if there is no
* filter set or if the current data's filter does not match the pending filter
*/
private TableData<T> getReusableFilteredData() {
TableData<T> allTableData = model.getAllTableData();
@ -412,8 +412,8 @@ public class TableUpdateJob<T> {
}
if (currentAppliedData.isUnrelatedTo(allTableData)) {
// the data has changed such that the currently applied data is not progeny of
// the current master dataset
// the data has changed such that the currently applied data is not progeny of the
// current master dataset
return null;
}
@ -437,10 +437,10 @@ public class TableUpdateJob<T> {
}
if (tableSortDiffersFromSourceData()) {
// The source of the data we are manipulating is sorted in a way that may be
// different from the sort state of the table. In that case, we have to sort the
// data we are creating to match the table and not the source data--the table is
// always the truth keeper of the correct sort.
// The source of the data we are manipulating is sorted in a way that may be different
// from the sort state of the table. In that case, we have to sort the data we are
// creating to match the table and not the source data--the table is always the truth
// keeper of the correct sort.
newSortContext = model.getSortingContext();
return true;
}
@ -460,11 +460,11 @@ public class TableUpdateJob<T> {
/** True if the sort applied to the table is not the same as that in the source dataset */
private boolean tableSortDiffersFromSourceData() {
// Note: at this point in time we do not check to see if the table is user-unsorted. It
// doesn't seem to hurt to leave the original source data sorted, even if the
// current context is 'unsorted'. In that case, this method will return true,
// that the sorts are different. But, later in this job, we check the new sort and
// do not perform sorting when 'unsorted'
return !SystemUtilities.isEqual(sourceData.getSortContext(), model.getSortingContext());
// doesn't seem to hurt to leave the original source data sorted, even if the current
// context is 'unsorted'. In that case, this method will return true, that the sorts
// are different. But, later in this job, we check the new sort and do not perform
// sorting when 'unsorted'
return !Objects.equals(sourceData.getSortContext(), model.getSortingContext());
}
/**
@ -475,7 +475,7 @@ public class TableUpdateJob<T> {
if (lastSortContext == null || doForceSort) {
return false;
}
// we know that the direction of the sort is different because of a previous call to equals()
// we know that the direction of the sort is different due to a previous call to equals()
return lastSortContext.isReverseOf(newSortContext);
}
@ -523,10 +523,10 @@ public class TableUpdateJob<T> {
private void maybeSortSourceData() {
//
// Usually the source data is sorted before any filter is applied. However, this is not
// the case when a load of new data is followed directly by a filter action. We rely on
// the source data being sorted in order to perform fast translations from the table's
// view to the table's model when it is filtered. Thus, make sure that any time we are
// sorting the filtered data, that the source data too is sorted.
// the case when a load of new data is followed directly by a filter action. We rely on
// the source data being sorted in order to perform fast translations from the table's view
// to the table's model when it is filtered. Thus, make sure that any time we are sorting
// the filtered data, that the source data too is sorted.
//
if (sourceData == updatedData) {
// they are the same dataset; it will be sorted after this call
@ -613,8 +613,8 @@ public class TableUpdateJob<T> {
lastSortContext = updatedData.getSortContext();
}
/**
* The current data can be re-used when the data and filter have not changed
/**
* The current data can be re-used when the data and filter have not changed
* (this implies a sort only operation)
*/
private boolean canReuseCurrentFilteredData() {
@ -623,8 +623,8 @@ public class TableUpdateJob<T> {
// -we have not been told to filter
// -the table is not currently filtered, or is filtered, but
// --the source data that the filtered data is based upon hasn't changed
// --the filter hasn't changed
//
// --the filter hasn't changed
//
if (doForceFilter) {
return false;
}
@ -652,7 +652,7 @@ public class TableUpdateJob<T> {
TableData<T> allData = sourceData.getRootData();
try {
SwingUtilities.invokeAndWait(() -> {
Swing.runNow(() -> {
if (isFired) {
return; // in case we were cancelled whilst being posted
}

View File

@ -207,14 +207,12 @@ public abstract class ThreadedTableModel<ROW_OBJECT, DATA_SOURCE>
}
/**
* The basic method that all children must implement. This is where children load their
* data.
* @param accumulator the datastructure into which you should incrementally place you table
* row data
* The basic method that all children must implement. This is where children load their data.
* @param accumulator the data structure into which you should incrementally place your table row
* data
* @param monitor the task monitor to check for cancellations and to update progress
*
* @throws CancelledException if the task monitor has been cancelled and a call is made
* to <code>monitor.checkCancelled();</code>.
* @throws CancelledException if the task monitor has been cancelled and a call is made to
* <code>monitor.checkCancelled();</code>.
*/
protected abstract void doLoad(Accumulator<ROW_OBJECT> accumulator, TaskMonitor monitor)
throws CancelledException;
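
A minimal sketch of what a doLoad() implementation might look like in a subclass, assuming the contract described above (hypothetical model; a real one would extend ThreadedTableModel):

    import ghidra.util.datastruct.Accumulator;
    import ghidra.util.exception.CancelledException;
    import ghidra.util.task.TaskMonitor;

    public class DoLoadSketch {

        protected void doLoad(Accumulator<Long> accumulator, TaskMonitor monitor)
                throws CancelledException {

            monitor.initialize(1000);
            for (long i = 0; i < 1000; i++) {
                monitor.checkCancelled();   // honor cancellation between units of work
                accumulator.add(i);         // hand rows to the table incrementally
                monitor.incrementProgress(1);
            }
        }
    }
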
@ -227,21 +225,21 @@ public abstract class ThreadedTableModel<ROW_OBJECT, DATA_SOURCE>
* <p><u>Performance Notes</u>
* <ul>
* <li>This method uses a {@link HashMap} to cache column values for a row object. Further,
* upon a key collision, the map will perform O(logn) lookups <b>if the
* key (the row object) is {@link Comparable}</b>. If the key is not comparable, then
* the collision lookups will be linear. So, make your row objects comparable
* for maximum speed <b>when your table size becomes large</b> (for small tables there
* is no observable impact).
* <li>Even if your row objects are comparable, relying on this table model to convert your
* row object into column values can be slow <b>for large tables</b>. This is because
* the default column comparison framework for the tables will call this method
* multiple times, resulting in many more method calls per column value lookup. For
* large data, the repeated method calls start to become noticeable. For maximum
* column sorting speed, use a comparator that works not on the column value, but on
* the row value. To do this, return a comparator from your model's
* {@link #createSortComparator(int)} method, instead of from the column itself or
* by relying on column item implementing {@link Comparable}. This is possible any
* time that a row object already has a field that is used for a given column.
* upon a key collision, the map will perform O(logn) lookups <b>if the key (the row
* object) is {@link Comparable}</b>. If the key is not comparable, then the collision
* lookups will be linear. So, make your row objects comparable for maximum speed
* <b>when your table size becomes large</b> (for small tables there is no observable
* impact).
* <li>Even if your row objects are comparable, relying on this table model to convert your
* row object into column values can be slow <b>for large tables</b>. This is because the
* default column comparison framework for the tables will call this method multiple
* times, resulting in many more method calls per column value lookup. For large data,
* the repeated method calls start to become noticeable. For maximum column sorting
* speed, use a comparator that works not on the column value, but on the row value. To
* do this, return a comparator from your model's {@link #createSortComparator(int)}
* method, instead of from the column itself or by relying on column item implementing
* {@link Comparable}. This is possible any time that a row object already has a field
* that is used for a given column.
* </ul>
*
* @param rowObject the row object
@ -253,8 +251,8 @@ public abstract class ThreadedTableModel<ROW_OBJECT, DATA_SOURCE>
Map<ROW_OBJECT, Map<Integer, Object>> cachedColumnValues = threadLocalColumnCache.get();
if (cachedColumnValues == null) {
// the caching has not been enabled--this must be a simple lookup from a client
// that cares not about speed
// the caching has not been enabled--this must be a simple lookup from a client that
// cares not about speed
return getColumnValueForRow(rowObject, columnIndex);
}
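
The sorting advice above (compare row objects directly rather than re-deriving column values) might look like the following hypothetical createSortComparator() sketch; the row type and fields are invented for illustration:

    import java.util.Comparator;

    // Hypothetical row type whose fields back the table's columns
    class RowObjectSketch {
        final String name;
        final long size;
        RowObjectSketch(String name, long size) { this.name = name; this.size = size; }
    }

    class RowComparatorSketch {

        // What an overridden createSortComparator(int) might return: comparators that work
        // on the row object itself instead of going through per-cell value lookups
        Comparator<RowObjectSketch> createSortComparator(int columnIndex) {
            if (columnIndex == 0) {
                return Comparator.comparing(r -> r.name);
            }
            return Comparator.comparingLong(r -> r.size);
        }
    }
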
@ -413,11 +411,10 @@ public abstract class ThreadedTableModel<ROW_OBJECT, DATA_SOURCE>
/**
* Override this to change how filtering is performed. This implementation will do nothing
* if a <code>TableFilter</code> has not been set via a call to {@link #setTableFilter(TableFilter)}.
* if a <code>TableFilter</code> has not been set via a call to
* {@link #setTableFilter(TableFilter)}.
*
*
* @param data The list of data to be filtered.
*
* @param monitor the progress monitor to check for cancellation.
* @param lastSortingContext the comparator used to sort data. This can be used by overridden
* filter methods that need to query data about how the table is sorted.
@ -479,9 +476,9 @@ public abstract class ThreadedTableModel<ROW_OBJECT, DATA_SOURCE>
public void setTableFilter(TableFilter<ROW_OBJECT> tableFilter) {
this.pendingTableFilter = tableFilter;
if (pendingTableFilter == null) {
// Don't allow the pending filter to be null in this case. The client has changed
// the filter. If we use null, then we don't know the difference between a client
// change request or a simple refilter operation.
// Don't allow the pending filter to be null in this case. The client has changed the
// filter. If we use null, then we don't know the difference between a client change
// request or a simple refilter operation.
pendingTableFilter = new NullTableFilter<>();
}
reFilter();
@ -517,13 +514,13 @@ public abstract class ThreadedTableModel<ROW_OBJECT, DATA_SOURCE>
/**
* Removes the specified object from this model and schedules an update.
*
* <P>Note: for this method to function correctly, the given object must compare as
* {@link #equals(Object)} and have the same {@link #hashCode()} as the object to be removed
* <P>Note: for this method to function correctly, the given object must compare as
* {@link #equals(Object)} and have the same {@link #hashCode()} as the object to be removed
* from the table data. This allows clients to create proxy objects to pass into this method,
* as long as they honor those requirements.
* as long as they honor those requirements.
*
* <P>If this model's data is sorted, then a binary search will be used to locate the item
* to be removed. However, for this to work, all field used to sort the data must still be
* to be removed. However, for this to work, all fields used to sort the data must still be
* available from the original object and must be the same values. If this is not true, then
* the binary search will not work and a brute force search will be used.
*
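
The proxy-object requirement above boils down to equals() and hashCode() agreeing with the stored row. A hypothetical row type illustrating that contract:

    import java.util.Objects;

    // Identity is the address only; a lightweight proxy with the same address can be
    // passed to removeObject() in place of the original instance
    class AddressRowSketch {
        final long address;
        final String label;   // not part of identity

        AddressRowSketch(long address, String label) {
            this.address = address;
            this.label = label;
        }

        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof AddressRowSketch)) {
                return false;
            }
            return address == ((AddressRowSketch) obj).address;
        }

        @Override
        public int hashCode() {
            return Objects.hash(address);
        }
    }
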
@ -544,7 +541,7 @@ public abstract class ThreadedTableModel<ROW_OBJECT, DATA_SOURCE>
//@formatter:off
// The data is changed when it is filtered OR when an item has been added or removed
boolean dataChanged = this.filteredData.getId() != filteredData.getId() ||
boolean dataChanged = this.filteredData.getId() != filteredData.getId() ||
this.filteredData.size() != filteredData.size();
//@formatter:on
this.allData = allData;
@ -622,17 +619,13 @@ public abstract class ThreadedTableModel<ROW_OBJECT, DATA_SOURCE>
}
/**
* Schedules the model to completely reload
* its underlying data.
* Schedules the model to completely reload its underlying data.
*/
public void reload() {
cancelCurrentWorkerJob();
updateManager.reload();
}
/**
* @see javax.swing.table.AbstractTableModel#fireTableChanged(javax.swing.event.TableModelEvent)
*/
@Override
public void fireTableChanged(TableModelEvent e) {
if (Swing.isSwingThread()) {
@ -643,25 +636,33 @@ public abstract class ThreadedTableModel<ROW_OBJECT, DATA_SOURCE>
}
/**
* Disposes this model.
* Once a model has been disposed, it cannot be reused.
* Disposes this model. Once a model has been disposed, it cannot be reused.
*/
@Override
public void dispose() {
// Signal to listeners now that the load is being cancelled. Otherwise, any delayed cancel
// callbacks will not works because the listeners have been removed. Any future requests
// to notifyFinished() due to cancelling below will have no effect because we clear the
// listeners.
Swing.runNow(() -> {
notifyFinished(true);
listeners.clear();
});
updateManager.dispose();
if (worker != null) {
worker.dispose();
}
doClearData();
disposeDynamicColumnData();
listeners.clear();
clearCache();
}
/**
* This method will clear all data and trigger fire a table data changed. Use this method to
* immediately clear all data. This is useful when you want to reload your table data and
* not have any old data hanging around being painted, which can produce odd results.
* immediately clear all data. This is useful when you want to reload your table data and not
* have any old data hanging around being painted, which can produce odd results.
*/
protected void clearData() {
doClearData();
@ -677,8 +678,7 @@ public abstract class ThreadedTableModel<ROW_OBJECT, DATA_SOURCE>
}
/**
* Cancels all current and pending updates to the model. Waits until all updates have
* been cancelled.
* Cancels all current and pending updates to the model.
*/
public void cancelAllUpdates() {
if (worker != null) {
@ -687,17 +687,14 @@ public abstract class ThreadedTableModel<ROW_OBJECT, DATA_SOURCE>
updateManager.cancelAllJobs();
}
/**
* @see javax.swing.table.TableModel#getRowCount()
*/
@Override
public int getRowCount() {
return filteredData.size();
}
/**
* Given a row index for the raw (unfiltered) model, return the corresponding index in the
* view (filtered) model.
* Given a row index for the raw (unfiltered) model, return the corresponding index in the view
* (filtered) model.
*
* @param modelRow The row index that corresponds to unfiltered data
* @return the index of that row in the filtered data
@ -719,8 +716,8 @@ public abstract class ThreadedTableModel<ROW_OBJECT, DATA_SOURCE>
}
/**
* Given a row index for the view (filtered) model, return the corresponding index in the
* raw (unfiltered) model.
* Given a row index for the view (filtered) model, return the corresponding index in the raw
* (unfiltered) model.
*
* @param viewRow The row index that corresponds to filtered data
* @return the index of that row in the unfiltered data
@ -756,7 +753,7 @@ public abstract class ThreadedTableModel<ROW_OBJECT, DATA_SOURCE>
/**
* Returns the name of this model.
* @return the name of this model
* @return the name of this model.
*/
@Override
public String getName() {
@ -766,7 +763,7 @@ public abstract class ThreadedTableModel<ROW_OBJECT, DATA_SOURCE>
/**
* Returns the corresponding row objects for the specified rows.
* @param rows the table rows
* @return the corresponding database keys
* @return the corresponding database keys.
*/
public List<ROW_OBJECT> getRowObjects(int[] rows) {
List<ROW_OBJECT> list = new ArrayList<>(rows.length);
@ -777,11 +774,11 @@ public abstract class ThreadedTableModel<ROW_OBJECT, DATA_SOURCE>
}
/**
* Sets the update delay, which is how long the model should wait before updating, after
* a change has been made the data
* Sets the update delay, which is how long the model should wait before updating, after a
* change has been made to the data.
*
* @param updateDelayMillis the new update delay
* @param maxUpdateDelayMillis the new max update delay; updates will not wait past this time
* @param updateDelayMillis the new update delay.
* @param maxUpdateDelayMillis the new max update delay; updates will not wait past this time.
*/
void setUpdateDelay(int updateDelayMillis, int maxUpdateDelayMillis) {
this.minUpdateDelayMillis = updateDelayMillis;
@ -808,14 +805,14 @@ public abstract class ThreadedTableModel<ROW_OBJECT, DATA_SOURCE>
/**
* Returns the strategy to use for performing adds and removes to this table. Subclasses can
* override this method to customize this process for their particular type of data. See
* the implementations of {@link TableAddRemoveStrategy} for details.
* override this method to customize this process for their particular type of data. See the
* implementations of {@link TableAddRemoveStrategy} for details.
*
* <P>Note: The default add/remove strategy assumes that objects to be removed will be the
* same instance that is in the list of this model. This allows the {@link #equals(Object)}
* and {@link #hashCode()} to be used when removing the object from the list. If you model
* does not pass the same instance into {@link #removeObject(Object)}, then you will need to
* update your add/remove strategy accordingly.
* <P>Note: The default add/remove strategy assumes that objects to be removed will be the same
* instance that is in the list of this model. This allows the {@link #equals(Object)} and
* {@link #hashCode()} to be used when removing the object from the list. If your model does
* not pass the same instance into {@link #removeObject(Object)}, then you will need to update
* your add/remove strategy accordingly.
*
* @return the strategy
*/
@ -861,13 +858,13 @@ public abstract class ThreadedTableModel<ROW_OBJECT, DATA_SOURCE>
//
// Unusual Code Alert!
// It is odd that a 'notify' method is changing the state of a variable. This is a wart
// that we've chosen to live with. We have a variable that we want to stay around as
// long as the threaded update manager has work to do. We know that this method is
// called when no pending work remains. We use this signal to know that this crufty
// state variable can now be cleansed.
// that we've chosen to live with. We have a variable that we want to stay around as long
// as the threaded update manager has work to do. We know that this method is called when
// no pending work remains. We use this signal to know that this crufty state variable can
// now be cleansed.
//
// This variable may have already been cleared, but just in case of a cancel situation,
// we don't want this hanging around and affecting future sorts.
// This variable may have already been cleared, but just in case of a cancel situation, we
// don't want this hanging around and affecting future sorts.
//
pendingSortContext = null;
@ -893,8 +890,8 @@ public abstract class ThreadedTableModel<ROW_OBJECT, DATA_SOURCE>
//==================================================================================================
/**
* Standard (non-incremental) listener mechanism to receive notifications from the
* update manager.
* Standard (non-incremental) listener mechanism to receive notifications from the update
* manager.
*/
private class NonIncrementalUpdateManagerListener implements ThreadedTableModelListener {
@Override
@ -914,25 +911,24 @@ public abstract class ThreadedTableModel<ROW_OBJECT, DATA_SOURCE>
}
/**
* Listener to get updates from the {@link ThreadedTableModelUpdateMgr}. This listener
* is only here to make sure that non-loading actions, like sorting, will trigger
* notifications to clients. "Loading" events are handled by the listener passed to the
* Listener to get updates from the {@link ThreadedTableModelUpdateMgr}. This listener is only
* here to make sure that non-loading actions, like sorting, will trigger notifications to
* clients. "Loading" events are handled by the listener passed to the
* {@link IncrementalLoadJob} (this {@link IncrementalLoadJobListener}).
* <p>
* We need the two different listeners due to how they are wired to the update manager.
* The {@link IncrementalLoadJobListener} listener is added and removed for each load
* request. We need that listener so that during an incremental load, when multiple starts
* and stops come from the update manager, we don't keep adding and removing the progress
* bar. This works great for a normal loading processes. However, we still need a listener
* for when the users manipulates the data, like for filtering or sorting. Without having
* this listener, there is no way to get those notifications. Thus, this listener has
* to be careful not to "get in the way" of the loading listener--the loading listener will
* thus always take precedence.
* We need the two different listeners due to how they are wired to the update manager. The
* {@link IncrementalLoadJobListener} listener is added and removed for each load request. We
* need that listener so that during an incremental load, when multiple starts and stops come
* from the update manager, we don't keep adding and removing the progress bar. This works
* great for a normal loading process. However, we still need a listener for when the user
* manipulates the data, like for filtering or sorting. Without having this listener, there is
* no way to get those notifications. Thus, this listener has to be careful not to "get in the
* way" of the loading listener--the loading listener will thus always take precedence.
*/
private class IncrementalUpdateManagerListener implements ThreadedTableModelListener {
@Override
public void loadPending() {
// don't care about a pending notification--another listener handles that.
// don't care about a pending notification--another listener handles that
}
@Override
@ -955,9 +951,9 @@ public abstract class ThreadedTableModel<ROW_OBJECT, DATA_SOURCE>
}
/**
* A special internal listener for the model to know when incremental jobs begin and end.
* This allows the model to ignore repeated start/finished events from the update manager
* when it is in 'load incrementally' mode.
* A special internal listener for the model to know when incremental jobs begin and end. This
* allows the model to ignore repeated start/finished events from the update manager when it is
* in 'load incrementally' mode.
*/
protected class IncrementalLoadJobListener extends IncrementalJobListener {
@Override
@ -972,8 +968,8 @@ public abstract class ThreadedTableModel<ROW_OBJECT, DATA_SOURCE>
}
/**
* A listener wrapper that will pass on notifications and then remove itself after
* the loadFinished() call so that not more events are broadcast.
* A listener wrapper that will pass on notifications and then remove itself after the
* loadFinished() call so that no more events are broadcast.
*/
private class OneTimeListenerWrapper implements ThreadedTableModelListener {
private final ThreadedTableModelListener delegate;

View File

@ -21,6 +21,7 @@ import java.util.concurrent.ConcurrentHashMap;
/**
* A listener set that is weakly consistent. This allows for iteration of the set while other
* threads modify the set.
* @param <T> the type
*/
public class ConcurrentListenerSet<T> implements Iterable<T> {
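
The "weakly consistent" behavior described here is the same guarantee a ConcurrentHashMap-backed set gives its iterators. A small JDK-only sketch of iterating while mutating (not the ConcurrentListenerSet implementation itself):

    import java.util.Iterator;
    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;

    public class WeaklyConsistentIterationSketch {

        public static void main(String[] args) {
            Set<Runnable> listeners = ConcurrentHashMap.newKeySet();
            listeners.add(() -> System.out.println("listener 1"));
            listeners.add(() -> System.out.println("listener 2"));

            // Weakly consistent: this never throws ConcurrentModificationException, even
            // though the set is mutated while the iteration is in progress
            Iterator<Runnable> it = listeners.iterator();
            while (it.hasNext()) {
                Runnable listener = it.next();
                listeners.remove(listener);   // safe mid-iteration removal
                listener.run();
            }
        }
    }
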

View File

@ -30,8 +30,9 @@ import ghidra.util.task.TaskMonitor;
/**
* Class that uses a single thread to execute scheduled jobs.
* <p>
* Subclasses provide the {@link BlockingQueue} implementation, which allows for controlling
* how jobs get scheduled (e.g., FIFO or priority-based).
* Subclasses provide the {@link BlockingQueue} implementation, which allows for controlling how
* jobs get scheduled (e.g., FIFO or priority-based).
* @param <T> the type
*/
public abstract class AbstractWorker<T extends Job> {
private ConcurrentQ<T, Object> concurrentQ;
@ -43,16 +44,15 @@ public abstract class AbstractWorker<T extends Job> {
/**
* Constructs a new Worker with the given name.
*
* @param queue the queue into which jobs will be place. e.g. PriorityBlockingQueue or
* LinkedBlockingQueue
* @param isPersistentThread if true, the worker thread will stay around when idle;
* false means that the thread will go away if not needed. Should be true for
* high frequency usage.
* @param name the name of this worker. The thread that executes the jobs will have this
* name.
* @param shareThreadPool true signals to use the given name to find/create a thread pool
* that can be shared throughout the system.
* @param monitor the task monitor that allows for cancelling of jobs.
* @param queue the queue into which jobs will be placed (e.g. PriorityBlockingQueue or
* LinkedBlockingQueue).
* @param isPersistentThread if true, the worker thread will stay around when idle; false means
* that the thread will go away if not needed. Should be true for high frequency
* usage.
* @param name the name of this worker. The thread that executes the jobs will have this name.
* @param shareThreadPool true signals to use the given name to find/create a thread pool that
* can be shared throughout the system.
* @param monitor the task monitor that allows for cancelling of jobs.
*/
protected AbstractWorker(BlockingQueue<T> queue, boolean isPersistentThread, String name,
boolean shareThreadPool, TaskMonitor monitor) {
@ -73,7 +73,7 @@ public abstract class AbstractWorker<T extends Job> {
.setCancelClearsAllJobs(false)
.setJobsReportProgress(true)
.setMaxInProgress(1)
.build(callback);
.build(callback);
// @formatter:on
qProgressListener = new ProgressListener();
@ -162,7 +162,7 @@ public abstract class AbstractWorker<T extends Job> {
private static <K> boolean canSquashException(Throwable t, boolean isCancelled) {
//
// We have a policy of ignoring DB closed exceptions when a task has already
// We have a policy of ignoring DB closed exceptions when a task has already
// been cancelled, as this can happen when shutting down Ghidra.
//
if (!isCancelled) {
@ -210,16 +210,16 @@ public abstract class AbstractWorker<T extends Job> {
}
/**
* Clears any pending jobs and cancels any currently executing job.
* <p>
* <b>Warning: Calling this method may leave the program in a bad
* state. Thus, it is recommended that you only do so when you known that any job that
* could possibly be scheduled does not manipulate sensitive parts of the program; for
* example, opening file handles that should be closed before finishing.</b>
* <p><b>
* If you are unsure
* about whether your jobs handle interrupt correctly, then don't use this method.
* </b>
* Clears any pending jobs and cancels any currently executing job.
* <p>
* <b>Warning: Calling this method may leave the program in a bad state. Thus, it is
* recommended that you only do so when you know that any job that could possibly be scheduled
* does not manipulate sensitive parts of the program; for example, opening file handles that
* should be closed before finishing.</b>
* <p><b>
* If you are unsure about whether your jobs handle interrupt correctly, then don't use this
* method.
* </b>
*/
public void clearAllJobsWithInterrupt_IKnowTheRisks() {
clearAllJobs(true);
@ -233,8 +233,8 @@ public abstract class AbstractWorker<T extends Job> {
}
/**
* Clears any jobs from the queue <b>that have not yet been run</b>. This does not cancel
* the currently running job.
* Clears any jobs from the queue <b>that have not yet been run</b>. This does not cancel the
* currently running job.
*/
public void clearPendingJobs() {
concurrentQ.removeUnscheduledJobs();
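
As a rough, hypothetical illustration of the single-threaded worker model documented above (queue choice controls ordering; pending jobs can be cleared without touching the running one), here is a JDK-only mini worker, not the Ghidra AbstractWorker:

    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;

    public class MiniWorkerSketch {

        private final BlockingQueue<Runnable> queue;   // LinkedBlockingQueue = FIFO; PriorityBlockingQueue would reorder
        private final Thread thread;
        private volatile boolean disposed;

        public MiniWorkerSketch(BlockingQueue<Runnable> queue, String name) {
            this.queue = queue;
            this.thread = new Thread(this::processJobs, name);
            this.thread.setDaemon(true);
            this.thread.start();
        }

        public void schedule(Runnable job) {
            queue.offer(job);
        }

        public void clearPendingJobs() {
            queue.clear();   // jobs not yet started are dropped; the running job is unaffected
        }

        private void processJobs() {
            while (!disposed) {
                try {
                    queue.take().run();
                }
                catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    return;
                }
            }
        }

        public void dispose() {
            disposed = true;
            thread.interrupt();
        }

        public static void main(String[] args) throws InterruptedException {
            MiniWorkerSketch worker = new MiniWorkerSketch(new LinkedBlockingQueue<>(), "Mini Worker");
            worker.schedule(() -> System.out.println("job 1"));
            worker.schedule(() -> System.out.println("job 2"));
            Thread.sleep(200);   // give the worker thread time to drain the queue
            worker.dispose();
        }
    }
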
@ -269,11 +269,12 @@ public abstract class AbstractWorker<T extends Job> {
}
/**
* This method will block until there are no scheduled jobs in this worker. This
* method assumes that all jobs have a priority less than Long.MAX_VALUE.
* This method will block until there are no scheduled jobs in this worker. This method assumes
* that all jobs have a priority less than Long.MAX_VALUE.
* <p>
* For a non-priority
* queue, this call will not wait for jobs that are scheduled after this call was made.
* For a non-priority queue, this call will not wait for jobs that are scheduled after this
* call was made.
* @param maxWait the max number of milliseconds to wait
*/
public void waitUntilNoJobsScheduled(int maxWait) {
try {