iwlwifi: prepare for synchronous error dumps
In some cases it may be necessary to synchronously create a firmware error report; add the necessary infrastructure for this. Signed-off-by: Johannes Berg <johannes.berg@intel.com> Signed-off-by: Luca Coelho <luciano.coelho@intel.com> Link: https://lore.kernel.org/r/iwlwifi.20210802170640.481b6642f0fc.I7c9c958408a285e3d19aceed2a5a3341cfc08382@changeid Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
This commit is contained in:
		
							parent
							
								
									6ac5720086
								
							
						
					
					
						commit
						b8221b0f75
					
				| @ -1,7 +1,7 @@ | ||||
| // SPDX-License-Identifier: GPL-2.0-only
 | ||||
| /******************************************************************************
 | ||||
|  * | ||||
|  * Copyright(c) 2003 - 2014, 2018 - 2020  Intel Corporation. All rights reserved. | ||||
|  * Copyright(c) 2003 - 2014, 2018 - 2021  Intel Corporation. All rights reserved. | ||||
|  * Copyright(c) 2015 Intel Deutschland GmbH | ||||
|  * | ||||
|  * Portions of this file are derived from the ipw3945 project, as well | ||||
| @ -1950,7 +1950,7 @@ static void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| static void iwl_nic_error(struct iwl_op_mode *op_mode) | ||||
| static void iwl_nic_error(struct iwl_op_mode *op_mode, bool sync) | ||||
| { | ||||
| 	struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); | ||||
| 
 | ||||
|  | ||||
| @ -2530,51 +2530,6 @@ int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, | ||||
| } | ||||
| IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect); | ||||
| 
 | ||||
| int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt, | ||||
| 			   struct iwl_fwrt_dump_data *dump_data) | ||||
| { | ||||
| 	struct iwl_fw_ini_trigger_tlv *trig = dump_data->trig; | ||||
| 	enum iwl_fw_ini_time_point tp_id = le32_to_cpu(trig->time_point); | ||||
| 	u32 occur, delay; | ||||
| 	unsigned long idx; | ||||
| 
 | ||||
| 	if (!iwl_fw_ini_trigger_on(fwrt, trig)) { | ||||
| 		IWL_WARN(fwrt, "WRT: Trigger %d is not active, aborting dump\n", | ||||
| 			 tp_id); | ||||
| 		return -EINVAL; | ||||
| 	} | ||||
| 
 | ||||
| 	delay = le32_to_cpu(trig->dump_delay); | ||||
| 	occur = le32_to_cpu(trig->occurrences); | ||||
| 	if (!occur) | ||||
| 		return 0; | ||||
| 
 | ||||
| 	trig->occurrences = cpu_to_le32(--occur); | ||||
| 
 | ||||
| 	/* Check there is an available worker.
 | ||||
| 	 * ffz return value is undefined if no zero exists, | ||||
| 	 * so check against ~0UL first. | ||||
| 	 */ | ||||
| 	if (fwrt->dump.active_wks == ~0UL) | ||||
| 		return -EBUSY; | ||||
| 
 | ||||
| 	idx = ffz(fwrt->dump.active_wks); | ||||
| 
 | ||||
| 	if (idx >= IWL_FW_RUNTIME_DUMP_WK_NUM || | ||||
| 	    test_and_set_bit(fwrt->dump.wks[idx].idx, &fwrt->dump.active_wks)) | ||||
| 		return -EBUSY; | ||||
| 
 | ||||
| 	fwrt->dump.wks[idx].dump_data = *dump_data; | ||||
| 
 | ||||
| 	IWL_WARN(fwrt, | ||||
| 		 "WRT: Collecting data: ini trigger %d fired (delay=%dms).\n", | ||||
| 		 tp_id, (u32)(delay / USEC_PER_MSEC)); | ||||
| 
 | ||||
| 	schedule_delayed_work(&fwrt->dump.wks[idx].wk, usecs_to_jiffies(delay)); | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt, | ||||
| 			    struct iwl_fw_dbg_trigger_tlv *trigger, | ||||
| 			    const char *fmt, ...) | ||||
| @ -2703,6 +2658,58 @@ out: | ||||
| 	clear_bit(wk_idx, &fwrt->dump.active_wks); | ||||
| } | ||||
| 
 | ||||
| int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt, | ||||
| 			   struct iwl_fwrt_dump_data *dump_data, | ||||
| 			   bool sync) | ||||
| { | ||||
| 	struct iwl_fw_ini_trigger_tlv *trig = dump_data->trig; | ||||
| 	enum iwl_fw_ini_time_point tp_id = le32_to_cpu(trig->time_point); | ||||
| 	u32 occur, delay; | ||||
| 	unsigned long idx; | ||||
| 
 | ||||
| 	if (!iwl_fw_ini_trigger_on(fwrt, trig)) { | ||||
| 		IWL_WARN(fwrt, "WRT: Trigger %d is not active, aborting dump\n", | ||||
| 			 tp_id); | ||||
| 		return -EINVAL; | ||||
| 	} | ||||
| 
 | ||||
| 	delay = le32_to_cpu(trig->dump_delay); | ||||
| 	occur = le32_to_cpu(trig->occurrences); | ||||
| 	if (!occur) | ||||
| 		return 0; | ||||
| 
 | ||||
| 	trig->occurrences = cpu_to_le32(--occur); | ||||
| 
 | ||||
| 	/* Check there is an available worker.
 | ||||
| 	 * ffz return value is undefined if no zero exists, | ||||
| 	 * so check against ~0UL first. | ||||
| 	 */ | ||||
| 	if (fwrt->dump.active_wks == ~0UL) | ||||
| 		return -EBUSY; | ||||
| 
 | ||||
| 	idx = ffz(fwrt->dump.active_wks); | ||||
| 
 | ||||
| 	if (idx >= IWL_FW_RUNTIME_DUMP_WK_NUM || | ||||
| 	    test_and_set_bit(fwrt->dump.wks[idx].idx, &fwrt->dump.active_wks)) | ||||
| 		return -EBUSY; | ||||
| 
 | ||||
| 	fwrt->dump.wks[idx].dump_data = *dump_data; | ||||
| 
 | ||||
| 	if (sync) | ||||
| 		delay = 0; | ||||
| 
 | ||||
| 	IWL_WARN(fwrt, | ||||
| 		 "WRT: Collecting data: ini trigger %d fired (delay=%dms).\n", | ||||
| 		 tp_id, (u32)(delay / USEC_PER_MSEC)); | ||||
| 
 | ||||
| 	schedule_delayed_work(&fwrt->dump.wks[idx].wk, usecs_to_jiffies(delay)); | ||||
| 
 | ||||
| 	if (sync) | ||||
| 		iwl_fw_dbg_collect_sync(fwrt, idx); | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| void iwl_fw_error_dump_wk(struct work_struct *work) | ||||
| { | ||||
| 	struct iwl_fwrt_wk_data *wks = | ||||
|  | ||||
| @ -46,7 +46,8 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt, | ||||
| int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt, | ||||
| 			     enum iwl_fw_dbg_trigger trig_type); | ||||
| int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt, | ||||
| 			   struct iwl_fwrt_dump_data *dump_data); | ||||
| 			   struct iwl_fwrt_dump_data *dump_data, | ||||
| 			   bool sync); | ||||
| int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, | ||||
| 		       enum iwl_fw_dbg_trigger trig, const char *str, | ||||
| 		       size_t len, struct iwl_fw_dbg_trigger_tlv *trigger); | ||||
| @ -284,7 +285,7 @@ static inline void iwl_fw_umac_set_alive_err_table(struct iwl_trans *trans, | ||||
| 		trans->dbg.umac_error_event_table = umac_error_event_table; | ||||
| } | ||||
| 
 | ||||
| static inline void iwl_fw_error_collect(struct iwl_fw_runtime *fwrt) | ||||
| static inline void iwl_fw_error_collect(struct iwl_fw_runtime *fwrt, bool sync) | ||||
| { | ||||
| 	enum iwl_fw_ini_time_point tp_id; | ||||
| 
 | ||||
| @ -300,7 +301,7 @@ static inline void iwl_fw_error_collect(struct iwl_fw_runtime *fwrt) | ||||
| 		tp_id = IWL_FW_INI_TIME_POINT_FW_ASSERT; | ||||
| 	} | ||||
| 
 | ||||
| 	iwl_dbg_tlv_time_point(fwrt, tp_id, NULL); | ||||
| 	_iwl_dbg_tlv_time_point(fwrt, tp_id, NULL, sync); | ||||
| } | ||||
| 
 | ||||
| void iwl_fw_error_print_fseq_regs(struct iwl_fw_runtime *fwrt); | ||||
|  | ||||
| @ -683,7 +683,7 @@ static void iwl_dbg_tlv_periodic_trig_handler(struct timer_list *t) | ||||
| 	}; | ||||
| 	int ret; | ||||
| 
 | ||||
| 	ret = iwl_fw_dbg_ini_collect(timer_node->fwrt, &dump_data); | ||||
| 	ret = iwl_fw_dbg_ini_collect(timer_node->fwrt, &dump_data, false); | ||||
| 	if (!ret || ret == -EBUSY) { | ||||
| 		u32 occur = le32_to_cpu(dump_data.trig->occurrences); | ||||
| 		u32 collect_interval = le32_to_cpu(dump_data.trig->data[0]); | ||||
| @ -927,7 +927,7 @@ static bool iwl_dbg_tlv_check_fw_pkt(struct iwl_fw_runtime *fwrt, | ||||
| } | ||||
| 
 | ||||
| static int | ||||
| iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, | ||||
| iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync, | ||||
| 		       struct list_head *active_trig_list, | ||||
| 		       union iwl_dbg_tlv_tp_data *tp_data, | ||||
| 		       bool (*data_check)(struct iwl_fw_runtime *fwrt, | ||||
| @ -946,7 +946,7 @@ iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, | ||||
| 		int ret, i; | ||||
| 
 | ||||
| 		if (!num_data) { | ||||
| 			ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data); | ||||
| 			ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data, sync); | ||||
| 			if (ret) | ||||
| 				return ret; | ||||
| 		} | ||||
| @ -955,7 +955,7 @@ iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, | ||||
| 			if (!data_check || | ||||
| 			    data_check(fwrt, &dump_data, tp_data, | ||||
| 				       le32_to_cpu(dump_data.trig->data[i]))) { | ||||
| 				ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data); | ||||
| 				ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data, sync); | ||||
| 				if (ret) | ||||
| 					return ret; | ||||
| 
 | ||||
| @ -1043,9 +1043,10 @@ static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt, | ||||
| 			    enum iwl_fw_ini_time_point tp_id, | ||||
| 			    union iwl_dbg_tlv_tp_data *tp_data) | ||||
| void _iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt, | ||||
| 			     enum iwl_fw_ini_time_point tp_id, | ||||
| 			     union iwl_dbg_tlv_tp_data *tp_data, | ||||
| 			     bool sync) | ||||
| { | ||||
| 	struct list_head *hcmd_list, *trig_list; | ||||
| 
 | ||||
| @ -1060,12 +1061,12 @@ void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt, | ||||
| 	switch (tp_id) { | ||||
| 	case IWL_FW_INI_TIME_POINT_EARLY: | ||||
| 		iwl_dbg_tlv_init_cfg(fwrt); | ||||
| 		iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data, NULL); | ||||
| 		iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL); | ||||
| 		break; | ||||
| 	case IWL_FW_INI_TIME_POINT_AFTER_ALIVE: | ||||
| 		iwl_dbg_tlv_apply_buffers(fwrt); | ||||
| 		iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list); | ||||
| 		iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data, NULL); | ||||
| 		iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL); | ||||
| 		break; | ||||
| 	case IWL_FW_INI_TIME_POINT_PERIODIC: | ||||
| 		iwl_dbg_tlv_set_periodic_trigs(fwrt); | ||||
| @ -1075,13 +1076,13 @@ void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt, | ||||
| 	case IWL_FW_INI_TIME_POINT_MISSED_BEACONS: | ||||
| 	case IWL_FW_INI_TIME_POINT_FW_DHC_NOTIFICATION: | ||||
| 		iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list); | ||||
| 		iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data, | ||||
| 		iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, | ||||
| 				       iwl_dbg_tlv_check_fw_pkt); | ||||
| 		break; | ||||
| 	default: | ||||
| 		iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list); | ||||
| 		iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data, NULL); | ||||
| 		iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL); | ||||
| 		break; | ||||
| 	} | ||||
| } | ||||
| IWL_EXPORT_SYMBOL(iwl_dbg_tlv_time_point); | ||||
| IWL_EXPORT_SYMBOL(_iwl_dbg_tlv_time_point); | ||||
|  | ||||
| @ -1,6 +1,6 @@ | ||||
| /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ | ||||
| /*
 | ||||
|  * Copyright (C) 2018-2020 Intel Corporation | ||||
|  * Copyright (C) 2018-2021 Intel Corporation | ||||
|  */ | ||||
| #ifndef __iwl_dbg_tlv_h__ | ||||
| #define __iwl_dbg_tlv_h__ | ||||
| @ -48,9 +48,25 @@ void iwl_dbg_tlv_free(struct iwl_trans *trans); | ||||
| void iwl_dbg_tlv_alloc(struct iwl_trans *trans, const struct iwl_ucode_tlv *tlv, | ||||
| 		       bool ext); | ||||
| void iwl_dbg_tlv_init(struct iwl_trans *trans); | ||||
| void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt, | ||||
| 			    enum iwl_fw_ini_time_point tp_id, | ||||
| 			    union iwl_dbg_tlv_tp_data *tp_data); | ||||
| void _iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt, | ||||
| 			     enum iwl_fw_ini_time_point tp_id, | ||||
| 			     union iwl_dbg_tlv_tp_data *tp_data, | ||||
| 			     bool sync); | ||||
| 
 | ||||
| static inline void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt, | ||||
| 					  enum iwl_fw_ini_time_point tp_id, | ||||
| 					  union iwl_dbg_tlv_tp_data *tp_data) | ||||
| { | ||||
| 	_iwl_dbg_tlv_time_point(fwrt, tp_id, tp_data, false); | ||||
| } | ||||
| 
 | ||||
| static inline void iwl_dbg_tlv_time_point_sync(struct iwl_fw_runtime *fwrt, | ||||
| 					       enum iwl_fw_ini_time_point tp_id, | ||||
| 					       union iwl_dbg_tlv_tp_data *tp_data) | ||||
| { | ||||
| 	_iwl_dbg_tlv_time_point(fwrt, tp_id, tp_data, true); | ||||
| } | ||||
| 
 | ||||
| void iwl_dbg_tlv_del_timers(struct iwl_trans *trans); | ||||
| 
 | ||||
| #endif /* __iwl_dbg_tlv_h__*/ | ||||
|  | ||||
| @ -1,6 +1,6 @@ | ||||
| // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 | ||||
| /*
 | ||||
|  * Copyright (C) 2003-2014, 2018-2020 Intel Corporation | ||||
|  * Copyright (C) 2003-2014, 2018-2021 Intel Corporation | ||||
|  * Copyright (C) 2015-2016 Intel Deutschland GmbH | ||||
|  */ | ||||
| #include <linux/delay.h> | ||||
| @ -468,5 +468,5 @@ void iwl_trans_sync_nmi_with_addr(struct iwl_trans *trans, u32 inta_addr, | ||||
| 	if (interrupts_enabled) | ||||
| 		iwl_trans_interrupts(trans, true); | ||||
| 
 | ||||
| 	iwl_trans_fw_error(trans); | ||||
| 	iwl_trans_fw_error(trans, false); | ||||
| } | ||||
|  | ||||
| @ -78,7 +78,7 @@ struct iwl_cfg; | ||||
|  *	there are Tx packets pending in the transport layer. | ||||
|  *	Must be atomic | ||||
|  * @nic_error: error notification. Must be atomic and must be called with BH | ||||
|  *	disabled. | ||||
|  *	disabled, unless the sync parameter is true. | ||||
|  * @cmd_queue_full: Called when the command queue gets full. Must be atomic and | ||||
|  *	called with BH disabled. | ||||
|  * @nic_config: configure NIC, called before firmware is started. | ||||
| @ -102,7 +102,7 @@ struct iwl_op_mode_ops { | ||||
| 	void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue); | ||||
| 	bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state); | ||||
| 	void (*free_skb)(struct iwl_op_mode *op_mode, struct sk_buff *skb); | ||||
| 	void (*nic_error)(struct iwl_op_mode *op_mode); | ||||
| 	void (*nic_error)(struct iwl_op_mode *op_mode, bool sync); | ||||
| 	void (*cmd_queue_full)(struct iwl_op_mode *op_mode); | ||||
| 	void (*nic_config)(struct iwl_op_mode *op_mode); | ||||
| 	void (*wimax_active)(struct iwl_op_mode *op_mode); | ||||
| @ -181,9 +181,9 @@ static inline void iwl_op_mode_free_skb(struct iwl_op_mode *op_mode, | ||||
| 	op_mode->ops->free_skb(op_mode, skb); | ||||
| } | ||||
| 
 | ||||
| static inline void iwl_op_mode_nic_error(struct iwl_op_mode *op_mode) | ||||
| static inline void iwl_op_mode_nic_error(struct iwl_op_mode *op_mode, bool sync) | ||||
| { | ||||
| 	op_mode->ops->nic_error(op_mode); | ||||
| 	op_mode->ops->nic_error(op_mode, sync); | ||||
| } | ||||
| 
 | ||||
| static inline void iwl_op_mode_cmd_queue_full(struct iwl_op_mode *op_mode) | ||||
|  | ||||
| @ -1385,14 +1385,14 @@ iwl_trans_release_nic_access(struct iwl_trans *trans) | ||||
| 	__release(nic_access); | ||||
| } | ||||
| 
 | ||||
| static inline void iwl_trans_fw_error(struct iwl_trans *trans) | ||||
| static inline void iwl_trans_fw_error(struct iwl_trans *trans, bool sync) | ||||
| { | ||||
| 	if (WARN_ON_ONCE(!trans->op_mode)) | ||||
| 		return; | ||||
| 
 | ||||
| 	/* prevent double restarts due to the same erroneous FW */ | ||||
| 	if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) { | ||||
| 		iwl_op_mode_nic_error(trans->op_mode); | ||||
| 		iwl_op_mode_nic_error(trans->op_mode, sync); | ||||
| 		trans->state = IWL_TRANS_NO_FW; | ||||
| 	} | ||||
| } | ||||
|  | ||||
| @ -1400,7 +1400,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) | ||||
| 	 * can't recover this since we're already half suspended. | ||||
| 	 */ | ||||
| 	if (!mvm->fw_restart && fw_error) { | ||||
| 		iwl_fw_error_collect(&mvm->fwrt); | ||||
| 		iwl_fw_error_collect(&mvm->fwrt, false); | ||||
| 	} else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { | ||||
| 		struct iwl_mvm_reprobe *reprobe; | ||||
| 
 | ||||
| @ -1451,7 +1451,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) | ||||
| 			} | ||||
| 		} | ||||
| 
 | ||||
| 		iwl_fw_error_collect(&mvm->fwrt); | ||||
| 		iwl_fw_error_collect(&mvm->fwrt, false); | ||||
| 
 | ||||
| 		if (fw_error && mvm->fw_restart > 0) | ||||
| 			mvm->fw_restart--; | ||||
| @ -1459,13 +1459,23 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode) | ||||
| static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode, bool sync) | ||||
| { | ||||
| 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); | ||||
| 
 | ||||
| 	if (!test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) | ||||
| 		iwl_mvm_dump_nic_error_log(mvm); | ||||
| 
 | ||||
| 	if (sync) { | ||||
| 		iwl_fw_error_collect(&mvm->fwrt, true); | ||||
| 		/*
 | ||||
| 		 * Currently, the only case for sync=true is during | ||||
| 		 * shutdown, so just stop in this case. If/when that | ||||
| 		 * changes, we need to be a bit smarter here. | ||||
| 		 */ | ||||
| 		return; | ||||
| 	} | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * If the firmware crashes while we're already considering it | ||||
| 	 * to be dead then don't ask for a restart, that cannot do | ||||
|  | ||||
| @ -1656,7 +1656,7 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans) | ||||
| 
 | ||||
| 	/* The STATUS_FW_ERROR bit is set in this function. This must happen
 | ||||
| 	 * before we wake up the command caller, to ensure a proper cleanup. */ | ||||
| 	iwl_trans_fw_error(trans); | ||||
| 	iwl_trans_fw_error(trans, false); | ||||
| 
 | ||||
| 	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); | ||||
| 	wake_up(&trans->wait_command_queue); | ||||
|  | ||||
		Loading…
	
		Reference in New Issue
	
	Block a user