# Training function `train`

device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')


def get_acc_class(model_res, batch_y):
	"""
	Accuracy counter for a classifier.

	Both arguments are 2-D tensors of shape (batch, num_classes); the
	predicted / target class is the argmax along dim 1.

	Returns the number of correct predictions in the batch (int).
	"""
	batch_y = torch.argmax(batch_y, dim=1)
	model_res = torch.argmax(model_res, dim=1)
	acc = torch.sum(torch.eq(batch_y, model_res)).item()
	return acc


def train(dataset, model, epochs=5, debug=True):
	"""
	Train a PyTorch model.

	The dataset is randomly split 90/10 into train/validation parts and the
	model is optimized with Adam (lr=1e-3) against an MSE loss for `epochs`
	passes. Per-epoch loss and accuracy (via get_acc_class) are printed when
	`debug` is True. Returns None; `model` is trained in place.
	"""
	optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
	loss_fn = nn.MSELoss()

	batch_size = 64
	model = model.to(device)
	k = 0.1  # fraction of the dataset held out for validation

	# Split the dataset into train and validation parts
	train_dataset, val_dataset = torch.utils.data.random_split(
		dataset, [round(len(dataset) * (1 - k)), round(len(dataset) * k)]
	)

	train_count = len(train_dataset)
	val_count = len(val_dataset)
	# Combined denominator so the progress indicator spans both phases
	# and never exceeds 100% (the original divided by train_count only).
	total_count = train_count + val_count

	train_loader = DataLoader(
		train_dataset,
		batch_size=batch_size,
		drop_last=False,
		shuffle=True
	)

	val_loader = DataLoader(
		val_dataset,
		batch_size=batch_size,
		drop_last=False,
		shuffle=False
	)

	for step_index in range(epochs):

		loss_train = 0
		loss_val = 0
		acc_train = 0
		acc_val = 0
		count_train = 0
		count_val = 0
		batch_iter = 0

		model.train()

		# Training phase
		for batch_x, batch_y in train_loader:

			batch_x = batch_x.to(device)
			batch_y = batch_y.to(device)

			# Forward pass
			model_res = model(batch_x)

			# Loss between the model output and the targets
			loss_value = loss_fn(model_res, batch_y)
			loss_train = loss_train + loss_value.item()
			# BUG FIX: the original called undefined `get_acc`
			acc_train = acc_train + get_acc_class(model_res, batch_y)

			# Backward pass
			optimizer.zero_grad()
			loss_value.backward()

			# Optimizer step
			optimizer.step()

			count_train = count_train + len(batch_x)
			batch_iter = batch_iter + len(batch_x)
			batch_iter_value = round(batch_iter / total_count * 100)
			if debug:
				# '\r' rewinds the line for an in-place progress indicator
				print(f"\rStep {step_index+1}, {batch_iter_value}%", end='')

			del batch_x, batch_y

			# Release cached CUDA memory
			if torch.cuda.is_available():
				torch.cuda.empty_cache()

		model.eval()

		# Validation phase — no gradients needed here
		with torch.no_grad():
			for batch_x, batch_y in val_loader:

				batch_x = batch_x.to(device)
				batch_y = batch_y.to(device)

				# Forward pass
				model_res = model(batch_x)

				# Loss between the model output and the targets
				loss_value = loss_fn(model_res, batch_y)
				loss_val = loss_val + loss_value.item()
				acc_val = acc_val + get_acc_class(model_res, batch_y)

				count_val = count_val + len(batch_x)
				batch_iter = batch_iter + len(batch_x)
				batch_iter_value = round(batch_iter / total_count * 100)

				if debug:
					print(f"\rStep {step_index+1}, {batch_iter_value}%", end='')

				del batch_x, batch_y

		# Per-epoch summary
		if debug:
			loss_train = '%.3e' % loss_train
			loss_val = '%.3e' % loss_val

			acc_train = str(round(acc_train / count_train * 10000) / 10000).ljust(6, "0")
			acc_val = str(round(acc_val / count_val * 10000) / 10000).ljust(6, "0")

			print("\r", end='')
			print(f"Step {step_index+1}, " +
				f"acc: {acc_train}, acc_val: {acc_val}, " +
				f"loss: {loss_train}, loss_val: {loss_val}"
			)