Here is the relevant part of train.py:
# Define Loss function.
loss_func = BFLoss()

system = BeamTasNetSystem(pretrain=conf['main_args']['pretrain'],
                          model=model,
                          loss_func=loss_func,
                          optimizer=optimizer,
                          train_loader=train_loader,
                          val_loader=val_loader,
                          scheduler=scheduler,
                          config=conf)

# Define callbacks
checkpoint_dir = os.path.join(exp_dir, 'checkpoints/')
checkpoint = ModelCheckpoint(checkpoint_dir, monitor='val_loss',
                             mode='min', save_top_k=5, verbose=1)
early_stopping = False
if conf['training']['early_stop']:
    early_stopping = EarlyStopping(monitor='val_loss', patience=10, verbose=1)
這是 "D:\ZhangXu\Code\Beam-Guided-TasNet-main\Beam-Guided-TasNet-main\system.py"的錯誤行代碼
class BeamTasNetSystem(System):
    def __init__(self, pretrain, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.pretrain = pretrain
Here is the failing code in C:\Users\SASPL-1\anaconda3\envs\zx_38\lib\site-packages\asteroid\engine\system.py:
default_monitor: str = "val_loss"

def __init__(
    self,
    model,
    optimizer,
    loss_func,
    train_loader,
    val_loader=None,
    scheduler=None,
    config=None,
):
    super().__init__()
    self.model = model
    self.optimizer = optimizer
    self.loss_func = loss_func
    self.train_loader = train_loader
    self.val_loader = val_loader
    self.scheduler = scheduler
    self.config = {} if config is None else config
    # hparams will be logged to Tensorboard as text variables.
    # summary writer doesn't support None for now, convert to strings.
    # See https://github.com/pytorch/pytorch/issues/33140
    self.hparams = Namespace(**self.config_to_hparams(self.config))

def forward(self, *args, **kwargs):
    """Applies forward pass of the model.

    Returns:
        :class:`torch.Tensor`
    """
    return self.model(*args, **kwargs)
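The assignment that ends up failing is the `self.hparams = Namespace(**self.config_to_hparams(self.config))` line above. For readers unfamiliar with that step, the snippet below is only a rough, hypothetical stand-in for what the conversion does (flatten the nested config and stringify None values so TensorBoard can log them); it is not asteroid's actual implementation:

# Hypothetical, simplified stand-in for System.config_to_hparams (illustration only):
# flatten the nested config dict and replace None with the string "None",
# since the TensorBoard summary writer cannot log None values directly.
def config_to_hparams_sketch(dic):
    flat = {}
    for section, params in dic.items():
        if isinstance(params, dict):
            for key, val in params.items():
                flat[f"{section}_{key}"] = "None" if val is None else val
        else:
            flat[section] = "None" if params is None else params
    return flat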
這是"C:\Users\SASPL-1\anaconda3\envs\zx_38\lib\site-packages\torch\nn\modules\module.py"的錯誤行代碼
def __setattr__(self, name: str, value: Union[Tensor, 'Module']) -> None:
    def remove_from(*dicts_or_sets):
        for d in dicts_or_sets:
            if name in d:
                if isinstance(d, dict):
                    del d[name]
                else:
                    d.discard(name)

    params = self.__dict__.get('_parameters')
    if isinstance(value, Parameter):
        if params is None:
            raise AttributeError(
                "cannot assign parameters before Module.__init__() call")
        remove_from(self.__dict__, self._buffers, self._modules, self._non_persistent_buffers_set)
        self.register_parameter(name, value)
    elif params is not None and name in params:
        if value is not None:
            raise TypeError("cannot assign '{}' as parameter '{}' "
                            "(torch.nn.Parameter or None expected)"
                            .format(torch.typename(value), name))
        self.register_parameter(name, value)
    else:
        modules = self.__dict__.get('_modules')
        if isinstance(value, Module):
            if modules is None:
                raise AttributeError(
                    "cannot assign module before Module.__init__() call")
            remove_from(self.__dict__, self._parameters, self._buffers, self._non_persistent_buffers_set)
            modules[name] = value
        elif modules is not None and name in modules:
            if value is not None:
                raise TypeError("cannot assign '{}' as child module '{}' "
                                "(torch.nn.Module or None expected)"
                                .format(torch.typename(value), name))
            modules[name] = value
        else:
            buffers = self.__dict__.get('_buffers')
            if buffers is not None and name in buffers:
                if value is not None and not isinstance(value, torch.Tensor):
                    raise TypeError("cannot assign '{}' as buffer '{}' "
                                    "(torch.Tensor or None expected)"
                                    .format(torch.typename(value), name))
                buffers[name] = value
            else:
                object.__setattr__(self, name, value)
./run.sh --id 0,1,2,3 --stage 3
Results from the following experiment will be stored in exp/train_convtasnet_reverb2reverb_8kmin_e2ff7662
Stage 3: Training
Global seed set to 0
{'data': {'bf_dir': None,
          'mode': 'min',
          'n_src': 2,
          'sample_rate': 8000,
          'task': 'reverb2reverb',
          'train_dir': 'data/2speakers/wav8k/min/tr',
          'valid_dir': 'data/2speakers/wav8k/min/cv'},
 'filterbank': {'kernel_size': 16, 'n_channels': 4, 'n_filters': 256, 'stride': 8},
 'main_args': {'exp_dir': 'exp/train_convtasnet_reverb2reverb_8kmin_e2ff7662/',
               'help': None,
               'pretrain': True},
 'masknet': {'audio_chan': 4,
             'bn_chan': 128,
             'causal': False,
             'conv_kernel_size': 3,
             'hid_chan': 256,
             'in_chan': 256,
             'mask_act': 'relu',
             'n_blocks': 8,
             'n_repeats': 3,
             'norm_type': 'gLN',
             'skip_chan': 128},
 'optim': {'lr': 0.001, 'optimizer': 'adam', 'weight_decay': 0.0},
 'positional arguments': {},
 'training': {'batch_size': 12,
              'early_stop': False,
              'epochs': 75,
              'half_lr': True,
              'num_workers': 8}}
Torch MVDR causality: False
Using stft {'n_filters': 4096, 'kernel_size': 4096, 'stride': 1024}
load data from data/2speakers/wav8k/min/tr\mix_reverb.json and ['data/2speakers/wav8k/min/tr\\s1_reverb.json', 'data/2speakers/wav8k/min/tr\\s2_reverb.json']
Drop 2925 utts(0.26 h) from 20000 (shorter than 32000 samples)
load data from data/2speakers/wav8k/min/cv\mix_reverb.json and ['data/2speakers/wav8k/min/cv\\s1_reverb.json', 'data/2speakers/wav8k/min/cv\\s2_reverb.json']
Drop 701 utts(0.06 h) from 5000 (shorter than 32000 samples)
Traceback (most recent call last):
  File "train.py", line 137, in <module>
    main(arg_dic)
  File "train.py", line 67, in main
    system = BeamTasNetSystem(pretrain=conf['main_args']['pretrain'],
  File "D:\ZhangXu\Code\Beam-Guided-TasNet-main\Beam-Guided-TasNet-main\system.py", line 15, in __init__
    super().__init__(*args, **kwargs)
  File "C:\Users\SASPL-1\anaconda3\envs\zx_38\lib\site-packages\asteroid\engine\system.py", line 61, in __init__
    self.hparams = Namespace(**self.config_to_hparams(self.config))
  File "C:\Users\SASPL-1\anaconda3\envs\zx_38\lib\site-packages\torch\nn\modules\module.py", line 1225, in __setattr__
    object.__setattr__(self, name, value)
AttributeError: can't set attribute
Stage 4: Training
mv: cannot stat 'exp/train_convtasnet_reverb2reverb_8kmin_e2ff7662/checkpoints': No such file or directory
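For what it's worth, here is a minimal sketch that reproduces the same failure mode, assuming the installed pytorch-lightning is a version in which LightningModule.hparams is a read-only property; that is only my assumption about the environment, not something confirmed by the original author or the traceback alone:

# Minimal sketch (assumption: a pytorch-lightning version where
# LightningModule.hparams is a property without a setter).
# Plain assignment then falls through nn.Module.__setattr__ to
# object.__setattr__, which raises "AttributeError: can't set attribute".
from argparse import Namespace
import pytorch_lightning as pl

class HparamsDemo(pl.LightningModule):
    def __init__(self, config):
        super().__init__()
        # Same assignment pattern as asteroid's System.__init__ in the traceback above:
        self.hparams = Namespace(**config)

try:
    HparamsDemo({"lr": 0.001})
except AttributeError as err:
    print("Reproduced:", err)  # only raised if hparams is read-only in this version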
This code comes from GitHub. I asked the original author and he has never run into this problem. He runs the code on Linux, while I am running it on Windows; could that be related to the error? Or could it be an issue with the versions of torch and pytorch-lightning I installed? I installed the latest version of each. I'm just getting started, so any help would be appreciated. Thanks!
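In case it helps narrow things down, this is just a standard way to print the versions in question (nothing specific to this project):

# Print the installed versions mentioned in the question.
import torch
import pytorch_lightning as pl
from importlib.metadata import version  # available on Python 3.8+

print("torch:", torch.__version__)
print("pytorch-lightning:", pl.__version__)
print("asteroid:", version("asteroid"))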