🚩🚩🚩 Hugging Face hands-on series: full table of contents
If you have any questions, feel free to leave a comment below.
All code in this article was run in PyCharm, and the companion code resources have been uploaded.
Build Your Own GPT from Scratch, Part 1: Text Data Preprocessing
Build Your Own GPT from Scratch, Part 2: Language Model Training
3 Data loading function
import os
import pickle
from datetime import datetime
from os.path import join

import torch
import transformers
from torch.utils.data import DataLoader

# CPMDataset, collate_fn and calculate_acc used below are helpers defined elsewhere in the project.

def load_dataset(logger, args):
    """Load the training set."""
    logger.info("loading training dataset")
    train_path = args.train_path
    with open(train_path, "rb") as f:
        train_list = pickle.load(f)
    # for a quick test, train on a small slice only
    # train_list = train_list[:24]
    train_dataset = CPMDataset(train_list, args.max_len)
    return train_dataset
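The CPMDataset class itself is not shown in this post. Below is a minimal sketch of what it might look like, assuming each element of train_list is already a list of token ids produced by the preprocessing step, so the dataset only has to truncate to max_len and hand back a LongTensor; the behaviour here is an assumption for illustration, not the series' exact implementation.

import torch
from torch.utils.data import Dataset

class CPMDataset(Dataset):
    """Hypothetical sketch: wraps a list of tokenized samples."""

    def __init__(self, input_list, max_len):
        self.input_list = input_list  # list of lists of token ids
        self.max_len = max_len        # samples longer than this are truncated

    def __len__(self):
        return len(self.input_list)

    def __getitem__(self, index):
        input_ids = self.input_list[index][: self.max_len]
        return torch.tensor(input_ids, dtype=torch.long)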
4 Training function
def train(model, logger, train_dataset, args):
    train_dataloader = DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=True,
        num_workers=args.num_workers, collate_fn=collate_fn, drop_last=True
    )
    logger.info("total_steps:{}".format(len(train_dataloader) * args.epochs))
    t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.epochs
    optimizer = transformers.AdamW(model.parameters(), lr=args.lr, eps=args.eps)
    # set up linear warmup for the learning rate
    scheduler = transformers.get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
    )

    logger.info("start training")
    train_losses = []  # average loss of each epoch
    # ========== start training ========== #
    for epoch in range(args.epochs):
        train_loss = train_epoch(
            model=model, train_dataloader=train_dataloader,
            optimizer=optimizer, scheduler=scheduler,
            logger=logger, epoch=epoch, args=args
        )
        train_losses.append(round(train_loss, 4))
        logger.info("train loss list:{}".format(train_losses))

    logger.info("training finished")
    logger.info("train_losses:{}".format(train_losses))
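The DataLoader above relies on a collate_fn that is not defined in this post. Here is a minimal sketch of such a function, assuming each sample coming out of CPMDataset is a 1-D LongTensor, and using hypothetical PAD_ID / IGNORE_INDEX constants (in practice the pad id comes from the tokenizer and the ignore value should match args.ignore_index):

from torch.nn.utils.rnn import pad_sequence

PAD_ID = 0           # hypothetical pad token id; use the tokenizer's real pad id
IGNORE_INDEX = -100  # label value skipped by the loss; should match args.ignore_index

def collate_fn(batch):
    # batch: list of 1-D LongTensors of varying lengths (one per sample)
    # Pad inputs with PAD_ID and labels with IGNORE_INDEX so that padded
    # positions do not contribute to the language-modelling loss.
    input_ids = pad_sequence(batch, batch_first=True, padding_value=PAD_ID)
    labels = pad_sequence(batch, batch_first=True, padding_value=IGNORE_INDEX)
    return input_ids, labels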
5 Per-epoch training function

def train_epoch(model, train_dataloader, optimizer, scheduler, logger, epoch, args):
    model.train()
    device = args.device
    ignore_index = args.ignore_index
    epoch_start_time = datetime.now()
    total_loss = 0          # running sum of the loss over the whole epoch
    epoch_correct_num = 0   # number of correctly predicted tokens in this epoch
    epoch_total_num = 0     # total number of predicted tokens in this epoch

    for batch_idx, (input_ids, labels) in enumerate(train_dataloader):
        # catch CUDA out-of-memory exceptions
        try:
            input_ids = input_ids.to(device)
            labels = labels.to(device)
            outputs = model.forward(input_ids, labels=labels)
            logits = outputs.logits
            loss = outputs.loss
            loss = loss.mean()

            # count correct / total predicted tokens for this batch
            batch_correct_num, batch_total_num = calculate_acc(logits, labels, ignore_index=ignore_index)
            # accumulate correct / total predicted tokens for the epoch
            epoch_correct_num += batch_correct_num
            epoch_total_num += batch_total_num
            # accuracy of this batch
            batch_acc = batch_correct_num / batch_total_num

            total_loss += loss.item()
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps

            loss.backward()
            # gradient clipping
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)

            # update the parameters after accumulating gradients for the configured number of steps
            if (batch_idx + 1) % args.gradient_accumulation_steps == 0:
                optimizer.step()       # update parameters
                scheduler.step()       # update the learning rate
                optimizer.zero_grad()  # clear gradients

            if (batch_idx + 1) % args.log_step == 0:
                logger.info(
                    "batch {} of epoch {}, loss {}, batch_acc {}, lr {}".format(
                        batch_idx + 1, epoch + 1,
                        loss.item() * args.gradient_accumulation_steps,
                        batch_acc, scheduler.get_lr()))

            del input_ids, outputs

        except RuntimeError as exception:
            if "out of memory" in str(exception):
                logger.info("WARNING: ran out of memory")
                if hasattr(torch.cuda, "empty_cache"):
                    torch.cuda.empty_cache()
            else:
                logger.info(str(exception))
                raise exception

    # record the average loss and accuracy of the current epoch
    epoch_mean_loss = total_loss / len(train_dataloader)
    epoch_mean_acc = epoch_correct_num / epoch_total_num
    logger.info("epoch {}: loss {}, predict_acc {}".format(epoch + 1, epoch_mean_loss, epoch_mean_acc))

    # save model
    logger.info("saving model for epoch {}".format(epoch + 1))
    model_path = join(args.save_model_path, "epoch{}".format(epoch + 1))
    if not os.path.exists(model_path):
        os.mkdir(model_path)
    model_to_save = model.module if hasattr(model, "module") else model
    model_to_save.save_pretrained(model_path)
    logger.info("epoch {} finished".format(epoch + 1))
    epoch_finish_time = datetime.now()
    logger.info("time for one epoch: {}".format(epoch_finish_time - epoch_start_time))

    return epoch_mean_loss
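calculate_acc is also defined outside this post. A plausible sketch is shown below, assuming it measures next-token accuracy over non-padding positions (the one-position shift mirrors how GPT-2 computes its causal language-modelling loss); treat it as an illustration rather than the series' exact code.

def calculate_acc(logits, labels, ignore_index=-100):
    # Shift so the prediction at position i is compared with the token at position i + 1,
    # matching the causal language-modelling objective.
    logits = logits[..., :-1, :].contiguous().view(-1, logits.size(-1))
    labels = labels[..., 1:].contiguous().view(-1)

    preds = logits.argmax(dim=-1)            # most likely token at every position
    non_pad_mask = labels.ne(ignore_index)   # skip padded positions
    n_correct = preds.eq(labels).masked_select(non_pad_mask).sum().item()
    n_total = non_pad_mask.sum().item()
    return n_correct, n_total

train_epoch only uses the returned pair as (correct_count, total_count), so any implementation with that contract would plug in here.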