
YOLOv5: Surprisingly Powerful -- Mask Detection During the COVID-19 Pandemic

...
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Train a YOLOv5 model on a custom dataset

Usage:
    $ python path/to/train.py --data coco128.yaml --weights yolov5s.pt --img 640
"""
import argparse
import math
import os
import random
import sys
import time
from copy import deepcopy
from datetime import datetime
from pathlib import Path

import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
import yaml
from torch.cuda import amp
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.optim import SGD, Adam, lr_scheduler
from tqdm import tqdm

FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

import val  # for end-of-epoch mAP
from models.experimental import attempt_load
from models.yolo import Model
from utils.autoanchor import check_anchors
from utils.autobatch import check_train_batch_size
from utils.callbacks import Callbacks
from utils.datasets import create_dataloader
from utils.downloads import attempt_download
from utils.general import (LOGGER, NCOLS, check_dataset, check_file, check_git_status, check_img_size,
                           check_requirements, check_suffix, check_yaml, colorstr, get_latest_run, increment_path,
                           init_seeds, intersect_dicts, labels_to_class_weights, labels_to_image_weights, methods,
                           one_cycle, print_args, print_mutation, strip_optimizer)
from utils.loggers import Loggers
from utils.loggers.wandb.wandb_utils import check_wandb_resume
from utils.loss import ComputeLoss
from utils.metrics import fitness
from utils.plots import plot_evolve, plot_labels
from utils.torch_utils import EarlyStopping, ModelEMA, de_parallel, select_device, torch_distributed_zero_first

LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1))  # https://pytorch.org/docs/stable/elastic/run.html
RANK = int(os.getenv('RANK', -1))
WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))


def train(hyp,  # path/to/hyp.yaml or hyp dictionary
          opt,
          device,
          callbacks
          ):
    save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze = \
        Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \
        opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze

    # Directories
    w = save_dir / 'weights'  # weights dir
    (w.parent if evolve else w).mkdir(parents=True, exist_ok=True)  # make dir
    last, best = w / 'last.pt', w / 'best.pt'

    # Hyperparameters
    if isinstance(hyp, str):
        with open(hyp, errors='ignore') as f:
            hyp = yaml.safe_load(f)  # load hyps dict
    LOGGER.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))

    # Save run settings
    with open(save_dir / 'hyp.yaml', 'w') as f:
        yaml.safe_dump(hyp, f, sort_keys=False)
    with open(save_dir / 'opt.yaml', 'w') as f:
        yaml.safe_dump(vars(opt), f, sort_keys=False)
    data_dict = None

    # Loggers
    if RANK in [-1, 0]:
        loggers = Loggers(save_dir, weights, opt, hyp, LOGGER)  # loggers instance
        if loggers.wandb:
            data_dict = loggers.wandb.data_dict
            if resume:
                weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp

        # Register actions
        for k in methods(loggers):
            callbacks.register_action(k, callback=getattr(loggers, k))

    # Config
    plots = not evolve  # create plots
    cuda = device.type != 'cpu'
    init_seeds(1 + RANK)
    with torch_distributed_zero_first(LOCAL_RANK):
        data_dict = data_dict or check_dataset(data)  # check if None
    train_path, val_path = data_dict['train'], data_dict['val']
    nc = 1 if single_cls else int(data_dict['nc'])  # number of classes
    names = ['item'] if single_cls and len(data_dict['names']) != 1 else data_dict['names']  # class names
    assert len(names) == nc, f'{len(names)} names found for nc={nc} dataset in {data}'  # check
    is_coco = isinstance(val_path, str) and val_path.endswith('coco/val2017.txt')  # COCO dataset

    # Model
    check_suffix(weights, '.pt')  # check weights
    pretrained = weights.endswith('.pt')
    if pretrained:
        with torch_distributed_zero_first(LOCAL_RANK):
            weights = attempt_download(weights)  # download if not found locally
        ckpt = torch.load(weights, map_location=device)  # load checkpoint
        model = Model(cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create
        exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume else []  # exclude keys
        csd = ckpt['model'].float().state_dict()  # checkpoint state_dict as FP32
        csd = intersect_dicts(csd, model.state_dict(), exclude=exclude)  # intersect
        model.load_state_dict(csd, strict=False)  # load
        LOGGER.info(f'Transferred {len(csd)}/{len(model.state_dict())} items from {weights}')  # report
    else:
        model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create

    # Freeze
    freeze = [f'model.{x}.' for x in range(freeze)]  # layers to freeze
    for k, v in model.named_parameters():
        v.requires_grad = True  # train all layers
        if any(x in k for x in freeze):
            LOGGER.info(f'freezing {k}')
            v.requires_grad = False

    # Image size
    gs = max(int(model.stride.max()), 32)  # grid size (max stride)
    imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2)  # verify imgsz is gs-multiple

    # Batch size
    if RANK == -1 and batch_size == -1:  # single-GPU only, estimate best batch size
        batch_size = check_train_batch_size(model, imgsz)

    # Optimizer
    nbs = 64  # nominal batch size
    accumulate = max(round(nbs / batch_size), 1)  # accumulate loss before optimizing
    hyp['weight_decay'] *= batch_size * accumulate / nbs  # scale weight_decay
    LOGGER.info(f"Scaled weight_decay = {hyp['weight_decay']}")

    g0, g1, g2 = [], [], []  # optimizer parameter groups
    for v in model.modules():
        if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):  # bias
            g2.append(v.bias)
        if isinstance(v, nn.BatchNorm2d):  # weight (no decay)
            g0.append(v.weight)
        elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):  # weight (with decay)
            g1.append(v.weight)

    if opt.adam:
        optimizer = Adam(g0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999))  # adjust beta1 to momentum
    else:
        optimizer = SGD(g0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)

    optimizer.add_param_group({'params': g1, 'weight_decay': hyp['weight_decay']})  # add g1 with weight_decay
    optimizer.add_param_group({'params': g2})  # add g2 (biases)
    LOGGER.info(f"{colorstr('optimizer:')} {type(optimizer).__name__} with parameter groups "
                f"{len(g0)} weight, {len(g1)} weight (no decay), {len(g2)} bias")
    del g0, g1, g2

    # Scheduler
    if opt.linear_lr:
        lf = lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp['lrf']  # linear
    else:
        lf = one_cycle(1, hyp['lrf'], epochs)  # cosine 1->hyp['lrf']
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)  # plot_lr_scheduler(optimizer, scheduler, epochs)

    # EMA
    ema = ModelEMA(model) if RANK in [-1, 0] else None

    # Resume
    start_epoch, best_fitness = 0, 0.0
    if pretrained:
        # Optimizer
        if ckpt['optimizer'] is not None:
            optimizer.load_state_dict(ckpt['optimizer'])
            best_fitness = ckpt['best_fitness']

        # EMA
        if ema and ckpt.get('ema'):
            ema.ema.load_state_dict(ckpt['ema'].float().state_dict())
            ema.updates = ckpt['updates']

        # Epochs
        start_epoch = ckpt['epoch'] + 1
        if resume:
            assert start_epoch > 0, f'{weights} training to {epochs} epochs is finished, nothing to resume.'
        if epochs < start_epoch:
            LOGGER.info(f"{weights} has been trained for {ckpt['epoch']} epochs. Fine-tuning for {epochs} more epochs.")
            epochs += ckpt['epoch']  # finetune additional epochs

        del ckpt, csd

    # DP mode
    if cuda and RANK == -1 and torch.cuda.device_count() > 1:
        LOGGER.warning('WARNING: DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n'
                       'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.')
        model = torch.nn.DataParallel(model)

    # SyncBatchNorm
    if opt.sync_bn and cuda and RANK != -1:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
        LOGGER.info('Using SyncBatchNorm()')

    # Trainloader
    train_loader, dataset = create_dataloader(train_path, imgsz, batch_size // WORLD_SIZE, gs, single_cls,
                                              hyp=hyp, augment=True, cache=opt.cache, rect=opt.rect, rank=LOCAL_RANK,
                                              workers=workers, image_weights=opt.image_weights, quad=opt.quad,
                                              prefix=colorstr('train: '), shuffle=True)
    mlc = int(np.concatenate(dataset.labels, 0)[:, 0].max())  # max label class
    nb = len(train_loader)  # number of batches
    assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}'

    # Process 0
    if RANK in [-1, 0]:
        val_loader = create_dataloader(val_path, imgsz, batch_size // WORLD_SIZE * 2, gs, single_cls,
                                       hyp=hyp, cache=None if noval else opt.cache, rect=True, rank=-1,
                                       workers=workers, pad=0.5,
                                       prefix=colorstr('val: '))[0]

        if not resume:
            labels = np.concatenate(dataset.labels, 0)
            # c = torch.tensor(labels[:, 0])  # classes
            # cf = torch.bincount(c.long(), minlength=nc) + 1.  # frequency
            # model._initialize_biases(cf.to(device))
            if plots:
                plot_labels(labels, names, save_dir)

            # Anchors
            if not opt.noautoanchor:
                check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)
            model.half().float()  # pre-reduce anchor precision

        callbacks.run('on_pretrain_routine_end')

    # DDP mode
    if cuda and RANK != -1:
        model = DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK)

    # Model attributes
    nl = de_parallel(model).model[-1].nl  # number of detection layers (to scale hyps)
    hyp['box'] *= 3 / nl  # scale to layers
    hyp['cls'] *= nc / 80 * 3 / nl  # scale to classes and layers
    hyp['obj'] *= (imgsz / 640) ** 2 * 3 / nl  # scale to image size and layers
    hyp['label_smoothing'] = opt.label_smoothing
    model.nc = nc  # attach number of classes to model
    model.hyp = hyp  # attach hyperparameters to model
    model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc  # attach class weights
    model.names = names

    # Start training
    t0 = time.time()
    nw = max(round(hyp['warmup_epochs'] * nb), 1000)  # number of warmup iterations, max(3 epochs, 1k iterations)
    # nw = min(nw, (epochs - start_epoch) / 2 * nb)  # limit warmup to < 1/2 of training
    last_opt_step = -1
    maps = np.zeros(nc)  # mAP per class
    results = (0, 0, 0, 0, 0, 0, 0)  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
    scheduler.last_epoch = start_epoch - 1  # do not move
    scaler = amp.GradScaler(enabled=cuda)
    stopper = EarlyStopping(patience=opt.patience)
    compute_loss = ComputeLoss(model)  # init loss class
    LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n'
                f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n'
                f"Logging results to {colorstr('bold', save_dir)}\n"
                f'Starting training for {epochs} epochs...')
    for epoch in range(start_epoch, epochs):  # epoch ------------------------------------------------------------------
        model.train()

        # Update image weights (optional, single-GPU only)
        if opt.image_weights:
            cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc  # class weights
            iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw)  # image weights
            dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n)  # rand weighted idx

        # Update mosaic border (optional)
        # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
        # dataset.mosaic_border = [b - imgsz, -b]  # height, width borders

        mloss = torch.zeros(3, device=device)  # mean losses
        if RANK != -1:
            train_loader.sampler.set_epoch(epoch)
        pbar = enumerate(train_loader)
        LOGGER.info(('\n' + '%10s' * 7) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'labels', 'img_size'))
        if RANK in [-1, 0]:
            pbar = tqdm(pbar, total=nb, ncols=NCOLS, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')  # progress bar
        optimizer.zero_grad()
        for i, (imgs, targets, paths, _) in pbar:  # batch -------------------------------------------------------------
            ni = i + nb * epoch  # number integrated batches (since train start)
            imgs = imgs.to(device, non_blocking=True).float() / 255  # uint8 to float32, 0-255 to 0.0-1.0

            # Warmup
            if ni <= nw:
                xi = [0, nw]  # x interp
                # compute_loss.gr = np.interp(ni, xi, [0.0, 1.0])  # iou loss ratio (obj_loss = 1.0 or iou)
                accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round())
                for j, x in enumerate(optimizer.param_groups):
                    # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
                    x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
                    if 'momentum' in x:
                        x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])

            # Multi-scale
            if opt.multi_scale:
                sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs  # size
                sf = sz / max(imgs.shape[2:])  # scale factor
                if sf != 1:
                    ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (stretched to gs-multiple)
                    imgs = nn.functional.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)

            # Forward
            with amp.autocast(enabled=cuda):
                ...  # the listing breaks off here
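A few of the ideas in this listing are worth pulling out and running in isolation.

First, the accumulation arithmetic in the Optimizer section: `accumulate` makes small batches emulate a nominal batch size of 64 by summing gradients over several forward/backward passes before each optimizer step, and `weight_decay` is rescaled so the effective regularization stays constant regardless of batch size. A quick numeric check (batch_size=16 and weight_decay=0.0005 are assumed values, not taken from the post):

nbs = 64                                        # nominal batch size, as in train()
batch_size = 16                                 # assumed actual batch size
weight_decay = 0.0005                           # assumed default hyp value
accumulate = max(round(nbs / batch_size), 1)    # backward passes per optimizer step
weight_decay *= batch_size * accumulate / nbs   # unchanged whenever batch_size * accumulate == nbs
print(accumulate, weight_decay)                 # 4 0.0005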
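The three-way parameter split (g0/g1/g2) is a detail worth copying into your own projects: BatchNorm scale parameters and all biases are exempt from weight decay, and only conv/linear weights are regularized. A self-contained sketch of the same traversal on a toy model (the toy architecture is illustrative only):

import torch.nn as nn
from torch.optim import SGD

# Toy model mirroring the module types train() distinguishes
model = nn.Sequential(nn.Conv2d(3, 8, 3, bias=True),
                      nn.BatchNorm2d(8),
                      nn.Flatten(),
                      nn.Linear(8 * 30 * 30, 2))

g0, g1, g2 = [], [], []  # BN weights (no decay), other weights (decay), biases
for m in model.modules():
    if hasattr(m, 'bias') and isinstance(m.bias, nn.Parameter):  # bias
        g2.append(m.bias)
    if isinstance(m, nn.BatchNorm2d):  # BN scale: never decayed
        g0.append(m.weight)
    elif hasattr(m, 'weight') and isinstance(m.weight, nn.Parameter):  # decayed weight
        g1.append(m.weight)

optimizer = SGD(g0, lr=0.01, momentum=0.937, nesterov=True)
optimizer.add_param_group({'params': g1, 'weight_decay': 5e-4})  # decay only here
optimizer.add_param_group({'params': g2})                        # biases, no decay
print(len(g0), len(g1), len(g2))  # 1 2 3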
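`one_cycle` builds the cosine schedule that `LambdaLR` multiplies into each group's base lr once per epoch. In the YOLOv5 source it is essentially the one-liner below; the sanity check assumes hyp['lrf'] = 0.1 over 100 epochs:

import math

def one_cycle(y1=0.0, y2=1.0, steps=100):
    # Cosine ramp from y1 to y2 over `steps` epochs (as in utils/general.py)
    return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1

lf = one_cycle(1, 0.1, 100)    # cosine 1 -> 0.1, matching train()
print(lf(0), lf(50), lf(100))  # 1.0, 0.55, 0.1 (midpoint is the average)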
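`ModelEMA` maintains a shadow copy of the weights as an exponential moving average; it is the EMA weights, not the live ones, that get validated and checkpointed later in the script. A minimal sketch in the spirit of `utils/torch_utils.ModelEMA` (the decay=0.9999 and tau=2000 ramp constants match YOLOv5's defaults as I understand them; `TinyEMA` itself is a hypothetical name):

import math
import torch

class TinyEMA:
    # Minimal EMA sketch; not the actual ModelEMA implementation
    def __init__(self, model, decay=0.9999, tau=2000):
        self.ema = {k: v.detach().clone().float() for k, v in model.state_dict().items()}
        self.updates = 0
        self.decay = lambda x: decay * (1 - math.exp(-x / tau))  # ramp 0 -> decay

    @torch.no_grad()
    def update(self, model):
        self.updates += 1
        d = self.decay(self.updates)  # early updates weigh new weights more
        for k, v in model.state_dict().items():
            if v.dtype.is_floating_point:
                self.ema[k].mul_(d).add_(v.detach().float(), alpha=1 - d)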
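During the first `nw` iterations, the warmup block drives every group's lr and momentum along a straight line with `np.interp`: the weight groups ramp up from 0, the bias group (index 2) ramps down from `warmup_bias_lr`, and momentum rises from `warmup_momentum` to its final value. A numeric trace using the default hyp values (lr0=0.01, warmup_bias_lr=0.1, warmup_momentum=0.8, momentum=0.937) with nw=1000 assumed and the `lf(epoch)` factor simplified away:

import numpy as np

nw, lr0 = 1000, 0.01
xi = [0, nw]
for ni in (0, 250, 500, 1000):                 # integrated batch counter
    lr_w = np.interp(ni, xi, [0.0, lr0])       # weight groups: 0 -> lr0
    lr_b = np.interp(ni, xi, [0.1, lr0])       # bias group: 0.1 -> lr0
    mom = np.interp(ni, xi, [0.8, 0.937])      # momentum: 0.8 -> 0.937
    print(f'ni={ni:4d}  lr_w={lr_w:.4f}  lr_b={lr_b:.4f}  momentum={mom:.3f}')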
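Finally, multi-scale training resizes each whole batch to a random side length between 0.5× and 1.5× imgsz, snapped to a multiple of the grid stride, so the network sees objects at varying scales. The same arithmetic in isolation (imgsz=640 and gs=32 assumed; `int()` casts added because `random.randrange` rejects float bounds on newer Pythons):

import math
import random
import torch
import torch.nn as nn

imgsz, gs = 640, 32
imgs = torch.zeros(2, 3, imgsz, imgsz)  # dummy uint8-normalized batch
sz = random.randrange(int(imgsz * 0.5), int(imgsz * 1.5) + gs) // gs * gs  # snap to stride
sf = sz / max(imgs.shape[2:])  # scale factor
if sf != 1:
    ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new gs-multiple shape
    imgs = nn.functional.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)
print(imgs.shape)  # e.g. torch.Size([2, 3, 448, 448]); varies per call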
