using
			
			 
			DataLoaders
			

			
			using
			
			 
			Flux
			

			
			using
			
			 
			DataAugmentation
			

			
			using
			
			 
			DeepLearningTasks
			

			
			using
			
			 
			DLDatasets
			

			
			using
			
			 
			MLDataPattern
			

			
			using
			
			 
			LearnBase
			

			
			using
			
			 
			ProgressBars
			

			
			using
			
			 
			FluxTraining
			

			
			using
			
			 
			FluxModels

			
			
			
			task
			 
			=
			 
			
			ImageClassification
			(
			10
			,
			 
			
			sz
			 
			=
			 
			
			(
			224
			,
			 
			224
			)
			)
			

			
# Lookup table from class label to integer index, taken from the
# ImageNette dataset metadata.
labeltoint = metadata(ImageNette).labeltoclass

			
			
			obsfn
			(
			
			(
			image
			,
			 
			label
			)
			)
			 
			=
			 
			
			(
			image
			,
			 
			
			labeltoint
			[
			label
			]
			)

			obsfn (generic function with 1 method)

			
			
			
			
			trainds
			,
			 
			valds
			 
			=
			 
			
			
			DLDatasets
			.
			
			loaddataset
			(
			ImageNette
			,
			 
			
			"
			v2_160px
			"
			,
			 
			
			split
			 
			=
			 
			
			(
			
			"
			train
			"
			,
			 
			
			"
			val
			"
			)
			)
			

			
# Mini-batch size for the training data loader.
bs = 64

			
			traindl
			 
			=
			 
			
			taskdataloader
			(
			task
			,
			 
			trainds
			,
			 
			bs
			
			;
			 
			obsfn
			)
			
			

			
			valdl
			 
			=
			 
			
			taskdataloader
			(
			task
			,
			 
			valds
			,
			 
			
			2
			
			bs
			
			;
			 
			obsfn
			)
			;

			
			
			
			
			model
			 
			=
			 
			
			gpu
			(
			
			Chain
			(
			
			xresnet18
			(
			)
			,
			 
			
			
			FluxModels
			.
			
			classificationhead
			(
			
			task
			.
			
			nclasses
			,
			 
			512
			)
			)
			)
			;

			
			
			
			
			learner
			 
			=
			 
			

	
			Learner
			(
			
    
			model
			,
			
    
			
			(
			traindl
			,
			 
			valdl
			)
			,
			
    
			
			ADAM
			(
			)
			,
			
    
			
			
			Flux
			.
			
			Losses
			.
			
			logitcrossentropy
			,
			
    
			
			callbacks
			 
			=
			 
			
			[
			

	
			ToGPU
			(
			)
			]
			,
			
    
			
			metrics
			 
			=
			 
			
			[
			

	
			Metric
			(

	
			accuracy
			)
			]
			,
			
    
			
			schedule
			 
			=
			 
			
			Schedules
			(
			
			onecycleschedule
			(
			
			10
			 
			*
			 
			
			length
			(
			traindl
			)
			,
			 
			0.01
			)
			)
			

			)
			;

			
			
			
			
# Train for 10 epochs; each epoch runs a training phase over `traindl`
# followed by a validation phase over `valdl` (see the logged output below).
FluxTraining.fit!(learner, 10)

			Epoch 1 TrainingPhase(): 100%|██████████████████████████| Time: 0:01:02
loss: 1.3166474935148849
accuracy: 0.5757865646258504
Epoch 2 ValidationPhase(): 100%|████████████████████████| Time: 0:00:13
loss: 1.9421841899553935
accuracy: 0.471875
Epoch 2 TrainingPhase(): 100%|██████████████████████████| Time: 0:01:05
loss: 1.001848922700298
accuracy: 0.678146258503401
Epoch 3 ValidationPhase(): 100%|████████████████████████| Time: 0:00:13
loss: 1.044497122367223
accuracy: 0.6622395833333337
Epoch 3 TrainingPhase(): 100%|██████████████████████████| Time: 0:01:06
loss: 0.8365915733940748
accuracy: 0.7292729591836736
Epoch 4 ValidationPhase(): 100%|████████████████████████| Time: 0:00:13
loss: 0.9936108271280925
accuracy: 0.6783854166666666
Epoch 4 TrainingPhase(): 100%|██████████████████████████| Time: 0:01:04
loss: 0.7450118934621617
accuracy: 0.7589285714285718
Epoch 5 ValidationPhase(): 100%|████████████████████████| Time: 0:00:13
loss: 1.0524701436360677
accuracy: 0.6640625000000001
Epoch 5 TrainingPhase(): 100%|██████████████████████████| Time: 0:01:05
loss: 0.6489803444366066
accuracy: 0.7909226190476191
Epoch 6 ValidationPhase(): 100%|████████████████████████| Time: 0:00:12
loss: 0.8496215959390004
accuracy: 0.7190104166666665
Epoch 6 TrainingPhase(): 100%|██████████████████████████| Time: 0:01:06
loss: 0.5658585832637995
accuracy: 0.8167517006802718
Epoch 7 ValidationPhase(): 100%|████████████████████████| Time: 0:00:13
loss: 0.7595664302508036
accuracy: 0.7575520833333333
Epoch 7 TrainingPhase(): 100%|██████████████████████████| Time: 0:01:05
loss: 0.5101636230540113
accuracy: 0.8338647959183668
Epoch 8 ValidationPhase(): 100%|████████████████████████| Time: 0:00:12
loss: 0.8600163320700328
accuracy: 0.7414062499999998
Epoch 8 TrainingPhase(): 100%|██████████████████████████| Time: 0:01:05
loss: 0.45264889228911626
accuracy: 0.8527848639455782
Epoch 9 ValidationPhase(): 100%|████████████████████████| Time: 0:00:13
loss: 0.8109628856182098
accuracy: 0.753125
Epoch 9 TrainingPhase(): 100%|██████████████████████████| Time: 0:01:05
loss: 0.40744140095451253
accuracy: 0.8603316326530616
Epoch 10 ValidationPhase(): 100%|███████████████████████| Time: 0:00:12
loss: 0.6673034459352494
accuracy: 0.7921874999999999
Epoch 10 TrainingPhase(): 100%|█████████████████████████| Time: 0:01:04
loss: 0.3401108379063963
accuracy: 0.885841836734694
Epoch 11 ValidationPhase(): 100%|███████████████████████| Time: 0:00:12
loss: 0.7720626552899679
accuracy: 0.7669270833333333

			Learner
  model: Chain{Tuple{Chain{Tuple{Chain{Tuple{Conv{2,4,typeof(identity),CUDA.CuArray{Float32,4},CUDA.CuArray{Float32,1}},BatchNorm{typeof(relu),CUDA.CuArray{Float32,1},CUDA.CuArray{Float32,1},Float32}}},Chain{Tuple{Conv{2,4,typeof(identity),CUDA.CuArray{Float32,4},CUDA.CuArray{Float32,1}},BatchNorm{typeof(relu),CUDA.CuArray{Float32,1},CUDA.CuArray{Float32,1},Float32}}},Chain{Tuple{Conv{2,4,typeof(identity),CUDA.CuArray{Float32,4},CUDA.CuArray{Float32,1}},BatchNorm{typeof(relu),CUDA.CuArray{Float32,1},CUDA.CuArray{Float32,1},Float32}}},MaxPool{2,4},Chain{Tuple{FluxModels.ResBlock,FluxModels.ResBlock}},Chain{Tuple{FluxModels.ResBlock,FluxModels.ResBlock}},Chain{Tuple{FluxModels.ResBlock,FluxModels.ResBlock}},Chain{Tuple{FluxModels.ResBlock,FluxModels.ResBlock}}}},Chain{Tuple{FluxModels.AdaptiveMeanPool{2},typeof(FluxModels.flatten),Dense{typeof(identity),CUDA.CuArray{Float32,2},CUDA.CuArray{Float32,1}}}}}}
  data: Tuple{DataLoaders.BufferGetObsParallel{Tuple{Array{Float32,4},Array{Float32,2}},DataLoaders.BatchViewCollated{DeepLearningTasks.MappedData}},DataLoaders.BufferGetObsParallel{Tuple{Array{Float32,4},Array{Float32,2}},DataLoaders.BatchViewCollated{DeepLearningTasks.MappedData}}}
  opt: ADAM
  lossfn: logitcrossentropy (function of type typeof(Flux.Losses.logitcrossentropy))
  params: Zygote.Params
  batch: FluxTraining.BatchState
  callbacks: FluxTraining.Callbacks
  cbstate: Dict{Symbol,Any}