77
88import torch_optimizer as optim
99
10- plt .style .use (' seaborn-white' )
10+ plt .style .use (" seaborn-white" )
1111
1212
def rosenbrock(tensor):
    """Rosenbrock test function: global minimum 0 at (x, y) = (1, 1).

    ``tensor`` is any 2-element unpackable (tuple, list, torch tensor)
    holding the (x, y) coordinates; returns the scalar function value.
    # https://en.wikipedia.org/wiki/Test_functions_for_optimization
    """
    x, y = tensor
    return (1 - x) ** 2 + 100 * (y - x ** 2) ** 2
1717
1818
1919def rastrigin (tensor , lib = torch ):
@@ -22,8 +22,8 @@ def rastrigin(tensor, lib=torch):
2222 A = 10
2323 f = (
2424 A * 2
25- + (x ** 2 - A * lib .cos (x * math .pi * 2 ))
26- + (y ** 2 - A * lib .cos (y * math .pi * 2 ))
25+ + (x ** 2 - A * lib .cos (x * math .pi * 2 ))
26+ + (y ** 2 - A * lib .cos (y * math .pi * 2 ))
2727 )
2828 return f
2929
@@ -47,8 +47,8 @@ def execute_steps(
4747
4848
4949def objective_rastrigin (params ):
50- lr = params ['lr' ]
51- optimizer_class = params [' optimizer_class' ]
50+ lr = params ["lr" ]
51+ optimizer_class = params [" optimizer_class" ]
5252 initial_state = (- 2.0 , 3.5 )
5353 minimum = (0 , 0 )
5454 optimizer_config = dict (lr = lr )
@@ -60,8 +60,8 @@ def objective_rastrigin(params):
6060
6161
6262def objective_rosenbrok (params ):
63- lr = params ['lr' ]
64- optimizer_class = params [' optimizer_class' ]
63+ lr = params ["lr" ]
64+ optimizer_class = params [" optimizer_class" ]
6565 minimum = (1.0 , 1.0 )
6666 initial_state = (- 2.0 , 2.0 )
6767 optimizer_config = dict (lr = lr )
@@ -85,15 +85,15 @@ def plot_rastrigin(grad_iter, optimizer_name, lr):
8585 fig = plt .figure (figsize = (8 , 8 ))
8686
8787 ax = fig .add_subplot (1 , 1 , 1 )
88- ax .contour (X , Y , Z , 20 , cmap = ' jet' )
89- ax .plot (iter_x , iter_y , color = 'r' , marker = 'x' )
88+ ax .contour (X , Y , Z , 20 , cmap = " jet" )
89+ ax .plot (iter_x , iter_y , color = "r" , marker = "x" )
9090 ax .set_title (
91- ' Rastrigin func: {} with '
92- ' {} iterations, lr={:.6}' .format (optimizer_name , len (iter_x ), lr )
91+ " Rastrigin func: {} with "
92+ " {} iterations, lr={:.6}" .format (optimizer_name , len (iter_x ), lr )
9393 )
94- plt .plot (* minimum , 'gD' )
95- plt .plot (iter_x [- 1 ], iter_y [- 1 ], 'rD' )
96- plt .savefig (' docs/rastrigin_{}.png' .format (optimizer_name ))
94+ plt .plot (* minimum , "gD" )
95+ plt .plot (iter_x [- 1 ], iter_y [- 1 ], "rD" )
96+ plt .savefig (" docs/rastrigin_{}.png" .format (optimizer_name ))
9797
9898
9999def plot_rosenbrok (grad_iter , optimizer_name , lr ):
@@ -109,16 +109,16 @@ def plot_rosenbrok(grad_iter, optimizer_name, lr):
109109 fig = plt .figure (figsize = (8 , 8 ))
110110
111111 ax = fig .add_subplot (1 , 1 , 1 )
112- ax .contour (X , Y , Z , 90 , cmap = ' jet' )
113- ax .plot (iter_x , iter_y , color = 'r' , marker = 'x' )
112+ ax .contour (X , Y , Z , 90 , cmap = " jet" )
113+ ax .plot (iter_x , iter_y , color = "r" , marker = "x" )
114114
115115 ax .set_title (
116- ' Rosenbrock func: {} with {} '
117- ' iterations, lr={:.6}' .format (optimizer_name , len (iter_x ), lr )
116+ " Rosenbrock func: {} with {} "
117+ " iterations, lr={:.6}" .format (optimizer_name , len (iter_x ), lr )
118118 )
119- plt .plot (* minimum , 'gD' )
120- plt .plot (iter_x [- 1 ], iter_y [- 1 ], 'rD' )
121- plt .savefig (' docs/rosenbrock_{}.png' .format (optimizer_name ))
119+ plt .plot (* minimum , "gD" )
120+ plt .plot (iter_x [- 1 ], iter_y [- 1 ], "rD" )
121+ plt .savefig (" docs/rosenbrock_{}.png" .format (optimizer_name ))
122122
123123
124124def execute_experiments (
@@ -128,8 +128,8 @@ def execute_experiments(
128128 for item in optimizers :
129129 optimizer_class , lr_low , lr_hi = item
130130 space = {
131- ' optimizer_class' : hp .choice (' optimizer_class' , [optimizer_class ]),
132- 'lr' : hp .loguniform ('lr' , lr_low , lr_hi ),
131+ " optimizer_class" : hp .choice (" optimizer_class" , [optimizer_class ]),
132+ "lr" : hp .loguniform ("lr" , lr_low , lr_hi ),
133133 }
134134 best = fmin (
135135 fn = objective ,
@@ -138,24 +138,24 @@ def execute_experiments(
138138 max_evals = 200 ,
139139 rstate = np .random .RandomState (seed ),
140140 )
141- print (best ['lr' ], optimizer_class )
141+ print (best ["lr" ], optimizer_class )
142142
143143 steps = execute_steps (
144144 func ,
145145 initial_state ,
146146 optimizer_class ,
147- {'lr' : best ['lr' ]},
147+ {"lr" : best ["lr" ]},
148148 num_iter = 500 ,
149149 )
150- plot_func (steps , optimizer_class .__name__ , best ['lr' ])
150+ plot_func (steps , optimizer_class .__name__ , best ["lr" ])
151151
152152
def LookaheadYogi(*a, **kw):
    """Factory: a Yogi optimizer wrapped in Lookahead.

    Forwards all positional/keyword arguments to ``optim.Yogi`` and
    returns the ``optim.Lookahead`` wrapper, so it can be used wherever
    an optimizer class is expected (e.g. in the experiment search space).
    PascalCase name is kept deliberately so plots label it like a class.
    """
    base = optim.Yogi(*a, **kw)
    return optim.Lookahead(base)
156156
157157
158- if __name__ == ' __main__' :
158+ if __name__ == " __main__" :
159159 # python examples/viz_optimizers.py
160160
161161 # Each optimizer has tweaked search space to produce better plots and
0 commit comments