src.acoustools.Solvers

from acoustools.Utilities import *
from acoustools.Optimise.Constraints import constrain_phase_only
from acoustools.Constraints import constrain_amplitude, constrain_field, constrain_field_weighted
from acoustools.Optimise.Objectives import target_gorkov_BEM_mse_sine_objective
from acoustools.Optimise.Constraints import sine_amplitude
from acoustools.BEM import compute_E
import torch

from torch import Tensor
from types import FunctionType
from typing import Literal

from vedo import Mesh

# `Constants.wavelength` and `c.k` are referenced below; if the wildcard import above
# does not already provide these names, they are assumed to come from acoustools.Constants.
import acoustools.Constants as Constants
import acoustools.Constants as c


def wgs_solver_unbatched(A, b, K):
    '''
    @private
    Unbatched WGS solver for transducer phases, better to use `wgs_solver_batch` \\
    `A` Forward model matrix to use \\
    `b` initial guess - normally use `torch.ones(N,1).to(device)+0j`\\
    `K` number of iterations to run for \\
    returns (hologram image, point phases, hologram)
    '''
    #Written by Giorgos Christopoulos 2022
    AT = torch.conj(A).T.to(device)
    b0 = b.to(device)
    x = torch.ones(A.shape[1],1).to(device) + 0j
    for kk in range(K):
        y = torch.matmul(A,x)                                   # forward propagate
        y = y/torch.max(torch.abs(y))                           # normalize forward propagated field (useful for next step's division)
        b = torch.multiply(b0,torch.divide(b,torch.abs(y)))     # update target - current target over normalized field
        b = b/torch.max(torch.abs(b))                           # normalize target
        p = torch.multiply(b,torch.divide(y,torch.abs(y)))      # keep phase, apply target amplitude
        r = torch.matmul(AT,p)                                  # backward propagate
        x = torch.divide(r,torch.abs(r))                        # keep phase for hologram

    return y, p, x

def wgs_solver_batch(A, b, iterations):
    '''
    @private
    Batched WGS solver for transducer phases\\
    `A` Forward model matrix to use \\
    `b` initial guess - normally use `torch.ones(N,1).to(device)+0j`\\
    `iterations` number of iterations to run for \\
    returns (point pressure, point phases, hologram)
    '''
    AT = torch.conj(A).mT.to(device).to(DTYPE)

    b = b.expand(A.shape[0],-1,-1)
    b0 = b.to(device).expand(A.shape[0],-1,-1).to(DTYPE)

    x = torch.ones(A.shape[2],1).to(device).to(DTYPE) + 0j
    for kk in range(iterations):
        p = A@x
        p,b = constrain_field_weighted(p,b0,b)
        x = AT@p
        x = constrain_amplitude(x)
    y = torch.abs(A@x)
    return y, p, x

def wgs(points:Tensor, iter:int = 200, board:Tensor|None = None, A:Tensor|None = None, b:Tensor|None=None, return_components:bool=False) -> Tensor:
    '''
    Weighted-GS algorithm\n
    :param points: Points to use
    :param iter: Number of iterations for WGS, default `200`
    :param board: The Transducer array, if `None` uses default two 16x16 arrays
    :param A: Forward model matrix to use
    :param b: initial guess - if `None` will use `torch.ones(N,1).to(device)+0j`
    :param return_components: If `True` will return `hologram image, point phases, hologram` else will return `hologram`, default `False`
    :return: hologram

    ```Python
    from acoustools.Solvers import wgs
    from acoustools.Utilities import create_points, propagate_abs

    p = create_points(2,4)
    print(p)
    x = wgs(p)
    print(propagate_abs(x,p))

    p = p.squeeze(0)
    print(p)
    x = wgs(p)
    print(propagate_abs(x,p))
    ```
    '''
    if board is None:
        board = TRANSDUCERS

    if len(points.shape) > 2:
        N = points.shape[2]
        batch=True
    else:
        N = points.shape[1]
        batch=False

    if A is None:
        A = forward_model(points, board).to(DTYPE)
    if b is None:
        b = torch.ones(N,1).to(device).to(DTYPE)+0j

    if batch:
        img,pha,act = wgs_solver_batch(A,b,iter)
    else:
        img,pha,act = wgs_solver_unbatched(A,b,iter)

    if return_components:
        return img,pha,act
    return act


def gspat_solver(R, forward, backward, target, iterations, return_components=False):
    '''
    @private
    GS-PAT solver for transducer phases\\
    `R` R matrix\\
    `forward` forward propagation matrix\\
    `backward` backward propagation matrix\\
    `target` initial guess - can use `torch.ones(N,1).to(device)+0j`\\
    `iterations` Number of iterations to use\\
    returns (hologram, point activations)
    '''
    #Written by Giorgos Christopoulos 2022
    field = target

    for _ in range(iterations):

        # amplitude constraint: keeps phase and imposes the desired amplitude ratio among points
        target_field = constrain_field(field, target)
        # backward and forward propagation at once
        field = torch.matmul(R,target_field)

    # AFTER THE LOOP
    # impose amplitude constraint and keep phase; after the iterative part this step is different, following Diego
    target_field = torch.multiply(target**2,torch.divide(field,torch.abs(field)**2))
    # back propagate
    complex_hologram = torch.matmul(backward,target_field)
    # keep phase
    phase_hologram = torch.divide(complex_hologram,torch.abs(complex_hologram))
    if return_components:
        points = torch.matmul(forward,phase_hologram)

        return phase_hologram, points
    else:
        return phase_hologram


def gspat(points:Tensor|None=None, board:Tensor|None=None, A:Tensor|None=None, B:Tensor|None=None,
          R:Tensor|None=None, b:Tensor|None = None, iterations:int=200, return_components:bool=False) -> Tensor:
    '''
    GS-PAT solver\n
    :param points: Target point positions
    :param board: The Transducer array, if `None` uses default two 16x16 arrays
    :param A: The forward propagation matrix, if `None` will be computed
    :param B: The backwards propagation matrix, if `None` will be computed
    :param R: The R propagation matrix, if `None` will be computed
    :param b: initial guess - if `None` will use `torch.ones(N,1).to(device)+0j`
    :param iterations: Number of iterations to use
    :param return_components: If `True` will return `hologram, pressure` else will return `hologram`, default `False`
    :return: Hologram
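
    A minimal usage sketch mirroring the `wgs` example above (assumes the default two 16x16 boards):

    ```Python
    from acoustools.Solvers import gspat
    from acoustools.Utilities import create_points, propagate_abs

    p = create_points(2,1)
    x = gspat(p)
    print(propagate_abs(x,p))
    ```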
    '''

    if board is None:
        board = TRANSDUCERS

    if A is None:
        A = forward_model(points,board)
    if B is None:
        B = torch.conj(A).mT.to(DTYPE)
    if R is None:
        R = A@B

    if b is None:
        if is_batched_points(points):
            b = torch.ones(points.shape[2],1).to(device).to(DTYPE)
        else:
            b = torch.ones(points.shape[1],1).to(device).to(DTYPE)


    if return_components:
        phase_hologram,pres = gspat_solver(R,A,B,b, iterations,return_components)
        return phase_hologram,pres

    phase_hologram = gspat_solver(R,A,B,b, iterations,return_components)
    return phase_hologram


def naive_solver_batched(points, board=TRANSDUCERS, activation=None, A=None):
    '''
    @private
    Batched naive (backpropagation) algorithm for phase retrieval\\
    `points` Target point positions\\
    `board` The Transducer array, default two 16x16 arrays\\
    `activation` Initial starting point activation\\
    `A` Forward model to use, if `None` will be computed\\
    returns (point activations, hologram)
    '''
    if activation is None:
        activation = torch.ones(points.shape[2],1, device=device, dtype=DTYPE) +0j

    if A is None:
        A = forward_model_batched(points,board)
    back = torch.conj(A).mT
    trans = back@activation
    trans_phase = constrain_amplitude(trans)
    out = A@trans_phase

    return out, trans_phase

def naive_solver_unbatched(points, board=TRANSDUCERS, activation=None, A=None):
    '''
    @private
    Unbatched naive (backpropagation) algorithm for phase retrieval\\
    `points` Target point positions\\
    `board` The Transducer array, default two 16x16 arrays\\
    `activation` Initial starting point activation\\
    `A` Forward model to use, if `None` will be computed\\
    returns (point activations, hologram)
    '''
    if activation is None:
        activation = torch.ones(points.shape[1]) +0j
        activation = activation.to(device)
    if A is None:
        A = forward_model(points,board)
    back = torch.conj(A).T
    trans = back@activation
    trans_phase = constrain_amplitude(trans)
    out = A@trans_phase

    return out, trans_phase

def naive(points:Tensor, board:Tensor|None = None, return_components:bool=False, activation:Tensor|None=None, A=None) -> Tensor:
    '''
    Naive solver\n
    :param points: Target point positions
    :param board: The Transducer array, default two 16x16 arrays
    :param return_components: If `True` will return `hologram, pressure` else will return `hologram`, default `False`
    :param activation: Initial starting point activation
    :param A: Forward propagation matrix to use, if `None` will be computed
    :return: hologram
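
    A minimal usage sketch (assumes the default two 16x16 boards):

    ```Python
    from acoustools.Solvers import naive
    from acoustools.Utilities import create_points, propagate_abs

    p = create_points(2,1)
    x = naive(p)
    print(propagate_abs(x,p))
    ```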
    '''
    if board is None:
        board = TRANSDUCERS
    if is_batched_points(points):
        out,act = naive_solver_batched(points,board=board, activation=activation, A=A)
    else:
        out,act = naive_solver_unbatched(points,board=board, activation=activation, A=A)
    if return_components:
        return act, out
    return act

def ph_thresh(z_last, z, threshold):
    '''
    @private
    Phase threshold between two timesteps' point activations; clamps phase changes larger than `threshold` to `threshold`\\
    `z_last` point activation at timestep t-1\\
    `z` point activation at timestep t\\
    `threshold` maximum allowed phase change\\
    returns constrained point activations
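
    A minimal illustration of the clamping (values chosen arbitrarily):
    ```Python
    import torch
    from acoustools.Solvers import ph_thresh

    z_last = torch.exp(1j*torch.tensor([0.0]))
    z = torch.exp(1j*torch.tensor([0.5]))
    z_new = ph_thresh(z_last, z, 0.2)  # the 0.5 rad phase step is clamped to 0.2 rad
    ```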
    '''

    ph1 = torch.angle(z_last)
    ph2 = torch.angle(z)
    dph = ph2 - ph1

    dph = torch.atan2(torch.sin(dph),torch.cos(dph))

    dph[dph>threshold] = threshold
    dph[dph<-1*threshold] = -1*threshold

    ph2 = ph1 + dph
    z = abs(z)*torch.exp(1j*ph2)

    return z

def soft(x, threshold):
    '''
    @private
    Soft threshold for a set of phase changes; returns `change - threshold` if `change > threshold` else `0`\\
    `x` phase changes\\
    `threshold` Maximum allowed hologram phase change\\
    returns new phase changes
    '''
    y = torch.clamp(torch.abs(x) - threshold, min=0) # elementwise max(|x| - threshold, 0)
    y = y * torch.sign(x)
    return y

def ph_soft(x_last, x, threshold):
    '''
    @private
    Soft thresholding for holograms\\
    `x_last` Hologram from timestep t-1\\
    `x` Hologram from timestep t\\
    `threshold` Maximum allowed phase change\\
    returns constrained hologram
    '''
    pi = torch.pi
    ph1 = torch.angle(x_last)
    ph2 = torch.angle(x)
    dph = ph2 - ph1

    dph[dph>pi] = dph[dph>pi] - 2*pi
    dph[dph<-1*pi] = dph[dph<-1*pi] + 2*pi

    dph = soft(dph,threshold)
    ph2 = ph1 + dph
    x = abs(x)*torch.exp(1j*ph2)
    return x

def temporal_wgs(A:Tensor, y:Tensor, K:int, ref_in:Tensor, ref_out:Tensor, T_in:float, T_out:float) -> Tensor:
    '''
    WGS solver for holograms where the phase change between frames is constrained \n
    Based on: Giorgos Christopoulos, Lei Gao, Diego Martinez Plasencia, Marta Betcke,
    Ryuji Hirayama, and Sriram Subramanian.
    Temporal acoustic point holography. (2024) https://doi.org/10.1145/3641519.3657443 \n
    :param A: Forward model to use
    :param y: initial guess - normally `torch.ones(N,1).to(device)+0j`
    :param K: Number of iterations to use
    :param ref_in: Previous timestep's hologram
    :param ref_out: Previous timestep's point activations
    :param T_in: Hologram phase change threshold
    :param T_out: Point activations phase change threshold
    :return: (hologram image, point phases, hologram)

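    A hypothetical two-frame sketch: frame t's solution is constrained to stay near the frame t-1 `wgs` solution (the `pi/32` thresholds below are arbitrary choices for illustration):

    ```Python
    import torch
    from acoustools.Solvers import temporal_wgs, wgs
    from acoustools.Utilities import create_points, forward_model, TRANSDUCERS, device

    p = create_points(2,1)
    A = forward_model(p, TRANSDUCERS)
    _, pha_prev, x_prev = wgs(p, return_components=True)  # frame t-1 solution

    y = torch.ones(2,1).to(device) + 0j
    _, _, x = temporal_wgs(A, y, 200, ref_in=x_prev, ref_out=pha_prev,
                           T_in=torch.pi/32, T_out=torch.pi/32)
    ```
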
    '''
    #ref_out -> points
    #ref_in -> transducers
    AT = torch.conj(A).mT.to(device)
    y0 = y.to(device)
    x = torch.ones(A.shape[2],1).to(device) + 0j
    for kk in range(K):
        z = torch.matmul(A,x)                                   # forward propagate
        z = z/torch.max(torch.abs(z),dim=1,keepdim=True).values # normalize forward propagated field (useful for next step's division)
        z = ph_thresh(ref_out,z,T_out)                          # constrain point phase change vs previous frame

        y = torch.multiply(y0,torch.divide(y,torch.abs(z)))     # update target - current target over normalized field
        y = y/torch.max(torch.abs(y),dim=1,keepdim=True).values # normalize target
        p = torch.multiply(y,torch.divide(z,torch.abs(z)))      # keep phase, apply target amplitude
        r = torch.matmul(AT,p)                                  # backward propagate
        x = torch.divide(r,torch.abs(r))                        # keep phase for hologram
        x = ph_thresh(ref_in,x,T_in)                            # constrain hologram phase change vs previous frame
    return y, p, x


def gradient_descent_solver(points: Tensor, objective: FunctionType, board:Tensor|None=None, optimiser:torch.optim.Optimizer=torch.optim.Adam, lr: float=0.01,
                            objective_params:dict={}, start:Tensor|None=None, iters:int=200,
                            maximise:bool=False, targets:Tensor=None, constrains:FunctionType=constrain_phase_only, log:bool=False, return_loss:bool=False,
                            scheduler:torch.optim.lr_scheduler.LRScheduler=None, scheduler_args:dict=None, save_each_n:int = 0, save_set_n:list[int] = None,
                            init_type:Literal['rand', 'ones','focal','trap']|Tensor='rand') -> Tensor:
    '''
    Solves phases using gradient descent\n
    :param points: Target point positions
    :param objective: Objective function - must take an input of (`transducer_phases, points, board, targets, **objective_params`); `targets` may be `None` for unsupervised objectives
    :param board: The Transducer array, default two 16x16 arrays
    :param optimiser: Optimiser to use (should be compatible with the interface of `torch.optim`). Default `torch.optim.Adam`
    :param lr: Learning rate to use. Default `0.01`
    :param objective_params: Any parameters to be passed to `objective` as a dictionary of `{parameter_name:parameter_value}` pairs. Default `{}`
    :param start: Initial guess. If `None` will default to a random initialisation of phases
    :param iters: Number of optimisation iterations. Default `200`
    :param maximise: Set to `True` to maximise the objective, else minimise. Default `False`
    :param targets: Targets to optimise towards for supervised optimisation, unsupervised if set to `None`. Default `None`
    :param constrains: Constraint function to apply to the result after each step
    :param log: If `True` prints the objective value at each step. Default `False`
    :param return_loss: If `True` saves and returns the objective value for each step as well as the optimised result
    :param scheduler: Learning rate scheduler to use, if `None` no scheduler is used. Default `None`
    :param scheduler_args: Parameters to pass to `scheduler`
    :param save_each_n: For n>0 will save the optimiser results every n steps. Set either `save_each_n` or `save_set_n`
    :param save_set_n: List containing exact iterations to save optimiser results at. Set either `save_each_n` or `save_set_n`
    :param init_type: Type of initialisation to use. rand: random, ones: tensor of ones, focal: naive focal point, trap: two focal points offset in the z-axis
    :return: optimised result and optionally the objective values and results (see `return_loss`, `save_each_n` and `save_set_n`). If either is requested, both will be returned but may be empty if not asked for

    ```Python
    from acoustools.Optimise.Objectives import propagate_abs_sum_objective
    from acoustools.Solvers import gradient_descent_solver
    from acoustools.Optimise.Constraints import constrain_phase_only
    from acoustools.Utilities import create_points, propagate_abs

    p = create_points(4,2)
    x = gradient_descent_solver(p,propagate_abs_sum_objective,
                                maximise=True, constrains=constrain_phase_only,
                                log=False, lr=1e-1)

    print(propagate_abs(x,p))
    ```
    '''

    if board is None:
        board = TRANSDUCERS

    losses = []
    results = {}
    B = points.shape[0]
    N = points.shape[2]
    M = board.shape[0]
    if start is None:
        # start = torch.ones((B,M,1)).to(device) +0j
        if type(init_type) == Tensor:
            start = init_type
        else:
            if init_type == 'ones':
                start = torch.ones((B,M,1))
            elif init_type == 'focal':
                start = naive(points, board=board, return_components=False)
            elif init_type == 'trap':
                new_points = points.expand(B,3,2*N).clone()
                SCALE = 2
                new_points[:,2,:N] -= Constants.wavelength / SCALE
                new_points[:,2,N:] += Constants.wavelength / SCALE
                target_phases = torch.zeros(B,2*N)
                # target_phases[:,N:] = Constants.pi
                activation = torch.exp(1j * target_phases).unsqueeze(2).to(device)

                start = naive(new_points, board, return_components=False, activation=activation)

            else: #rand is default
                start = torch.exp(1j*torch.rand((B,M,1))*torch.pi)

        start=start.to(device).to(DTYPE)


    # param = torch.nn.Parameter(start).to(device)
    param = start.requires_grad_()
    optim = optimiser([param],lr)
    if scheduler is not None:
        if scheduler_args is None:
            scheduler_args = {}
        scheduler = scheduler(optim,**scheduler_args)


    for epoch in range(iters):
        optim.zero_grad()

        loss = objective(param, points, board, targets, **objective_params)

        if log:
            print(epoch, loss.data.item())

        if maximise:
            loss *= -1

        if return_loss:
            losses.append(loss)

        if save_each_n > 0 and epoch % save_each_n == 0:
            results[epoch] = param.clone().detach()
        elif save_set_n is not None and epoch in save_set_n:
            results[epoch] = param.clone().detach()


        loss.backward(torch.tensor([1]*B).to(device))
        optim.step()
        if scheduler is not None:
            scheduler.step()

        if constrains is not None:
            param.data = constrains(param)

    if return_loss or save_each_n > 0 or save_set_n is not None:
        return param, losses, results


    return param


def iterative_backpropagation(points:Tensor, iterations:int = 200, board:Tensor|None = None, A:Tensor|None = None,
                              b:Tensor|None=None, return_components: bool=False) -> list[Tensor]:
    '''
    Iterative backpropagation (IB) solver for transducer phases\n
    :param points: Points to use
    :param iterations: Number of iterations to run, default `200`
    :param board: The Transducer array, default two 16x16 arrays
    :param A: Forward model matrix to use
    :param b: initial guess - if `None` will use `torch.ones(N,1).to(device)+0j`
    :param return_components: If `True` will return `hologram image, point phases, hologram` else will return `hologram`, default `False`
    :return: hologram, or `(point pressure, point phases, hologram)` if `return_components`

    ```Python
    from acoustools.Solvers import iterative_backpropagation
    from acoustools.Utilities import create_points, propagate_abs

    p = create_points(2,1)
    print(p)
    x = iterative_backpropagation(p)
    print(propagate_abs(x,p))

    p = p.squeeze(0)
    print(p)
    x = iterative_backpropagation(p)
    print(propagate_abs(x,p))
    ```
    '''

    if board is None:
        board = TRANSDUCERS

    if len(points.shape) > 2:
        N = points.shape[2]
        batch=True
    else:
        N = points.shape[1]
        batch=False

    if A is None:
        A = forward_model(points, board)

    if batch:
        M = A.shape[2]
    else:
        M = A.shape[1]


    if b is None:
        b = torch.ones(N,1).to(device).to(DTYPE) +0j

    AT = torch.conj(A).mT.to(device)
    x = torch.ones(M,1).to(device).to(DTYPE)
    for kk in range(iterations):
        p = A@x
        p = constrain_field(p,b)
        x = AT@p
        x = constrain_amplitude(x)
    y = torch.abs(A@x)
    if return_components:
        return y, p, x
    else:
        return x


def gorkov_target(points:Tensor, objective:FunctionType = target_gorkov_BEM_mse_sine_objective,
                  board:Tensor=None, U_targets:Tensor=None, iterations:int=20, lr:float=1e4,
                  constraint:FunctionType=sine_amplitude, reflector:Mesh|None=None, path:str|None=None) -> Tensor:
    '''
    Phase retrieval to generate target Gor'kov potential values at points via `acoustools.Solvers.gradient_descent_solver` \n
    :param points: Points of interest
    :param objective: Objective function to minimise, default `acoustools.Optimise.Objectives.target_gorkov_BEM_mse_sine_objective`
    :param board: Board to use. Default `acoustools.Utilities.TOP_BOARD`
    :param U_targets: Target Gor'kov values
    :param iterations: Iterations to use for the solver
    :param lr: Learning rate
    :param constraint: Constraint function to use in the optimiser, default `acoustools.Optimise.Constraints.sine_amplitude`
    :param reflector: Mesh to use as reflector, or `None`
    :param path: BEM cache path
    :return: hologram
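
    A hypothetical usage sketch; the mesh and BEM cache paths below are placeholders, and the default objective assumes BEM data for the reflector is available:

    ```Python
    from acoustools.Solvers import gorkov_target
    from acoustools.Utilities import create_points
    import vedo

    p = create_points(1,1)
    reflector = vedo.load('reflector.stl')  # placeholder mesh path
    x = gorkov_target(p, U_targets=-1e-5, reflector=reflector, path='BEMCache/')  # placeholder cache path
    ```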
    '''

    if board is None:
        board = TOP_BOARD

    if type(U_targets) == float:
        U_targets = torch.tensor([U_targets])

    if U_targets is None:
        U_targets = torch.tensor([-1e-5])

    x = gradient_descent_solver(points, objective,
                                board, log=False, targets=U_targets, iters=iterations,
                                lr=lr, constrains=constraint, objective_params={'root':path,'reflector':reflector})

    return x


def kd_solver(points:Tensor, board:Tensor|None = None, k=c.k):
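    '''
    Naive phase-conjugation (focusing) hologram: returns `exp(-1j * k * d)` where `d` is the distance from
    each transducer to the point, so every transducer's contribution arrives in phase at the point.
    The `expand` below appears to assume a single point per batch.\n
    :param points: Target point position
    :param board: The Transducer array (required - no default is applied despite the `None` default value)
    :param k: Wavenumber to use, default `c.k`
    :return: hologram
    '''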
    B = points.shape[0]
    M = board.shape[0]

    b = board.unsqueeze(0).permute((0,2,1))
    p = points.expand(B,3,M)

    distance = torch.sqrt(torch.sum((p - b)**2,dim=1)).unsqueeze_(0).mT
    distance = distance.to(device).to(DTYPE)
    return torch.exp(1j * -1* distance*k)