src.acoustools.Solvers

from acoustools.Utilities import *
from acoustools.Optimise.Constraints import constrain_phase_only
from acoustools.Constraints import constrain_amplitude, constrain_field, constrain_field_weighted
from acoustools.Optimise.Objectives import target_gorkov_BEM_mse_sine_objective
from acoustools.Optimise.Constraints import sine_amplitude
import torch

from torch import Tensor
from types import FunctionType

from vedo import Mesh


def wgs_solver_unbatched(A, b, K):
    '''
    @private
    unbatched WGS solver for transducer phases, better to use `wgs_solver_batch` \\
    `A` Forward model matrix to use \\
    `b` initial guess - normally use `torch.ones(N,1).to(device)+0j`\\
    `K` number of iterations to run for \\
    returns (hologram image, point phases, hologram)
    '''
    #Written by Giorgos Christopoulos 2022
    AT = torch.conj(A).T.to(device)
    b0 = b.to(device)
    x = torch.ones(A.shape[1],1).to(device) + 0j
    for kk in range(K):
        y = torch.matmul(A,x)                                   # forward propagate
        y = y/torch.max(torch.abs(y))                           # normalize forward propagated field (useful for next step's division)
        b = torch.multiply(b0,torch.divide(b,torch.abs(y)))     # update target - current target over normalized field
        b = b/torch.max(torch.abs(b))                           # normalize target
        p = torch.multiply(b,torch.divide(y,torch.abs(y)))      # keep phase, apply target amplitude
        r = torch.matmul(AT,p)                                  # backward propagate
        x = torch.divide(r,torch.abs(r))                        # keep phase for hologram

    return y, p, x

def wgs_solver_batch(A, b, iterations):
    '''
    @private
    batched WGS solver for transducer phases\\
    `A` Forward model matrix to use \\
    `b` initial guess - normally use `torch.ones(N,1).to(device)+0j`\\
    `iterations` number of iterations to run for \\
    returns (point pressure, point phases, hologram)
    '''
    AT = torch.conj(A).mT.to(device).to(DTYPE)
    
    b = b.expand(A.shape[0],-1,-1)
    b0 = b.to(device).expand(A.shape[0],-1,-1).to(DTYPE)
    
    x = torch.ones(A.shape[2],1).to(device).to(DTYPE) + 0j
    for kk in range(iterations):
        p = A@x
        p,b = constrain_field_weighted(p,b0,b)
        x = AT@p
        x = constrain_amplitude(x)
    y =  torch.abs(A@x) 
    return y, p, x

def wgs(points:Tensor,iterations:int = 200, board:Tensor|None = None, A:Tensor|None = None, b:Tensor|None=None, 
        return_components:bool=False, p_ref=c.P_ref, k=c.k, transducer_radius = c.radius, norms=None) -> Tensor:
    '''
    Weighted-GS algorithm\n
    :param points: Points to use
    :param iterations: Number of iterations for WGS, default `200`
    :param board: The Transducer array, if `None` uses default two 16x16 arrays
    :param A: Forward model matrix to use
    :param b: initial guess - If `None` will use `torch.ones(N,1).to(device)+0j`
    :param return_components: If True will return `hologram image, point phases, hologram` else will return `hologram`, default False
    :return: hologram

    ```Python
    from acoustools.Solvers import wgs
    from acoustools.Utilities import create_points, propagate_abs

    p = create_points(2,4)
    print(p)
    x = wgs(p)
    print(propagate_abs(x,p))
    
    p = p.squeeze(0)
    print(p)
    x = wgs(p)
    print(propagate_abs(x,p))
    ```
    '''
    if board is None:
        board = TRANSDUCERS

    if len(points.shape) > 2:
        N = points.shape[2]
        batch=True
    else:
        N = points.shape[1]
        batch=False

    if A is None:
        A = forward_model(points, board, p_ref=p_ref, k=k, transducer_radius=transducer_radius, norms=norms).to(DTYPE)
    if b is None:
        b = torch.ones(N,1).to(device).to(DTYPE)+0j

    if batch:
        img,pha,act = wgs_solver_batch(A,b,iterations)
    else:
        img,pha,act = wgs_solver_unbatched(A,b,iterations)

    if return_components:
        return img,pha,act
    return act


def gspat_solver(R,forward, backward, target, iterations,return_components=False):
    '''
    @private
    GS-PAT Solver for transducer phases\\
    `R` R Matrix\\
    `forward` forward propagation matrix\\
    `backward` backward propagation matrix\\
    `target` initial guess - can use `torch.ones(N,1).to(device)+0j`\\
    `iterations` Number of iterations to use\\
    returns (hologram, point activations)
    '''
    #Written by Giorgos Christopoulos 2022
    field = target 

    for _ in range(iterations):
        
#     amplitude constraint, keeps phase imposes desired amplitude ratio among points     
        target_field = constrain_field(field, target)
#     backward and forward propagation at once
        field = torch.matmul(R,target_field)
#     AFTER THE LOOP
#     impose amplitude constraint and keep phase, after the iterative part this step is different following Diego's formulation
    target_field = torch.multiply(target**2,torch.divide(field,torch.abs(field)**2))
#     back propagate 
    complex_hologram = torch.matmul(backward,target_field)
#     keep phase 
    phase_hologram = torch.divide(complex_hologram,torch.abs(complex_hologram))
    if return_components:
        points = torch.matmul(forward,phase_hologram)

        return phase_hologram, points
    else:
        return phase_hologram


def gspat(points:Tensor|None=None, board:Tensor|None=None,A:Tensor|None=None,B:Tensor|None=None, 
          R:Tensor|None=None ,b:Tensor|None = None, iterations:int=200, return_components:bool=False, p_ref=c.P_ref, k=c.k, transducer_radius = c.radius, norms=None) -> Tensor:
    '''
    GSPAT Solver\n
    :param points: Target point positions
    :param board: The Transducer array, if None uses default two 16x16 arrays
    :param A: The Forward propagation matrix, if `None` will be computed 
    :param B: The backwards propagation matrix, if `None` will be computed 
    :param R: The R propagation matrix, if `None` will be computed 
    :param b: initial guess - If None will use `torch.ones(N,1).to(device)+0j`
    :param iterations: Number of iterations to use
    :param return_components: If True will return `hologram, pressure` else will return `hologram`, default False
    :return: Hologram
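
    A minimal usage sketch, following the pattern of the other solver examples (assumes `create_points` and `propagate_abs` from `acoustools.Utilities`):

    ```Python
    from acoustools.Solvers import gspat
    from acoustools.Utilities import create_points, propagate_abs

    p = create_points(2,1)
    x = gspat(p)
    print(propagate_abs(x,p))
    ```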
    '''

    if board is None:
        board = TRANSDUCERS

    if A is None:
        A = forward_model(points,board,p_ref=p_ref,norms=norms, k=k, transducer_radius=transducer_radius)
    if B is None:
        B = torch.conj(A).mT.to(DTYPE)
    if R is None:
        R = A@B

    if b is None:
        if is_batched_points(points):
            b = torch.ones(points.shape[2],1).to(device).to(DTYPE)
        else:
            b = torch.ones(points.shape[1],1).to(device).to(DTYPE)
    
    
    if return_components:
        phase_hologram,pres = gspat_solver(R,A,B,b, iterations,return_components)
        return phase_hologram,pres
    
    phase_hologram = gspat_solver(R,A,B,b, iterations,return_components)
    return phase_hologram


def naive_solver_batched(points,board=TRANSDUCERS, activation=None, A=None, p_ref=c.P_ref, k=c.k, transducer_radius = c.radius, norms=None):
    '''
    @private
    Batched naive (backpropagation) algorithm for phase retrieval\\
    `points` Target point positions\\
    `board` The Transducer array, default two 16x16 arrays\\
    `activation` Initial starting point activation \\
    returns (point activations, hologram)
    '''
    if activation is None:
        activation = torch.ones(points.shape[2],1, device=device, dtype=DTYPE) +0j
    
    if A is None:
        A = forward_model_batched(points,board,p_ref=p_ref, k=k, transducer_radius=transducer_radius, norms=norms)
    back = torch.conj(A).mT
    trans = back@activation
    trans_phase=  constrain_amplitude(trans)
    out = A@trans_phase


    return out, trans_phase

def naive_solver_unbatched(points,board=TRANSDUCERS, activation=None,A=None, p_ref=c.P_ref, k=c.k, transducer_radius = c.radius, norms=None):
    '''
    @private
    Unbatched naive (backpropagation) algorithm for phase retrieval\\
    `points` Target point positions\\
    `board` The Transducer array, default two 16x16 arrays\\
    `activation` Initial starting point activation \\
    returns (point activations, hologram)
    '''
    if activation is None:
        activation = torch.ones(points.shape[1]) +0j
        activation = activation.to(device)
    if A is None:
        A = forward_model(points,board,p_ref=p_ref, k=k, transducer_radius=transducer_radius, norms=norms)
    back = torch.conj(A).T
    trans = back@activation
    trans_phase=  constrain_amplitude(trans)
    out = A@trans_phase


    return out, trans_phase

def naive(points:Tensor, board:Tensor|None = None, return_components:bool=False, activation:Tensor|None=None, A=None, 
          p_ref=c.P_ref, k=c.k, transducer_radius = c.radius, norms=None, iterations=None) -> Tensor:
    '''
    Naive solver\n
    :param points: Target point positions
    :param board: The Transducer array, default two 16x16 arrays
    :param return_components: If `True` will return `hologram, pressure` else will return `hologram`, default False
    :param iterations: Ignored - included for compatibility
    :param A: propagator to use
    :return: hologram
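
    A minimal usage sketch (assumes `create_points` and `propagate_abs` from `acoustools.Utilities`):

    ```Python
    from acoustools.Solvers import naive
    from acoustools.Utilities import create_points, propagate_abs

    p = create_points(2,1)
    x = naive(p)
    print(propagate_abs(x,p))
    ```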
    '''
    if board is None:
        board = TRANSDUCERS
    if is_batched_points(points):
        out,act = naive_solver_batched(points,board=board, activation=activation, A=A, p_ref=p_ref, k=k, transducer_radius=transducer_radius, norms=norms)
    else:
        out,act = naive_solver_unbatched(points,board=board, activation=activation, A=A, p_ref=p_ref, k=k, transducer_radius=transducer_radius, norms=norms)
    if return_components:
        return act, out
    return act

def ph_thresh(z_last,z,threshold):
    '''
    @private
    Phase threshold between two timesteps' point phases, clamps phase changes above `threshold` to be `threshold`\\
    `z_last` point activation at timestep t-1\\
    `z` point activation at timestep t\\
    `threshold` maximum allowed phase change\\
    returns constrained point activations
    '''

    ph1 = torch.angle(z_last)
    ph2 = torch.angle(z)
    dph = ph2 - ph1
    
    dph = torch.atan2(torch.sin(dph),torch.cos(dph)) 
    
    dph[dph>threshold] = threshold
    dph[dph<-1*threshold] = -1*threshold
    
    ph2 = ph1 + dph
    z = abs(z)*torch.exp(1j*ph2)
    
    return z

def soft(x,threshold):
    '''
    @private
    Soft threshold for a set of phase changes, will return change - threshold if change > threshold else 0\\
    `x` phase changes\\
    `threshold` Maximum allowed hologram phase change\\
    returns new phase changes
    '''
    y = torch.clamp(torch.abs(x) - threshold, min=0)  # elementwise max(|x| - threshold, 0)
    y = y * torch.sign(x)
    return y

def ph_soft(x_last,x,threshold):
    '''
    @private
    Soft thresholding for holograms \\
    `x_last` Hologram from timestep t-1\\
    `x` Hologram from timestep t \\
    `threshold` Maximum allowed phase change\\
    returns constrained hologram
    '''
    pi = torch.pi
    ph1 = torch.angle(x_last)
    ph2 = torch.angle(x)
    dph = ph2 - ph1

    dph[dph>pi] = dph[dph>pi] - 2*pi
    dph[dph<-1*pi] = dph[dph<-1*pi] + 2*pi

    dph = soft(dph,threshold)
    ph2 = ph1 + dph
    x = abs(x)*torch.exp(1j*ph2)
    return x

def temporal_wgs(A:Tensor, y:Tensor, K:int,ref_in:Tensor, ref_out:Tensor,T_in:float,T_out:float) -> Tensor:
    '''
    Based on:
    Giorgos Christopoulos, Lei Gao, Diego Martinez Plasencia, Marta Betcke, 
    Ryuji Hirayama, and Sriram Subramanian. 2024. 
    Temporal acoustic point holography. https://doi.org/10.1145/3641519.3657443 \n
    WGS solver for holograms where the phase change between frames is constrained \n
    :param A: Forward model to use
    :param y: initial guess - normally use `torch.ones(N,1).to(device)+0j`
    :param K: Number of iterations to use
    :param ref_in: Previous timestep's hologram
    :param ref_out: Previous timestep's point activations
    :param T_in: Hologram phase change threshold
    :param T_out: Point activations phase change threshold
    :return: (hologram image, point phases, hologram)


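    A hedged usage sketch: the point offset and the pi/8 thresholds are illustrative only, and `create_points`, `forward_model`, `TRANSDUCERS` and `device` are assumed to come from `acoustools.Utilities`:

    ```Python
    import torch
    from acoustools.Solvers import wgs, temporal_wgs
    from acoustools.Utilities import create_points, forward_model, TRANSDUCERS, device

    # Solve frame t-1 to get a reference hologram and point activations
    p = create_points(2,1)
    A = forward_model(p, TRANSDUCERS)
    _, ref_out, ref_in = wgs(p, A=A, return_components=True)

    # Re-solve for slightly moved points, limiting the per-frame phase change
    p2 = p.clone()
    p2[:,2,:] += 0.0005
    A2 = forward_model(p2, TRANSDUCERS)
    y0 = torch.ones(p2.shape[2],1).to(device)+0j
    img, pha, x = temporal_wgs(A2, y0, 200, ref_in, ref_out, torch.pi/8, torch.pi/8)
    ```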
    '''
    #ref_out -> points
    #ref_in-> transducers
    AT = torch.conj(A).mT.to(device)
    y0 = y.to(device)
    x = torch.ones(A.shape[2],1).to(device) + 0j
    for kk in range(K):
        z = torch.matmul(A,x)                                   # forward propagate
        z = z/torch.max(torch.abs(z),dim=1,keepdim=True).values # normalize forward propagated field (useful for next step's division)
        z = ph_thresh(ref_out,z,T_out); 
        
        y = torch.multiply(y0,torch.divide(y,torch.abs(z)))     # update target - current target over normalized field
        y = y/torch.max(torch.abs(y),dim=1,keepdim=True).values # normalize target
        p = torch.multiply(y,torch.divide(z,torch.abs(z)))      # keep phase, apply target amplitude
        r = torch.matmul(AT,p)                                  # backward propagate
        x = torch.divide(r,torch.abs(r))                        # keep phase for hologram
        x = ph_thresh(ref_in,x,T_in);    
    return y, p, x





def gradient_descent_solver(points: Tensor, objective: FunctionType, board:Tensor|None=None, optimiser:torch.optim.Optimizer=torch.optim.Adam, lr: float=0.01, 
                            objective_params:dict={}, start:Tensor|None=None, iters:int=200, 
                            maximise:bool=False, targets:Tensor=None, constrains:FunctionType=constrain_phase_only, log:bool=False, return_loss:bool=False,
                            scheduler:torch.optim.lr_scheduler.LRScheduler=None, scheduler_args:dict=None, save_each_n:int = 0, save_set_n:list[int] = None,
                            init_type:Literal['rand', 'ones','focal','trap']|Tensor='rand',
                            p_ref=c.P_ref, k=c.k, transducer_radius = c.radius, norms=None) -> Tensor:
    '''
    Solves phases using gradient descent\n
    :param points: Target point positions 
    :param objective: Objective function - must take an input of (`transducer_phases, points, board, targets, **objective_params`), `targets` may be `None` for unsupervised
    :param board: The Transducer array, default two 16x16 arrays
    :param optimiser: Optimiser to use (should be compatible with the interface from `torch.optim`). Default `torch.optim.Adam`
    :param lr: Learning Rate to use. Default `0.01`
    :param objective_params: Any parameters to be passed to `objective` as a dictionary of `{parameter_name:parameter_value}` pairs. Default `{}`
    :param start: Initial guess. If None will default to a random initialisation of phases 
    :param iters: Number of optimisation Iterations. Default 200
    :param maximise: Set to `True` to maximise the objective, else minimise. Default `False`
    :param targets: Targets to optimise towards for supervised optimisation, unsupervised if set to `None`. Default `None`
    :param constrains: Constraints to apply to result 
    :param log: If `True` prints the objective values at each step. Default `False`
    :param return_loss: If `True` save and return objective values for each step as well as the optimised result 
    :param scheduler: Learning rate scheduler to use, if `None` no scheduler is used. Default `None` 
    :param scheduler_args: Parameters to pass to `scheduler`
    :param save_each_n: For n>0 will save the optimiser results at every n steps. Set either `save_each_n` or `save_set_n`
    :param save_set_n: List containing exact iterations to save optimiser results at. Set either `save_each_n` or `save_set_n`
    :param init_type: type of initialisation to use. rand: random, ones: tensor of 1s, focal: naive focal point, trap: two focal points offset in the z-axis
    :return: optimised result and optionally the objective values and results (see `return_loss`, `save_each_n` and `save_set_n`). If either are returned both will be returned but may be empty if not asked for
    
    ```Python
    from acoustools.Optimise.Objectives import propagate_abs_sum_objective
    from acoustools.Solvers import gradient_descent_solver
    from acoustools.Optimise.Constraints import constrain_phase_only
    from acoustools.Utilities import create_points, propagate_abs

    p = create_points(4,2)
    x = gradient_descent_solver(p,propagate_abs_sum_objective, 
                                maximise=True, constrains=constrain_phase_only, 
                                log=False, lr=1e-1)

    print(propagate_abs(x,p))

    ```
    '''


    if board is None:
        board = TRANSDUCERS

    losses = []
    results = {}
    B = points.shape[0] if points is not None else 1
    N = points.shape[2] if points is not None else 1
    M = board.shape[0]
    if start is None:
        # start = torch.ones((B,M,1)).to(device) +0j
        if type(init_type) == Tensor:
            start = init_type
        else: 
            if init_type == 'ones':
                start = torch.ones((B,M,1))
            elif init_type == 'focal':

                start = naive(points, board=board,return_components=False, p_ref=p_ref, k=k, transducer_radius=transducer_radius, norms=norms)
            elif init_type == 'trap':
                new_points = points.expand(B,3,2*N).clone()
                SCALE = 2
                new_points[:,2,:N] -= Constants.wavelength / SCALE
                new_points[:,2,N:] += Constants.wavelength / SCALE
                target_phases = torch.zeros(B,2*N)
                # target_phases[:,N:] = Constants.pi
                activation = torch.exp(1j * target_phases).unsqueeze(2).to(device)


                start = naive(new_points, board, return_components=False, activation=activation, p_ref=p_ref, k=k, transducer_radius=transducer_radius, norms=norms)

            else: #rand is default
                start = torch.exp(1j*torch.rand((B,M,1))*torch.pi)

        start=start.to(device).to(DTYPE)
    
    
    # param = torch.nn.Parameter(start).to(device)
    param = start.requires_grad_()
    optim = optimiser([param],lr)
    if scheduler is not None:
        scheduler = scheduler(optim,**scheduler_args)


    for epoch in range(iters):
        optim.zero_grad()

        loss = objective(param, points, board, targets, **objective_params)

        if log:
            print(epoch, loss.data.item())

        if maximise:
            loss *= -1
        
        if return_loss:
            losses.append(loss)
        
        if save_each_n > 0 and epoch % save_each_n == 0:
            results[epoch] = param.clone().detach()
        elif save_set_n is not None and epoch in save_set_n:
            results[epoch] = param.clone().detach()

        
        loss.backward(torch.tensor([1]*B).to(device))
        optim.step()
        if scheduler is not None:
            scheduler.step()
        
        if constrains is not None:
            param.data = constrains(param)

    if return_loss or save_each_n > 0:
        return param, losses, results
    

    return param


def iterative_backpropagation(points:Tensor,iterations:int = 200, board:Tensor|None = None, A:Tensor|None = None, 
                              b:Tensor|None=None, return_components: bool=False, p_ref=c.P_ref, k=c.k, transducer_radius = c.radius, norms=None) -> list[Tensor]:
    '''
    IB solver for transducer phases\n
    :param points: Points to use
    :param iterations: Number of iterations to run, default `200`
    :param board: The Transducer array, default two 16x16 arrays
    :param A: Forward model matrix to use 
    :param b: initial guess - If `None` will use `torch.ones(N,1).to(device)+0j`
    :param return_components: If True will return `point pressure, point phases, hologram` else will return `hologram`, default False
    :return: (point pressure, point phases, hologram)

    ```Python
    from acoustools.Solvers import iterative_backpropagation
    from acoustools.Utilities import create_points, propagate_abs

    p = create_points(2,1)
    print(p)
    x = iterative_backpropagation(p)
    print(propagate_abs(x,p))
    
    p = p.squeeze(0)
    print(p)
    x = iterative_backpropagation(p)
    print(propagate_abs(x,p))

    ```
    '''

    if board is None:
        board  = TRANSDUCERS
    
    if len(points.shape) > 2:
        N = points.shape[2]
        batch=True
    else:
        N = points.shape[1]
        batch=False

    if A is None:
        A = forward_model(points, board, p_ref=p_ref, k=k, transducer_radius=transducer_radius, norms=norms)
    
    if batch:
        M = A.shape[2]
    else:
        M = A.shape[1]


    if b is None:
        b = torch.ones(N,1).to(device).to(DTYPE) +0j
    
    AT =  torch.conj(A).mT.to(device)
    x = torch.ones(M,1).to(device).to(DTYPE) 
    for kk in range(iterations):
        p = A@x
        p = constrain_field(p,b)
        x = AT@p
        x = constrain_amplitude(x)
    y =  torch.abs(A@x) 
    if return_components:
        return y, p, x
    else:
        return x



def gorkov_target(points:Tensor, objective:FunctionType = target_gorkov_BEM_mse_sine_objective,
                  board:Tensor=None, U_targets:Tensor=None, iterations:int=100, lr:float=1e9,
                  constraint:FunctionType=sine_amplitude, reflector:Mesh|None=None, path:str|None=None) -> Tensor:
    '''
    Phase retrieval to generate target Gor'kov values at points via `acoustools.Solvers.gradient_descent_solver` \n
    :param points: points of interest
    :param objective: Objective function to minimise, default `acoustools.Optimise.Objectives.target_gorkov_BEM_mse_sine_objective`
    :param board: Board to use. Default `acoustools.Utilities.TOP_BOARD`
    :param U_targets: Target Gor'kov values
    :param iterations: Iterations to use for the solver
    :param lr: learning rate
    :param constraint: constraint function to use in the optimiser. default `acoustools.Optimise.Constraints.sine_amplitude`
    :param reflector: Mesh to use as reflector or None
    :param path: BEM path
    :return: hologram
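
    A minimal call using the function's own defaults (default target of -1e-5, default `TOP_BOARD`, no reflector); pass a `vedo` Mesh as `reflector` and a BEM cache `path` when a reflector is used:

    ```Python
    from acoustools.Solvers import gorkov_target
    from acoustools.Utilities import create_points

    p = create_points(1,1)
    x = gorkov_target(p, iterations=50)
    ```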
    '''

    if board is None:
        board = TOP_BOARD

    if type(U_targets) == float:
        U_targets = torch.tensor([U_targets])
    
    if U_targets is None:
        U_targets = torch.tensor([-1e-5])

    x = gradient_descent_solver(points, objective, 
                                board, log=True, targets=U_targets, iters=iterations, 
                                lr=lr, constrains=constraint, objective_params={'root':path,'reflector':reflector})
    
    return x


def kd_solver(points:Tensor, board:Tensor|None = None,k=c.k):
    '''
    Solver for one point by setting the phases to be equal at the target point \n
    see `A volumetric display for visual, tactile and audio presentation using acoustic trapping`\n
    :param points: point to use - must be only one point
    :param board: Transducers, if None will use two 16x16 arrays
    :param k: wavenumber 
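
    A minimal usage sketch (assumes `create_points`, `propagate_abs` and `TRANSDUCERS` from `acoustools.Utilities`):

    ```Python
    from acoustools.Solvers import kd_solver
    from acoustools.Utilities import create_points, propagate_abs, TRANSDUCERS

    p = create_points(1,1)
    x = kd_solver(p, board=TRANSDUCERS)
    print(propagate_abs(x,p))
    ```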
    '''
    if board is None:
        board = TRANSDUCERS
    B = points.shape[0]
    M = board.shape[0]

    b = board.unsqueeze(0).permute((0,2,1))
    p = points.expand(B,3,M)

    distance = torch.sqrt(torch.sum((p - b)**2,dim=1)).unsqueeze_(0).mT
    distance = distance.to(device).to(DTYPE)
    return torch.exp(1j * -1* distance*k)


def translate_hologram(x:Tensor,board:Tensor|None=None, dx:float=0, dy:float=0, dz:float=0, k:float=c.k):

    '''
    Translates an existing hologram by (dx,dy,dz)\n
    :param x: Hologram
    :param board: Transducers, if None will use two 16x16 arrays
    :param dx: x translation
    :param dy: y translation
    :param dz: z translation
    :param k: wavenumber 
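
    A minimal usage sketch (the 1 mm z-offset is illustrative; assumes `create_points` and `propagate_abs` from `acoustools.Utilities`):

    ```Python
    from acoustools.Solvers import wgs, translate_hologram
    from acoustools.Utilities import create_points, propagate_abs

    p = create_points(1,1)
    x = wgs(p)
    x2 = translate_hologram(x, dz=0.001)  # shift the field ~1 mm along z
    print(propagate_abs(x2, p))
    ```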
    '''
    
    if board is None:
        board = TRANSDUCERS

    p2 =  create_points(1,1,dx,dy,dz)

    B = p2.shape[0]
    M = board.shape[0]

    b = board.unsqueeze(0).permute((0,2,1))
    p2 = p2.expand(B,3,M)

    distance_p2 = torch.sqrt(torch.sum((p2 - b)**2,dim=1)).unsqueeze_(0).mT.to(device).to(DTYPE)
    distance_p = torch.sqrt(torch.sum((b)**2,dim=1)).unsqueeze_(0).mT.to(device).to(DTYPE)

    distance = distance_p-distance_p2

    kd = (k*distance)

    phase = torch.angle(x)
    phase_2 = phase + kd
    x2 = torch.exp(1j * phase_2)

    return x2