fida_weights_mc Subroutine

public subroutine fida_weights_mc()

Calculates fast-ion D-alpha (FIDA) weight functions by Monte Carlo sampling of fast-ion energy, pitch, and position
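
As background (this is the conventional weight-function relation from the FIDA literature, stated here for context rather than taken from this page), the weights W connect a fast-ion distribution f to the predicted spectrum S of each line of sight:

$$ S(\lambda, c) = \int_{-1}^{1} \int_0^{E_\mathrm{max}} W(\lambda, E, p, c)\, f(E, p)\, dE\, dp $$

where E is energy, p is pitch, and c indexes the spectral channels; the array fweight%weight allocated in the source below stores W on this (lambda, E, p, channel) grid.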

Arguments

None

Calls

[Call graph omitted.] fida_weights_mc directly calls: get_fields, get_plasma, get_ep_denf, get_nlaunch, gyro_correction, ind2sub, my_rank, parallel_sum, randind, randu, track, write_fida_weights

Called by

fidasim (program)

Source Code

subroutine fida_weights_mc
    !+ Calculates FIDA weights
    integer :: i,j,k,ic,ncell
    integer(Int64) :: iion,ip
    real(Float64), dimension(3) :: ri,rg      !! particle (ri) and gyrocenter (rg) start positions
    real(Float64), dimension(3) :: vi     !! velocity of fast ions
    integer, dimension(3) :: ind      !! beam grid cell indices
    integer,dimension(5) :: neut_types=[1,2,3,4,5]
    logical :: los_intersect

    !! Determination of the CX rates
    type(LocalProfiles) :: plasma
    type(LocalEMFields) :: fields
    real(Float64), dimension(nlevs) :: rates !! CX rates

    !! Collisional radiative model along track
    integer :: ntrack
    integer :: jj      !! counter along track
    type(ParticleTrack),dimension(beam_grid%ntrack) :: tracks

    real(Float64) :: photons !! photon flux
    real(Float64), dimension(nlevs) :: states  !! Density of n-states
    real(Float64), dimension(nlevs) :: denn

    integer :: nwav
    real(Float64) :: etov2, energy, pitch
    real(Float64) :: dE, dP, dEdP
    real(Float64), dimension(:), allocatable :: ebarr, ptcharr
    integer, dimension(1) :: ienergy, ipitch
    real(Float64), dimension(3) :: randomu3

    !! Number of particles to launch
    real(Float64) :: fbm_denf,phase_area
    integer, dimension(beam_grid%ngrid) :: cell_ind
    real(Float64), dimension(beam_grid%nx,beam_grid%ny,beam_grid%nz) :: papprox
    integer(Int32), dimension(beam_grid%nx,beam_grid%ny,beam_grid%nz) :: nlaunch

    nwav = inputs%nlambda_wght

    !! define arrays
    !! define energy - array
    allocate(ebarr(inputs%ne_wght))
    do i=1,inputs%ne_wght
        ebarr(i)=real(i-0.5)*inputs%emax_wght/real(inputs%ne_wght)
    enddo
    dE = abs(ebarr(2)-ebarr(1))

    !! define pitch - array
    allocate(ptcharr(inputs%np_wght))
    do i=1,inputs%np_wght
        ptcharr(i)=real(i-0.5)*2./real(inputs%np_wght)-1.
    enddo
    dP = abs(ptcharr(2)-ptcharr(1))
    dEdP = dE*dP
    phase_area = dEdP*real(inputs%np_wght)*real(inputs%ne_wght)
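    !! Note: dE = emax_wght/ne_wght and dP = 2/np_wght, so
    !! phase_area = dE*dP*ne_wght*np_wght = 2*emax_wght, the area of the
    !! full (energy, pitch) domain [0, emax_wght] x [-1, 1]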

    !! allocate storage arrays
    allocate(fweight%weight(nwav,inputs%ne_wght,inputs%np_wght,spec_chords%nchan))
    allocate(fweight%mean_f(inputs%ne_wght,inputs%np_wght,spec_chords%nchan))

    if(inputs%verbose.ge.1) then
        write(*,'(T3,"Number of Channels: ",i5)') spec_chords%nchan
        write(*,'(T3,"Nlambda: ",i4)') nwav
        write(*,'(T3,"Nenergy: ",i3)') inputs%ne_wght
        write(*,'(T3,"Maximum Energy: ",f7.2)') inputs%emax_wght
        write(*,'(T3,"LOS averaged: ",a)') "False"
    endif

    !! zero out arrays
    fweight%weight = 0.d0
    fweight%mean_f = 0.d0

    etov2 = 1.d0/(v2_to_E_per_amu*inputs%ab)

    !! Estimate how many particles to launch in each cell
    papprox=0.d0
    do ic=1,beam_grid%ngrid
        call ind2sub(beam_grid%dims,ic,ind)
        i = ind(1) ; j = ind(2) ; k = ind(3)
        call get_plasma(plasma,ind=ind)
        if(.not.plasma%in_plasma) cycle
        papprox(i,j,k)=(sum(neut%full(:,i,j,k)) + &
                        sum(neut%half(:,i,j,k)) + &
                        sum(neut%third(:,i,j,k)) + &
                        sum(neut%dcx(:,i,j,k)) + &
                        sum(neut%halo(:,i,j,k)))
    enddo
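    !! papprox is proportional to the total beam + halo neutral density
    !! in each cell; get_nlaunch uses it below to weight how many
    !! markers are launched per cell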

    ncell = 0
    do ic=1,beam_grid%ngrid
        call ind2sub(beam_grid%dims,ic,ind)
        i = ind(1) ; j = ind(2) ; k = ind(3)
        if(papprox(i,j,k).gt.0.0) then
            ncell = ncell + 1
            cell_ind(ncell) = ic
        endif
    enddo

    call get_nlaunch(10*inputs%n_fida, papprox, nlaunch)
    if(inputs%verbose.ge.1) then
        write(*,'(T6,"# of markers: ",i10)') sum(nlaunch)
    endif

    !! Loop over all cells that have neutrals
    !$OMP PARALLEL DO schedule(guided) private(ic,i,j,k,ind,iion,vi,ri,rg,ienergy,ipitch, &
    !$OMP tracks,ntrack,jj,plasma,fields,rates,denn,states,photons,energy,pitch, &
    !$OMP los_intersect,randomu3,fbm_denf)
    loop_over_cells: do ic = istart, ncell, istep
        call ind2sub(beam_grid%dims,cell_ind(ic),ind)
        i = ind(1) ; j = ind(2) ; k = ind(3)
        loop_over_fast_ions: do iion=1, nlaunch(i, j, k)
            !! Sample fast-ion distribution uniformly
            call randind(inputs%ne_wght, ienergy)
            call randind(inputs%np_wght, ipitch)
            call randu(randomu3)
            energy = ebarr(ienergy(1)) + dE*(randomu3(1)-0.5)
            pitch = ptcharr(ipitch(1)) + dP*(randomu3(2)-0.5)
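            !! The random offsets jitter each sample within its
            !! (energy, pitch) bin so bins are covered uniformly rather
            !! than only at bin centers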
            if(energy.le.0) cycle loop_over_fast_ions

            call randu(randomu3)
            rg = [beam_grid%xc(i),beam_grid%yc(j),beam_grid%zc(k)] + beam_grid%dr*(randomu3-0.5)
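            !! rg is uniform within cell (i,j,k): the cell center plus an
            !! offset of up to +/- dr/2 in each dimension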

            !! Get velocity
            call get_fields(fields,pos=rg)
            if(.not.fields%in_plasma) cycle loop_over_fast_ions
            call gyro_correction(fields,energy,pitch,ri,vi)

            fbm_denf = 0.0
            if (inputs%dist_type.eq.1) then
                call get_ep_denf(energy,pitch,fbm_denf,coeffs=fields%b)
            endif

            !! Find the particles path through the beam grid
            call track(ri, vi, tracks, ntrack, los_intersect)
            if(.not.los_intersect) cycle loop_over_fast_ions
            if(ntrack.eq.0) cycle loop_over_fast_ions

            !! Calculate CX probability with beam and halo neutrals
            call get_beam_cx_rate(tracks(1)%ind, ri, vi, beam_ion, neut_types, rates)
            if(sum(rates).le.0.) cycle loop_over_fast_ions
            states=rates*1.d20
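            !! Seed colrad with a reference density of 1e20; the matching
            !! 1.d-20 factor in the final normalization below removes
            !! this scale from the weights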

            !! Calculate the spectra produced in each cell along the path
            loop_along_track: do jj=1,ntrack
                call get_plasma(plasma,pos=tracks(jj)%pos)

                call colrad(plasma,beam_ion, vi, tracks(jj)%time, states, denn, photons)

                call store_fw_photons(ienergy(1), ipitch(1), &
                     tracks(jj)%pos, vi, fbm_denf, photons/nlaunch(i,j,k))
            enddo loop_along_track
        enddo loop_over_fast_ions
    enddo loop_over_cells
    !$OMP END PARALLEL DO

    fweight%weight = ((1.d-20)*phase_area/dEdP)*fweight%weight
    fweight%mean_f = ((1.d-20)*phase_area/dEdP)*fweight%mean_f
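    !! Since phase_area/dEdP = ne_wght*np_wght, this rescales the
    !! per-marker photon sums by the number of (E,p) bins and applies
    !! the 1.d-20 that cancels the seed density above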

    if(inputs%verbose.ge.1) then
        write(*,*) 'write fida weights:    ' , time(time_start)
    endif

#ifdef _MPI
    call parallel_sum(fweight%weight)
    call parallel_sum(fweight%mean_f)
    if(my_rank().eq.0) call write_fida_weights()
#else
    call write_fida_weights()
#endif

end subroutine fida_weights_mc
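
As a usage note, the written weight array can be combined with a fast-ion distribution f(E,p) to reconstruct a spectrum for one channel. The following is a minimal sketch, not FIDASIM API: the file-reading step is elided and all names (apply_fida_weights, weight, fbm, spectrum) are illustrative.

program apply_fida_weights
    implicit none
    integer, parameter :: nwav = 10, ne = 8, np = 6
    real(8) :: weight(nwav,ne,np)   !! W(lambda,E,p) for one channel
    real(8) :: fbm(ne,np)           !! fast-ion distribution f(E,p)
    real(8) :: spectrum(nwav)
    real(8) :: dE, dP
    integer :: ie, ip

    !! Placeholder values; in practice read weight from the HDF5 file
    !! produced by write_fida_weights and fbm from the distribution input
    weight = 1.d0 ; fbm = 1.d0
    dE = 100.d0/ne ; dP = 2.d0/np

    !! spectrum(lambda) = sum over (E,p) of W(lambda,E,p)*f(E,p)*dE*dP
    spectrum = 0.d0
    do ip = 1, np
        do ie = 1, ne
            spectrum = spectrum + weight(:,ie,ip)*fbm(ie,ip)*dE*dP
        enddo
    enddo

    write(*,*) 'peak signal: ', maxval(spectrum)
end program apply_fida_weights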