npa_f Subroutine

public subroutine npa_f()

Calculate Active NPA flux using a fast-ion distribution function F(E,p,r,z)

Arguments

None

Calls

npa_f directly calls the following procedures (first-level edges of the
generated call graph; deeper edges belong to the callees' own pages):

- get_nlaunch — distribute the requested number of MC markers over beam-grid cells
- ind2sub — convert a linear beam-grid index to (i,j,k) subscripts
- get_plasma — fetch local plasma profiles for a cell
- mc_fastion — sample the fast-ion distribution (fields, energy, pitch, density)
- gyro_surface — construct the gyro-orbit surface for a sampled ion
- npa_gyro_range — find gyro-angle ranges that can reach a given detector
- gyro_trajectory — evaluate position/velocity at a gyro-angle
- hit_npa_detector — test whether the trajectory strikes the detector
- get_indices — map a position to beam-grid indices
- get_total_cx_rate — total charge-exchange rate with all neutral populations
- attenuate — attenuate neutralized states along the path to the detector
- store_npa — accumulate the detected NPA flux
- parallel_sum (generic interface) — MPI reduction of scalar/array results

Called by

npa_f is called by the main program fidasim.

Contents

Source Code


Source Code

subroutine npa_f
    !+ Calculate Active NPA flux using a fast-ion distribution function F(E,p,r,z)
    !+
    !+ Monte-Carlo procedure:
    !+ 1. Build `papprox`, a per-cell weight ~ (total neutral density) * (fast-ion density).
    !+ 2. Distribute `inputs%n_npa` markers over cells in proportion to `papprox` (get_nlaunch).
    !+ 3. For each marker: sample the fast-ion distribution, find the gyro-angle ranges that
    !+    can reach each detector, launch along the midpoint trajectory, charge-exchange with
    !+    beam/halo neutrals, attenuate to the detector, and store the resulting flux.
    !+ 4. Reduce results across MPI processes.

    !! Loop indices and scratch
    integer :: i,j,k,det,ic
    integer(Int64) :: iion
    !! rg: unused here; ri/rf: start/final positions; vi: ion velocity
    real(Float64), dimension(3) :: rg,ri,rf,vi
    !! ind: current cell subscripts; pind: beam-grid subscripts at launch point ri
    integer, dimension(3) :: ind,pind
    !! Local fast-ion density returned by mc_fastion (weights the CX rates)
    real(Float64) :: denf
    type(LocalProfiles) :: plasma
    type(LocalEMFields) :: fields
    type(GyroSurface) :: gs
    !! gyrange(1,ir) = start angle, gyrange(2,ir) = angular width of range ir
    real(Float64), dimension(2,4) :: gyrange
    !! All five neutral populations: full, half, third, dcx, halo
    integer, dimension(5) :: neut_types=[1,2,3,4,5]
    real(Float64), dimension(nlevs) :: rates
    real(Float64), dimension(nlevs) :: states
    real(Float64) :: flux, theta, dtheta, eb, ptch

    integer :: inpa,ichan,nrange,ir,npart,ncell
    !! Compacted list of linear indices of cells with papprox > 0
    integer, dimension(beam_grid%ngrid) :: cell_ind
    !! Per-cell launch weight and resulting marker counts
    real(Float64), dimension(beam_grid%nx,beam_grid%ny,beam_grid%nz) :: papprox
    integer(Int32), dimension(beam_grid%nx,beam_grid%ny,beam_grid%nz) :: nlaunch

    !! Launch-weight map: sum of all neutral densities times the local fast-ion
    !! density; cells outside the plasma keep weight 0 and are skipped.
    papprox=0.d0
    do ic=1,beam_grid%ngrid
        call ind2sub(beam_grid%dims,ic,ind)
        i = ind(1) ; j = ind(2) ; k = ind(3)
        call get_plasma(plasma,ind=ind)
        if(.not.plasma%in_plasma) cycle
        papprox(i,j,k)=(sum(neut%full%dens(:,i,j,k)) + &
                        sum(neut%half%dens(:,i,j,k)) + &
                        sum(neut%third%dens(:,i,j,k)) + &
                        sum(neut%dcx%dens(:,i,j,k)) + &
                        sum(neut%halo%dens(:,i,j,k)))* &
                        plasma%denf
    enddo

    !! Compact the grid to the cells that can actually contribute
    ncell = 0
    do ic=1,beam_grid%ngrid
        call ind2sub(beam_grid%dims,ic,ind)
        i = ind(1) ; j = ind(2) ; k = ind(3)
        if(papprox(i,j,k).gt.0.0) then
            ncell = ncell + 1
            cell_ind(ncell) = ic
        endif
    enddo

    !! Apportion the requested marker count over cells, weighted by papprox
    call get_nlaunch(inputs%n_npa, papprox, nlaunch)
    if(inputs%verbose.ge.1) then
        write(*,'(T6,"# of markers: ",i12)') sum(nlaunch)
    endif

    !! Loop over all cells that can contribute to NPA signal
    !! (istart/istep stride the cell list over processes — presumably the
    !!  MPI rank decomposition used elsewhere in this module; OpenMP threads
    !!  share the loop via the dynamic schedule)
    !$OMP PARALLEL DO schedule(dynamic,1) private(ic,i,j,k,ind,iion,ichan,fields,nrange,gyrange, &
    !$OMP& pind,vi,ri,rf,det,plasma,rates,states,flux,denf,eb,ptch,gs,ir,theta,dtheta)
    loop_over_cells: do ic = istart, ncell, istep
        call ind2sub(beam_grid%dims,cell_ind(ic),ind)
        i = ind(1) ; j = ind(2) ; k = ind(3)
        loop_over_fast_ions: do iion=1, nlaunch(i, j, k)
            !! Sample fast ion distribution for energy and pitch
            call mc_fastion(ind, fields, eb, ptch, denf)
            if(denf.le.0.0) cycle loop_over_fast_ions

            !! Gyro-surface of the sampled ion (fbm%A = fast-ion atomic mass)
            call gyro_surface(fields, eb, ptch, fbm%A, gs)

            detector_loop: do ichan=1,npa_chords%nchan
                !! Gyro-angle windows from which this ion could reach channel ichan
                call npa_gyro_range(ichan, gs, gyrange, nrange)
                if(nrange.eq.0) cycle detector_loop
                gyro_range_loop: do ir=1,nrange
                    !! Launch from the midpoint of the angular window;
                    !! dtheta is the window width used as the solid-angle weight below
                    dtheta = gyrange(2,ir)
                    theta = gyrange(1,ir) + 0.5*dtheta
                    call gyro_trajectory(gs, theta, ri, vi)

                    !! Check if particle hits a NPA detector
                    call hit_npa_detector(ri, vi ,det, rf, ichan)
                    if(det.ne.ichan) then
                        !! Should not happen if npa_gyro_range is consistent with
                        !! hit_npa_detector; warn and skip this window
                        if (inputs%verbose.ge.0)then
                            write(*,*) "NPA_F: Missed Detector ",ichan
                        endif
                        cycle gyro_range_loop
                    endif

                    !! Get beam grid indices at ri
                    call get_indices(ri,pind)

                    !! Calculate CX probability with beam and halo neutrals
                    call get_total_cx_rate(pind, ri, vi, neut_types, rates)
                    if(sum(rates).le.0.) cycle gyro_range_loop

                    !! Weight CX rates by ion source density
                    states=rates*denf

                    !! Attenuate states as the particle move through plasma
                    call attenuate(ri,rf,vi,states)

                    !! Store NPA Flux
                    !! dtheta/(2*pi): fraction of the gyro-orbit covered by this window;
                    !! beam_grid%dv: cell volume; nlaunch: per-marker normalization
                    flux = (dtheta/(2*pi))*sum(states)*beam_grid%dv/nlaunch(i,j,k)
                    call store_npa(det,ri,rf,vi,flux)
                enddo gyro_range_loop
            enddo detector_loop
        enddo loop_over_fast_ions
    enddo loop_over_cells
    !$OMP END PARALLEL DO

    !! npa%npart is presumably incremented inside store_npa — confirm there.
    !! Reduce particle count and flux over MPI processes when compiled with MPI.
    npart = npa%npart
#ifdef _MPI
    call parallel_sum(npart)
    call parallel_sum(npa%flux)
#endif

    if(inputs%verbose.ge.1) then
        write(*,'(T4,"Number of Active NPA particles that hit a detector: ",i8)') npart
    endif

end subroutine npa_f