pnpa_f Subroutine

public subroutine pnpa_f()

Calculate Passive NPA flux using a fast-ion distribution function F(E,p,r,z)

Arguments

None

Calls

pnpa_f directly calls the following procedures:

- get_nlaunch_pass_grid — distribute Monte Carlo markers over passive-grid cells
- ind2sub — convert a linear cell index to (i,j,k) subscripts
- get_plasma — fetch local plasma profiles at a position/cell
- mc_fastion_pass_grid — sample the fast-ion distribution within a cell
- gyro_surface, npa_gyro_range, gyro_trajectory — gyro-orbit geometry and detector-visible gyro-angle ranges
- hit_npa_detector — test whether a trajectory reaches an NPA detector
- bt_cx_rates — beam-thermal charge-exchange rates
- attenuate — attenuate neutral states along the flight path
- store_npa — accumulate the passive NPA flux
- parallel_sum (generic interface) — MPI reduction of results

(The rendered call graph additionally shows the transitive dependencies of these
procedures — e.g. get_fields, get_distribution, colrad, the random-number helpers
randu/randind, the parallel_sum_* specific procedures backed by mpi_allreduce, and
the eigen/linsolve linear-algebra helpers.)

Called by

pnpa_f is called by the main program fidasim.

Contents

Source Code


Source Code

subroutine pnpa_f
    !+ Calculate Passive NPA flux using a fast-ion distribution function F(E,p,r,z)
    !+
    !+ Monte Carlo procedure: for each passive-grid cell that can contribute,
    !+ sample fast ions from the distribution, trace gyro-orbits toward each
    !+ NPA detector, charge-exchange against thermal neutrals, attenuate the
    !+ resulting neutral through the plasma, and accumulate the detected flux.
    integer :: i,j,k,det,ic
    integer(Int64) :: iion
    !! ri: neutralization (birth) position, rf: detector hit position, vi: velocity
    real(Float64), dimension(3) :: ri,rf,vi
    integer, dimension(3) :: ind
    real(Float64) :: denf
    type(LocalProfiles) :: plasma
    type(LocalEMFields) :: fields
    type(GyroSurface) :: gs
    !! Up to 4 gyro-angle intervals (start angle, angular extent) per detector
    real(Float64), dimension(2,4) :: gyrange
    real(Float64), dimension(nlevs) :: rates, rates_is
    real(Float64), dimension(nlevs) :: states
    real(Float64) :: flux, theta, dtheta, eb, ptch, max_papprox

    integer :: ichan,nrange,ir,npart,ncell, is
    integer, dimension(pass_grid%ngrid) :: cell_ind
    !! papprox: relative signal estimate per cell, used to weight marker launches
    real(Float64), dimension(pass_grid%nr,pass_grid%nz,pass_grid%nphi) :: papprox
    integer(Int32), dimension(pass_grid%nr,pass_grid%nz,pass_grid%nphi) :: nlaunch

    !! Estimate each cell's contribution as (total neutral density) x (fast-ion density)
    papprox=0.d0
    do ic=1,pass_grid%ngrid
        call ind2sub(pass_grid%dims,ic,ind)
        i = ind(1) ; j = ind(2) ; k = ind(3)
        call get_plasma(plasma,ind=ind,input_coords=2)
        if(.not.plasma%in_plasma) cycle
        papprox(i,j,k) = sum(plasma%denn)*plasma%denf
    enddo

    !! Discard cells contributing less than 1e-6 of the peak estimate
    max_papprox = maxval(papprox)
    where (papprox.lt.(max_papprox*1.d-6))
        papprox = 0.d0
    endwhere

    !! Compact list of contributing cells so the parallel loop skips empty ones
    ncell = 0
    do ic=1,pass_grid%ngrid
        call ind2sub(pass_grid%dims,ic,ind)
        i = ind(1) ; j = ind(2) ; k = ind(3)
        if(papprox(i,j,k).gt.0.0) then
            ncell = ncell + 1
            cell_ind(ncell) = ic
        endif
    enddo

    !! Distribute inputs%n_pnpa markers among cells, weighted by papprox
    call get_nlaunch_pass_grid(inputs%n_pnpa, papprox, nlaunch)
    if(inputs%verbose.ge.1) then
        write(*,'(T6,"# of markers: ",i12)') sum(nlaunch)
    endif

    !! Loop over all cells that can contribute to NPA signal
    !! (istart/istep split the cell list across MPI ranks and/or OpenMP threads)
    !$OMP PARALLEL DO schedule(dynamic,1) private(ic,i,j,k,ind,iion,ichan,fields,nrange,gyrange, &
    !$OMP& vi,ri,rf,det,plasma,rates,is, rates_is,states,flux,denf,eb,ptch,gs,ir,theta,dtheta)
    loop_over_cells: do ic = istart, ncell, istep
        call ind2sub(pass_grid%dims,cell_ind(ic),ind)
        i = ind(1) ; j = ind(2) ; k = ind(3)
        loop_over_fast_ions: do iion=1, nlaunch(i, j, k)
            !! Sample fast ion distribution for energy and pitch
            call mc_fastion_pass_grid(ind, fields, eb, ptch, denf)
            if(denf.le.0.0) cycle loop_over_fast_ions

            !! Construct the gyro-orbit surface for this energy/pitch at the local field
            call gyro_surface(fields, eb, ptch, fbm%A, gs)

            detector_loop: do ichan=1,npa_chords%nchan
                !! Find gyro-angle interval(s) from which a neutral could reach the detector
                call npa_gyro_range(ichan, gs, gyrange, nrange)
                if(nrange.eq.0) cycle detector_loop
                gyro_range_loop: do ir=1,nrange
                    !! Launch from the midpoint of the visible gyro-angle interval
                    dtheta = gyrange(2,ir)
                    theta = gyrange(1,ir) + 0.5*dtheta
                    call gyro_trajectory(gs, theta, ri, vi)

                    !! Check if particle hits a NPA detector
                    call hit_npa_detector(ri, vi ,det, rf, ichan)
                    if(det.ne.ichan) then
                        !! Geometry said the detector was reachable but the ray misses:
                        !! warn and skip this interval
                        if (inputs%verbose.ge.0)then
                            write(*,*) "PNPA_F: Missed Detector ",ichan
                        endif
                        cycle gyro_range_loop
                    endif

                    !! Calculate CX probability with beam and halo neutrals,
                    !! summed over all thermal ion species
                    call get_plasma(plasma, pos=ri)
                    rates = 0.d0
                    do is=1,n_thermal
                        call bt_cx_rates(plasma, plasma%denn(:,is), thermal_mass(is), vi, rates_is)
                        rates = rates + rates_is
                    enddo
                    if(sum(rates).le.0.) cycle gyro_range_loop

                    !! Weight CX rates by ion source density
                    states=rates*denf

                    !! Attenuate states as the particle move through plasma
                    call attenuate(ri,rf,vi,states)

                    !! Store NPA Flux: gyro-phase fraction x attenuated neutral
                    !! density x cylindrical cell volume, normalized per marker
                    flux = (dtheta/(2*pi))*sum(states)*pass_grid%r(i)*pass_grid%dv/nlaunch(i,j,k)
                    call store_npa(det,ri,rf,vi,flux,passive=.True.)
                enddo gyro_range_loop
            enddo detector_loop
        enddo loop_over_fast_ions
    enddo loop_over_cells
    !$OMP END PARALLEL DO

    npart = pnpa%npart
#ifdef _MPI
    !! Combine detector hit counts and flux arrays across MPI processes
    call parallel_sum(npart)
    call parallel_sum(pnpa%flux)
#endif

    if(inputs%verbose.ge.1) then
        write(*,'(T4,"Number of Passive NPA particles that hit a detector: ",i8)') npart
    endif

end subroutine pnpa_f