SURFEX  V8_0
Surfex V8_0 release
lib_mpp.F90
1 !SFX_LIC Copyright 1994-2014 CNRS, Meteo-France and Universite Paul Sabatier
2 !SFX_LIC This is part of the SURFEX software governed by the CeCILL-C licence
3 !SFX_LIC version 1. See LICENSE, CeCILL-C_V1-en.txt and CeCILL-C_V1-fr.txt
4 !SFX_LIC for details.
5 !GLT_LIC The GELATO model is a seaice model used in stand-alone or embedded mode.
6 !GLT_LIC It has been developed by Meteo-France. The holder of GELATO is Meteo-France.
7 !GLT_LIC
8 !GLT_LIC This software is governed by the CeCILL-C license under French law and abiding
9 !GLT_LIC by the rules of distribution of free software. See the CeCILL-C_V1-en.txt
10 !GLT_LIC (English) and CeCILL-C_V1-fr.txt (French) for details. The CeCILL is a free
11 !GLT_LIC software license, explicitly compatible with the GNU GPL
12 !GLT_LIC (see http://www.gnu.org/licenses/license-list.en.html#CeCILL)
13 !GLT_LIC
14 !GLT_LIC The CeCILL-C licence agreement grants users the right to modify and re-use the
15 !GLT_LIC software governed by this free software license. The exercising of this right
16 !GLT_LIC is conditional upon the obligation to make available to the community the
17 !GLT_LIC modifications made to the source code of the software so as to contribute to
18 !GLT_LIC its evolution.
19 !GLT_LIC
20 !GLT_LIC In consideration of access to the source code and the rights to copy, modify
21 !GLT_LIC and redistribute granted by the license, users are provided only with a limited
22 !GLT_LIC warranty and the software's author, the holder of the economic rights, and the
23 !GLT_LIC successive licensors only have limited liability. In this respect, the risks
24 !GLT_LIC associated with loading, using, modifying and/or developing or reproducing the
25 !GLT_LIC software by the user are brought to the user's attention, given its Free
26 !GLT_LIC Software status, which may make it complicated to use, with the result that its
27 !GLT_LIC use is reserved for developers and experienced professionals having in-depth
28 !GLT_LIC computer knowledge. Users are therefore encouraged to load and test the
29 !GLT_LIC suitability of the software as regards their requirements in conditions enabling
30 !GLT_LIC the security of their systems and/or data to be ensured and, more generally, to
31 !GLT_LIC use and operate it in the same conditions of security.
32 !GLT_LIC
33 !GLT_LIC The GELATO software is currently distributed with the SURFEX software, available at
34 !GLT_LIC http://www.cnrm.meteo.fr/surfex. The fact that you download the software implies that
35 !GLT_LIC you have knowledge of the CeCILL-C license and that you accept its terms.
36 !GLT_LIC Attempts to use this software in a way not complying with CeCILL-C license
37 !GLT_LIC may lead to prosecution.
38 !GLT_LIC
39 MODULE lib_mpp
40  !!======================================================================
41  !! *** MODULE lib_mpp ***
42  !! Ocean numerics: massively parallel processing library
43  !!=====================================================================
44  !! History : OPA ! 1994 (M. Guyon, J. Escobar, M. Imbard) Original code
45  !! 7.0 ! 1997 (A.M. Treguier) SHMEM additions
46  !! 8.0 ! 1998 (M. Imbard, J. Escobar, L. Colombet ) SHMEM and MPI
47  !! ! 1998 (J.M. Molines) Open boundary conditions
48  !! NEMO 1.0 ! 2003 (J.-M. Molines, G. Madec) F90, free form
49  !! ! 2003 (J.M. Molines) add mpp_ini_north(_3d,_2d)
50  !! - ! 2004 (R. Bourdalle Badie) isend option in mpi
51  !! ! 2004 (J.M. Molines) minloc, maxloc
52  !! - ! 2005 (G. Madec, S. Masson) npolj=5,6 F-point & ice cases
53  !! - ! 2005 (R. Redler) Replacement of MPI_COMM_WORLD except for MPI_Abort
54  !! - ! 2005 (R. Benshila, G. Madec) add extra halo case
55  !! - ! 2008 (R. Benshila) add mpp_ini_ice
56  !! 3.2 ! 2009 (R. Benshila) SHMEM suppression, north fold in lbc_nfd
57  !! 3.2 ! 2009 (O. Marti) add mpp_ini_znl
58  !! 4.0 ! 2011 (G. Madec) move ctl_ routines from in_out_manager
59  !!----------------------------------------------------------------------
60 
61  !!----------------------------------------------------------------------
62  !! ctl_stop : stop the run and print the error message(s)
63  !! ctl_warn : print warning message(s); the run continues
64  !! ctl_opn : Open file and check if required file is available.
65  !! get_unit : give the index of an unused logical unit
66  !!----------------------------------------------------------------------
67 #if defined key_mpp_mpi || ! defined in_nemo
68 #if ! defined in_surfex
69  !!----------------------------------------------------------------------
70  !! 'key_mpp_mpi' MPI massively parallel processing library
71  !!----------------------------------------------------------------------
72  !! lib_mpp_alloc : allocate mpp arrays
73  !! mynode : identify the processor unit
74  !! mpp_lnk : interface (defined in lbclnk) for message passing of 2d or 3d arrays (mpp_lnk_2d, mpp_lnk_3d)
75  !! mpp_lnk_3d_gather : Message passing management for two 3D arrays
76  !! mpp_lnk_e : interface (defined in lbclnk) for message passing of 2d array with extra halo (mpp_lnk_2d_e)
77  !! mpprecv :
78  !! mppsend : send an array to a neighbouring process
79  !! mppscatter :
80  !! mppgather :
81  !! mpp_min : generic interface for mppmin_int , mppmin_a_int , mppmin_real, mppmin_a_real
82  !! mpp_max : generic interface for mppmax_int , mppmax_a_int , mppmax_real, mppmax_a_real
83  !! mpp_sum : generic interface for mppsum_int , mppsum_a_int , mppsum_real, mppsum_a_real
84  !! mpp_minloc :
85  !! mpp_maxloc :
86  !! mppsync :
87  !! mppstop :
88  !! mppobc : variant of mpp_lnk for open boundary condition
89  !! mpp_ini_north : initialisation of north fold
90  !! mpp_lbc_north : north fold processors gathering
91  !! mpp_lbc_north_e : variant of mpp_lbc_north for extra outer halo
92  !!----------------------------------------------------------------------
93  USE dom_oce ! ocean space and time domain
94  USE lbcnfd ! north fold treatment
95  USE in_out_manager ! I/O manager
96 #if ! defined in_nemo
97  USE mpi
98 #endif
99 #else
100 !! case: in_surfex
101  USE modi_abor1_sfx
102  USE modd_wp
103  USE parkind1 , ONLY : jpib, jprb
104 #ifdef SFX_MPI
105  !! Case Surfex Offline with MPI
106  !! define mpp_min, mpp_max, mpp_sum for Offline Surfex case with MPI
107  USE modd_surfex_mpi, ONLY : mpi_comm_opa => ncomm
108 #ifdef in_arpege
109  !! Case Surfex in Arpege
110  USE mpl_allreduce_mod, ONLY : mpl_allreduce
111 #endif
112 #else
113  !! Case of Offline without MPI : no call to MPI,
114  !! mpp_min, mpp_max, mpp_sum are dummies (see below)
115 #endif
116 #endif
117 
118  IMPLICIT NONE
119 #if ! defined in_surfex
120  PRIVATE
121 #else
122 #ifdef SFX_MPI
123  include 'mpif.h'
124 #endif
125 #endif
126 
127 #if ! defined in_surfex
129  PUBLIC mynode, mppstop, mppsync !, mpp_comm_free
133  PUBLIC mppscatter, mppgather
135  PUBLIC mppsize, mpprank ! (PUBLIC for GELATO)
136  PUBLIC lib_mpp_alloc ! Called in nemogcm.F90
137  PUBLIC mppsend, mpprecv ! (PUBLIC for TAM)
138 #endif
139  !! * Interfaces
140  !! define generic interfaces for these routines as they are sometimes called
141  !! with scalar arguments instead of array arguments, which causes problems
142  !! for compilation on AIX, NEC and SGI systems. OK on COMPAQ
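 !! A minimal illustrative sketch (not part of the original source) of how a
 !! caller may use these generic interfaces; the variable names below are
 !! assumptions for illustration only:
 !!   REAL(wp) :: zmax, zsum(10)
 !!   CALL mpp_max( zmax )       ! scalar case, resolves to mppmax_real
 !!   CALL mpp_sum( zsum, 10 )   ! array case, resolves to mppsum_a_real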
143  INTERFACE mpp_min
145  END INTERFACE
146  INTERFACE mpp_max
148  END INTERFACE
149  INTERFACE mpp_sum
150 #if ! defined in_surfex
151  MODULE PROCEDURE mppsum_a_int, mppsum_int, mppsum_a_real, mppsum_real, &
153 #else
155 #endif
156  END INTERFACE
157 #if ! defined in_surfex
158  INTERFACE mpp_lbc_north
159  MODULE PROCEDURE mpp_lbc_north_3d, mpp_lbc_north_2d
160  END INTERFACE
161  INTERFACE mpp_minloc
162  MODULE PROCEDURE mpp_minloc2d ,mpp_minloc3d
163  END INTERFACE
164  INTERFACE mpp_maxloc
165  MODULE PROCEDURE mpp_maxloc2d ,mpp_maxloc3d
166  END INTERFACE
167 
168  !! ========================= !!
169  !! MPI variable definition !!
170  !! ========================= !!
171 !$AGRIF_DO_NOT_TREAT
172 ! INCLUDE 'mpif.h' ! Replaced with USE mpi
173 !$AGRIF_END_DO_NOT_TREAT
174 
175  LOGICAL, PUBLIC, PARAMETER :: lk_mpp = .TRUE. !: mpp flag
176 
177  INTEGER, PARAMETER :: nprocmax = 2**10 ! maximum dimension (required to be a power of 2)
178 
179  INTEGER :: mppsize ! number of processes
180  INTEGER :: mpprank ! process number [ 0 - size-1 ]
181 !$AGRIF_DO_NOT_TREAT
182  INTEGER, PUBLIC :: mpi_comm_opa ! opa local communicator
183 !$AGRIF_END_DO_NOT_TREAT
184 
185  INTEGER :: MPI_SUMDD
186 
187  ! variables used in case of sea-ice
188  INTEGER, PUBLIC :: ncomm_ice !: communicator made by the processors with sea-ice (public so that it can be freed in limthd)
189  INTEGER :: ngrp_iworld ! group ID for the world processors (for rheology)
190  INTEGER :: ngrp_ice ! group ID for the ice processors (for rheology)
191  INTEGER :: ndim_rank_ice ! number of 'ice' processors
192  INTEGER :: n_ice_root ! number (in the comm_ice) of proc 0 in the ice comm
193  INTEGER, DIMENSION(:), ALLOCATABLE, SAVE :: nrank_ice ! dimension ndim_rank_ice
194 
195  ! variables used for zonal integration
196  INTEGER, PUBLIC :: ncomm_znl !: communicator made by the processors on the same zonal average
197  LOGICAL, PUBLIC :: l_znl_root ! True on the 'left'most processor on the same row
198  INTEGER :: ngrp_znl ! group ID for the znl processors
199  INTEGER :: ndim_rank_znl ! number of processors on the same zonal average
200  INTEGER, DIMENSION(:), ALLOCATABLE, SAVE :: nrank_znl ! dimension ndim_rank_znl, number of the procs into the same znl domain
201 
202  ! North fold condition in mpp_mpi with jpni > 1 (PUBLIC for TAM)
203  INTEGER, PUBLIC :: ngrp_world ! group ID for the world processors
204  INTEGER, PUBLIC :: ngrp_opa ! group ID for the opa processors
205  INTEGER, PUBLIC :: ngrp_north ! group ID for the northern processors (to be fold)
206  INTEGER, PUBLIC :: ncomm_north ! communicator made by the processors belonging to ngrp_north
207  INTEGER, PUBLIC :: ndim_rank_north ! number of 'sea' processors in the northern line (can be /= jpni !)
208  INTEGER, PUBLIC :: njmppmax ! value of njmpp for the processors of the northern line
209  INTEGER, PUBLIC :: north_root ! number (in the comm_opa) of proc 0 in the northern comm
210  INTEGER, DIMENSION(:), ALLOCATABLE, SAVE, PUBLIC :: nrank_north ! dimension ndim_rank_north
211 
212  ! Type of send : standard, buffered, immediate
213  CHARACTER(len=1), PUBLIC :: cn_mpi_send = 'S' ! type of mpi send/receive (S=standard, B=bsend, I=isend)
214  LOGICAL, PUBLIC :: l_isend = .FALSE. ! isend use indicator (T if cn_mpi_send='I')
215  INTEGER, PUBLIC :: nn_buffer = 0 ! size of the buffer in case of mpi_bsend
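 !
 ! Illustrative sketch (not part of the original source): these defaults may be
 ! overridden through the nammpp namelist read in mynode(); the values shown
 ! here are assumptions for illustration only.
 !   &nammpp
 !      cn_mpi_send  = 'I'       ! immediate (non-blocking) isend
 !      nn_buffer    = 0         ! bsend buffer size, only used with 'B'
 !      ln_nnogather = .false.   ! keep mpi_allgather at the north fold
 !   /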
216 
217  REAL(wp), DIMENSION(:), ALLOCATABLE, SAVE :: tampon ! buffer in case of bsend
218 
219  ! message passing arrays
220  REAL(wp), DIMENSION(:,:,:,:,:), ALLOCATABLE, SAVE :: t4ns, t4sn ! 2 x 3d for north-south & south-north
221  REAL(wp), DIMENSION(:,:,:,:,:), ALLOCATABLE, SAVE :: t4ew, t4we ! 2 x 3d for east-west & west-east
222  REAL(wp), DIMENSION(:,:,:,:,:), ALLOCATABLE, SAVE :: t4p1, t4p2 ! 2 x 3d for north fold
223  REAL(wp), DIMENSION(:,:,:,:) , ALLOCATABLE, SAVE :: t3ns, t3sn ! 3d for north-south & south-north
224  REAL(wp), DIMENSION(:,:,:,:) , ALLOCATABLE, SAVE :: t3ew, t3we ! 3d for east-west & west-east
225  REAL(wp), DIMENSION(:,:,:,:) , ALLOCATABLE, SAVE :: t3p1, t3p2 ! 3d for north fold
226  REAL(wp), DIMENSION(:,:,:) , ALLOCATABLE, SAVE :: t2ns, t2sn ! 2d for north-south & south-north
227  REAL(wp), DIMENSION(:,:,:) , ALLOCATABLE, SAVE :: t2ew, t2we ! 2d for east-west & west-east
228  REAL(wp), DIMENSION(:,:,:) , ALLOCATABLE, SAVE :: t2p1, t2p2 ! 2d for north fold
229  REAL(wp), DIMENSION(:,:,:) , ALLOCATABLE, SAVE :: tr2ns, tr2sn ! 2d for north-south & south-north + extra outer halo
230  REAL(wp), DIMENSION(:,:,:) , ALLOCATABLE, SAVE :: tr2ew, tr2we ! 2d for east-west & west-east + extra outer halo
231 
232  ! Arrays used in mpp_lbc_north_3d()
233  REAL(wp), DIMENSION(:,:,:) , ALLOCATABLE, SAVE :: ztab, znorthloc
234  REAL(wp), DIMENSION(:,:,:,:), ALLOCATABLE, SAVE :: znorthgloio
235  REAL(wp), DIMENSION(:,:,:) , ALLOCATABLE, SAVE :: zfoldwk ! Workspace for message transfers avoiding mpi_allgather
236 
237  ! Arrays used in mpp_lbc_north_2d()
238  REAL(wp), DIMENSION(:,:) , ALLOCATABLE, SAVE :: ztab_2d, znorthloc_2d
239  REAL(wp), DIMENSION(:,:,:), ALLOCATABLE, SAVE :: znorthgloio_2d
240  REAL(wp), DIMENSION(:,:) , ALLOCATABLE, SAVE :: zfoldwk_2d ! Workspace for message transfers avoiding mpi_allgather
241 
242  ! Arrays used in mpp_lbc_north_e()
243  REAL(wp), DIMENSION(:,:) , ALLOCATABLE, SAVE :: ztab_e, znorthloc_e
244  REAL(wp), DIMENSION(:,:,:), ALLOCATABLE, SAVE :: znorthgloio_e
245 
246  ! North fold arrays used to minimise the use of allgather operations. Set in nemo_northcomms (nemogcm) so they need to be public
247  INTEGER, PUBLIC, PARAMETER :: jpmaxngh = 8 ! Assumed maximum number of active neighbours
248  INTEGER, PUBLIC, PARAMETER :: jptyps = 5 ! Number of different neighbour lists to be used for northfold exchanges
249  INTEGER, PUBLIC, DIMENSION (jpmaxngh,jptyps) :: isendto
250  INTEGER, PUBLIC, DIMENSION (jptyps) :: nsndto
251  LOGICAL, PUBLIC :: ln_nnogather = .FALSE. ! namelist control of northfold comms
252  LOGICAL, PUBLIC :: l_north_nogather = .FALSE. ! internal control of northfold comms
253  INTEGER, PUBLIC :: ityp
254  !!----------------------------------------------------------------------
255  !! NEMO/OPA 3.3 , NEMO Consortium (2010)
256  !! $Id$
257  !! Software governed by the CeCILL licence (NEMOGCM/NEMO_CeCILL.txt)
258  !!----------------------------------------------------------------------
259 #endif
260  CONTAINS
261 #if ! defined in_surfex
262  INTEGER FUNCTION lib_mpp_alloc( kumout )
263  !!----------------------------------------------------------------------
264  !! *** routine lib_mpp_alloc ***
265  !!----------------------------------------------------------------------
266  INTEGER, INTENT(in) :: kumout ! ocean.output logical unit
267  !!----------------------------------------------------------------------
268  !
269  ALLOCATE( t4ns(jpi,jprecj,jpk,2,2) , t4sn(jpi,jprecj,jpk,2,2) , &
270  & t4ew(jpj,jpreci,jpk,2,2) , t4we(jpj,jpreci,jpk,2,2) , &
271  & t4p1(jpi,jprecj,jpk,2,2) , t4p2(jpi,jprecj,jpk,2,2) , &
272  & t3ns(jpi,jprecj,jpk,2) , t3sn(jpi,jprecj,jpk,2) , &
273  & t3ew(jpj,jpreci,jpk,2) , t3we(jpj,jpreci,jpk,2) , &
274  & t3p1(jpi,jprecj,jpk,2) , t3p2(jpi,jprecj,jpk,2) , &
275  & t2ns(jpi,jprecj ,2) , t2sn(jpi,jprecj ,2) , &
276  & t2ew(jpj,jpreci ,2) , t2we(jpj,jpreci ,2) , &
277  & t2p1(jpi,jprecj ,2) , t2p2(jpi,jprecj ,2) , &
278  !
279  & tr2ns(1-jpr2di:jpi+jpr2di,jprecj+jpr2dj,2) , &
280  & tr2sn(1-jpr2di:jpi+jpr2di,jprecj+jpr2dj,2) , &
281  & tr2ew(1-jpr2dj:jpj+jpr2dj,jpreci+jpr2di,2) , &
282  & tr2we(1-jpr2dj:jpj+jpr2dj,jpreci+jpr2di,2) , &
283  !
284  & ztab(jpiglo,4,jpk) , znorthloc(jpi,4,jpk) , znorthgloio(jpi,4,jpk,jpni) , &
285  & zfoldwk(jpi,4,jpk) , &
286  !
287  & ztab_2d(jpiglo,4) , znorthloc_2d(jpi,4) , znorthgloio_2d(jpi,4,jpni) , &
288  & zfoldwk_2d(jpi,4) , &
289  !
290  & ztab_e(jpiglo,4+2*jpr2dj) , znorthloc_e(jpi,4+2*jpr2dj) , znorthgloio_e(jpi,4+2*jpr2dj,jpni) , &
291  !
292  & stat=lib_mpp_alloc )
293  !
294  IF( lib_mpp_alloc /= 0 ) THEN
295  WRITE(kumout,cform_war)
296  WRITE(kumout,*) 'lib_mpp_alloc : failed to allocate arrays'
297  ENDIF
298  !
299  END FUNCTION lib_mpp_alloc
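 ! Illustrative sketch (not part of the original source) of how a caller such
 ! as nemogcm.F90 might check the returned allocation status; 'numout' and the
 ! use of ctl_stop are assumptions for illustration only.
 !   IF( lib_mpp_alloc( numout ) /= 0 )   CALL ctl_stop( 'lib_mpp_alloc: allocation failed' )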
300 
301 
302  FUNCTION mynode( ldtxt, kumnam, kstop, localComm )
303  !!----------------------------------------------------------------------
304  !! *** routine mynode ***
305  !!
306  !! ** Purpose : Initialise MPI (if needed) and return the rank of the local process
307  !!----------------------------------------------------------------------
308  CHARACTER(len=*),DIMENSION(:), INTENT( out) :: ldtxt
309  INTEGER , INTENT(in ) :: kumnam ! namelist logical unit
310  INTEGER , INTENT(inout) :: kstop ! stop indicator
311  INTEGER, OPTIONAL , INTENT(in ) :: localcomm
312  !
313  INTEGER :: mynode, ierr, code, ji, ii
314  LOGICAL :: mpi_was_called
315  !
316  namelist/nammpp/ cn_mpi_send, nn_buffer, jpni, jpnj, jpnij, ln_nnogather
317  !!----------------------------------------------------------------------
318  !
319  ii = 1
320  WRITE(ldtxt(ii),*) ; ii = ii + 1
321  WRITE(ldtxt(ii),*) 'mynode : mpi initialisation' ; ii = ii + 1
322  WRITE(ldtxt(ii),*) '~~~~~~ ' ; ii = ii + 1
323  !
324  jpni = -1; jpnj = -1; jpnij = -1
325  rewind( kumnam ) ! Namelist nammpp : mpp parameters of the run
326  READ ( kumnam, nammpp )
327  ! ! control print
328  WRITE(ldtxt(ii),*) ' Namelist nammpp' ; ii = ii + 1
329  WRITE(ldtxt(ii),*) ' mpi send type cn_mpi_send = ', cn_mpi_send ; ii = ii + 1
330  WRITE(ldtxt(ii),*) ' size in bytes of exported buffer nn_buffer = ', nn_buffer ; ii = ii + 1
331 
332 #if defined key_agrif
333  IF( .NOT. agrif_root() ) THEN
334  jpni = agrif_parent(jpni )
335  jpnj = agrif_parent(jpnj )
336  jpnij = agrif_parent(jpnij)
337  ENDIF
338 #endif
339 
340  IF(jpnij < 1)THEN
341  ! If jpnij is not specified in the namelist then we calculate it - this
342  ! means there will be no elimination of land-only subdomains.
343  jpnij = jpni * jpnj
344  END IF
345 
346  IF( (jpni < 1) .OR. (jpnj < 1) )THEN
347  WRITE(ldtxt(ii),*) ' jpni, jpnj and jpnij will be calculated automatically'; ii = ii + 1
348  ELSE
349  WRITE(ldtxt(ii),*) ' processor grid extent in i jpni = ',jpni; ii = ii + 1
350  WRITE(ldtxt(ii),*) ' processor grid extent in j jpnj = ',jpnj; ii = ii + 1
351  WRITE(ldtxt(ii),*) ' number of local domains jpnij = ',jpnij; ii = ii +1
352  END IF
353 
354  WRITE(ldtxt(ii),*) ' avoid use of mpi_allgather at the north fold ln_nnogather = ', ln_nnogather ; ii = ii + 1
355 
356  CALL mpi_initialized( mpi_was_called, code )
357  IF( code /= mpi_success ) THEN
358  DO ji = 1, SIZE(ldtxt)
359  IF( trim(ldtxt(ji)) /= '' ) WRITE(*,*) ldtxt(ji) ! control print of mynode
360  END DO
361  WRITE(*, cform_err)
362  WRITE(*, *) 'lib_mpp: Error in routine mpi_initialized'
363  CALL mpi_abort( mpi_comm_world, code, ierr )
364  ENDIF
365 
366  IF( mpi_was_called ) THEN
367  !
368  SELECT CASE ( cn_mpi_send )
369  CASE ( 'S' ) ! Standard mpi send (blocking)
370  WRITE(ldtxt(ii),*) ' Standard blocking mpi send (send)' ; ii = ii + 1
371  CASE ( 'B' ) ! Buffer mpi send (blocking)
372  WRITE(ldtxt(ii),*) ' Buffer blocking mpi send (bsend)' ; ii = ii + 1
373  IF( agrif_root() ) CALL mpi_init_opa( ldtxt, ii, ierr )
374  CASE ( 'I' ) ! Immediate mpi send (non-blocking send)
375  WRITE(ldtxt(ii),*) ' Immediate non-blocking send (isend)' ; ii = ii + 1
376  l_isend = .true.
377  CASE default
378  WRITE(ldtxt(ii),cform_err) ; ii = ii + 1
379  WRITE(ldtxt(ii),*) ' bad value for cn_mpi_send = ', cn_mpi_send ; ii = ii + 1
380  kstop = kstop + 1
381  END SELECT
382  ELSE IF ( present(localcomm) .and. .not. mpi_was_called ) THEN
383  WRITE(ldtxt(ii),*) ' lib_mpp: You cannot provide a local communicator ' ; ii = ii + 1
384  WRITE(ldtxt(ii),*) ' without calling MPI_Init before ! ' ; ii = ii + 1
385  kstop = kstop + 1
386  ELSE
387  SELECT CASE ( cn_mpi_send )
388  CASE ( 'S' ) ! Standard mpi send (blocking)
389  WRITE(ldtxt(ii),*) ' Standard blocking mpi send (send)' ; ii = ii + 1
390  CALL mpi_init( ierr )
391  CASE ( 'B' ) ! Buffer mpi send (blocking)
392  WRITE(ldtxt(ii),*) ' Buffer blocking mpi send (bsend)' ; ii = ii + 1
393  IF( agrif_root() ) CALL mpi_init_opa( ldtxt, ii, ierr )
394  CASE ( 'I' ) ! Immediate mpi send (non-blocking send)
395  WRITE(ldtxt(ii),*) ' Immediate non-blocking send (isend)' ; ii = ii + 1
396  l_isend = .true.
397  CALL mpi_init( ierr )
398  CASE default
399  WRITE(ldtxt(ii),cform_err) ; ii = ii + 1
400  WRITE(ldtxt(ii),*) ' bad value for cn_mpi_send = ', cn_mpi_send ; ii = ii + 1
401  kstop = kstop + 1
402  END SELECT
403  !
404  ENDIF
405 
406  IF( present(localcomm) ) THEN
407  IF( agrif_root() ) THEN
408  mpi_comm_opa = localcomm
409  ENDIF
410  ELSE
411  CALL mpi_comm_dup( mpi_comm_world, mpi_comm_opa, code)
412  IF( code /= mpi_success ) THEN
413  DO ji = 1, SIZE(ldtxt)
414  IF( trim(ldtxt(ji)) /= '' ) WRITE(*,*) ldtxt(ji) ! control print of mynode
415  END DO
416  WRITE(*, cform_err)
417  WRITE(*, *) ' lib_mpp: Error in routine mpi_comm_dup'
418  CALL mpi_abort( mpi_comm_world, code, ierr )
419  ENDIF
420  ENDIF
421 
422  CALL mpi_comm_rank( mpi_comm_opa, mpprank, ierr )
423  CALL mpi_comm_size( mpi_comm_opa, mppsize, ierr )
424  mynode = mpprank
425  !
426  CALL mpi_op_create(ddpdd_mpi, .true., mpi_sumdd, ierr)
427  !
428  END FUNCTION mynode
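 ! Illustrative sketch (not part of the original source): a typical call during
 ! model initialisation; the variable names are assumptions for illustration only.
 !   nproc = mynode( cltxt, numnam, nstop )   ! MPI rank of this process
 !   narea = nproc + 1                        ! 1-based area number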
429 
430 
431  SUBROUTINE mpp_lnk_3d( ptab, cd_type, psgn, cd_mpp, pval )
432  !!----------------------------------------------------------------------
433  !! *** routine mpp_lnk_3d ***
434  !!
435  !! ** Purpose : Message passing management
436  !!
437  !! ** Method : Use the mppsend and mpprecv functions for passing data
438  !! between processors of neighboring subdomains.
439  !! domain parameters
440  !! nlci : first dimension of the local subdomain
441  !! nlcj : second dimension of the local subdomain
442  !! nbondi : mark for "east-west local boundary"
443  !! nbondj : mark for "north-south local boundary"
444  !! noea : number for local neighboring processors
445  !! nowe : number for local neighboring processors
446  !! noso : number for local neighboring processors
447  !! nono : number for local neighboring processors
448  !!
449  !! ** Action : ptab with updated values at its periphery
450  !!
451  !!----------------------------------------------------------------------
452  REAL(wp), DIMENSION(jpi,jpj,jpk), INTENT(inout) :: ptab ! 3D array on which the boundary condition is applied
453  CHARACTER(len=1) , INTENT(in ) :: cd_type ! define the nature of ptab array grid-points
454  ! ! = T , U , V , F , W points
455  REAL(wp) , INTENT(in ) :: psgn ! =-1 the sign change across the north fold boundary
456  ! ! = 1. , the sign is kept
457  CHARACTER(len=3), OPTIONAL , INTENT(in ) :: cd_mpp ! fill the overlap area only
458  REAL(wp) , OPTIONAL , INTENT(in ) :: pval ! background value (used at closed boundaries)
459  !!
460  INTEGER :: ji, jj, jk, jl ! dummy loop indices
461  INTEGER :: imigr, iihom, ijhom ! temporary integers
462  INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend
463  REAL(wp) :: zland
464  INTEGER, DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for key_mpi_isend
465  !!----------------------------------------------------------------------
466 
467  IF( present( pval ) ) THEN ; zland = pval ! set land value
468  ELSE ; zland = 0.e0 ! zero by default
469  ENDIF
470 
471  ! 1. standard boundary treatment
472  ! ------------------------------
473  IF( present( cd_mpp ) ) THEN ! only fill added line/row with existing values
474  !
475  ! WARNING ptab is defined only between nld and nle
476  DO jk = 1, jpk
477  DO jj = nlcj+1, jpj ! added line(s) (inner only)
478  ptab(nldi :nlei , jj ,jk) = ptab(nldi:nlei, nlej,jk)
479  ptab(1 :nldi-1, jj ,jk) = ptab(nldi , nlej,jk)
480  ptab(nlei+1:nlci , jj ,jk) = ptab( nlei, nlej,jk)
481  END DO
482  DO ji = nlci+1, jpi ! added column(s) (full)
483  ptab(ji ,nldj :nlej ,jk) = ptab( nlei,nldj:nlej,jk)
484  ptab(ji ,1 :nldj-1,jk) = ptab( nlei,nldj ,jk)
485  ptab(ji ,nlej+1:jpj ,jk) = ptab( nlei, nlej,jk)
486  END DO
487  END DO
488  !
489  ELSE ! standard closed or cyclic treatment
490  !
491  ! ! East-West boundaries
492  ! !* Cyclic east-west
493  IF( nbondi == 2 .AND. (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) ) THEN
494  ptab( 1 ,:,:) = ptab(jpim1,:,:)
495  ptab(jpi,:,:) = ptab( 2 ,:,:)
496  ELSE !* closed
497  IF( .NOT. cd_type == 'F' ) ptab( 1 :jpreci,:,:) = zland ! south except F-point
498  ptab(nlci-jpreci+1:jpi ,:,:) = zland ! north
499  ENDIF
500  ! ! North-South boundaries (always closed)
501  IF( .NOT. cd_type == 'F' ) ptab(:, 1 :jprecj,:) = zland ! south except F-point
502  ptab(:,nlcj-jprecj+1:jpj ,:) = zland ! north
503  !
504  ENDIF
505 
506  ! 2. East and west directions exchange
507  ! ------------------------------------
508  ! we play with the neighbours AND the row number because of the periodicity
509  !
510  SELECT CASE ( nbondi ) ! Read Dirichlet lateral conditions
511  CASE ( -1, 0, 1 ) ! all except 2 (i.e. closed case)
512  iihom = nlci-nreci
513  DO jl = 1, jpreci
514  t3ew(:,jl,:,1) = ptab(jpreci+jl,:,:)
515  t3we(:,jl,:,1) = ptab(iihom +jl,:,:)
516  END DO
517  END SELECT
518  !
519  ! ! Migrations
520  imigr = jpreci * jpj * jpk
521  !
522  SELECT CASE ( nbondi )
523  CASE ( -1 )
524  CALL mppsend( 2, t3we(1,1,1,1), imigr, noea, ml_req1 )
525  CALL mpprecv( 1, t3ew(1,1,1,2), imigr, noea )
526  IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err)
527  CASE ( 0 )
528  CALL mppsend( 1, t3ew(1,1,1,1), imigr, nowe, ml_req1 )
529  CALL mppsend( 2, t3we(1,1,1,1), imigr, noea, ml_req2 )
530  CALL mpprecv( 1, t3ew(1,1,1,2), imigr, noea )
531  CALL mpprecv( 2, t3we(1,1,1,2), imigr, nowe )
532  IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err)
533  IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err)
534  CASE ( 1 )
535  CALL mppsend( 1, t3ew(1,1,1,1), imigr, nowe, ml_req1 )
536  CALL mpprecv( 2, t3we(1,1,1,2), imigr, nowe )
537  IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err)
538  END SELECT
539  !
540  ! ! Write Dirichlet lateral conditions
541  iihom = nlci-jpreci
542  !
543  SELECT CASE ( nbondi )
544  CASE ( -1 )
545  DO jl = 1, jpreci
546  ptab(iihom+jl,:,:) = t3ew(:,jl,:,2)
547  END DO
548  CASE ( 0 )
549  DO jl = 1, jpreci
550  ptab(jl ,:,:) = t3we(:,jl,:,2)
551  ptab(iihom+jl,:,:) = t3ew(:,jl,:,2)
552  END DO
553  CASE ( 1 )
554  DO jl = 1, jpreci
555  ptab(jl ,:,:) = t3we(:,jl,:,2)
556  END DO
557  END SELECT
558 
559 
560  ! 3. North and south directions
561  ! -----------------------------
562  ! always closed : we play only with the neighbours
563  !
564  IF( nbondj /= 2 ) THEN ! Read Dirichlet lateral conditions
565  ijhom = nlcj-nrecj
566  DO jl = 1, jprecj
567  t3sn(:,jl,:,1) = ptab(:,ijhom +jl,:)
568  t3ns(:,jl,:,1) = ptab(:,jprecj+jl,:)
569  END DO
570  ENDIF
571  !
572  ! ! Migrations
573  imigr = jprecj * jpi * jpk
574  !
575  SELECT CASE ( nbondj )
576  CASE ( -1 )
577  CALL mppsend( 4, t3sn(1,1,1,1), imigr, nono, ml_req1 )
578  CALL mpprecv( 3, t3ns(1,1,1,2), imigr, nono )
579  IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err)
580  CASE ( 0 )
581  CALL mppsend( 3, t3ns(1,1,1,1), imigr, noso, ml_req1 )
582  CALL mppsend( 4, t3sn(1,1,1,1), imigr, nono, ml_req2 )
583  CALL mpprecv( 3, t3ns(1,1,1,2), imigr, nono )
584  CALL mpprecv( 4, t3sn(1,1,1,2), imigr, noso )
585  IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err)
586  IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err)
587  CASE ( 1 )
588  CALL mppsend( 3, t3ns(1,1,1,1), imigr, noso, ml_req1 )
589  CALL mpprecv( 4, t3sn(1,1,1,2), imigr, noso )
590  IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err)
591  END SELECT
592  !
593  ! ! Write Dirichlet lateral conditions
594  ijhom = nlcj-jprecj
595  !
596  SELECT CASE ( nbondj )
597  CASE ( -1 )
598  DO jl = 1, jprecj
599  ptab(:,ijhom+jl,:) = t3ns(:,jl,:,2)
600  END DO
601  CASE ( 0 )
602  DO jl = 1, jprecj
603  ptab(:,jl ,:) = t3sn(:,jl,:,2)
604  ptab(:,ijhom+jl,:) = t3ns(:,jl,:,2)
605  END DO
606  CASE ( 1 )
607  DO jl = 1, jprecj
608  ptab(:,jl,:) = t3sn(:,jl,:,2)
609  END DO
610  END SELECT
611 
612 
613  ! 4. north fold treatment
614  ! -----------------------
615  !
616  IF( npolj /= 0 .AND. .NOT. present(cd_mpp) ) THEN
617  !
618  SELECT CASE ( jpni )
619  CASE ( 1 ) ; CALL lbc_nfd( ptab, cd_type, psgn ) ! only 1 northern proc, no mpp
620  CASE default ; CALL mpp_lbc_north( ptab, cd_type, psgn ) ! for all northern procs.
621  END SELECT
622  !
623  ENDIF
624  !
625  END SUBROUTINE mpp_lnk_3d
626 
627 
628  SUBROUTINE mpp_lnk_2d( pt2d, cd_type, psgn, cd_mpp, pval )
629  !!----------------------------------------------------------------------
630  !! *** routine mpp_lnk_2d ***
631  !!
632  !! ** Purpose : Message passing management for a 2d array
633  !!
634  !! ** Method : Use the mppsend and mpprecv functions for passing data
635  !! between processors of neighboring subdomains.
636  !! domain parameters
637  !! nlci : first dimension of the local subdomain
638  !! nlcj : second dimension of the local subdomain
639  !! nbondi : mark for "east-west local boundary"
640  !! nbondj : mark for "north-south local boundary"
641  !! noea : number for local neighboring processors
642  !! nowe : number for local neighboring processors
643  !! noso : number for local neighboring processors
644  !! nono : number for local neighboring processors
645  !!
646  !!----------------------------------------------------------------------
647  REAL(wp), DIMENSION(jpi,jpj), INTENT(inout) :: pt2d ! 2D array on which the boundary condition is applied
648  CHARACTER(len=1) , INTENT(in ) :: cd_type ! define the nature of ptab array grid-points
649  ! ! = T , U , V , F , W and I points
650  REAL(wp) , INTENT(in ) :: psgn ! =-1 the sign change across the north fold boundary
651  ! ! = 1. , the sign is kept
652  CHARACTER(len=3), OPTIONAL , INTENT(in ) :: cd_mpp ! fill the overlap area only
653  REAL(wp) , OPTIONAL , INTENT(in ) :: pval ! background value (used at closed boundaries)
654  !!
655  INTEGER :: ji, jj, jl ! dummy loop indices
656  INTEGER :: imigr, iihom, ijhom ! temporary integers
657  INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend
658  REAL(wp) :: zland
659  INTEGER, DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for key_mpi_isend
660  !!----------------------------------------------------------------------
661 
662  IF( present( pval ) ) THEN ; zland = pval ! set land value
663  ELSE ; zland = 0.e0 ! zero by default
664  ENDIF
665 
666  ! 1. standard boundary treatment
667  ! ------------------------------
668  !
669  IF( present( cd_mpp ) ) THEN ! only fill added line/row with existing values
670  !
671  ! WARNING pt2d is defined only between nld and nle
672  DO jj = nlcj+1, jpj ! added line(s) (inner only)
673  pt2d(nldi :nlei , jj ) = pt2d(nldi:nlei, nlej)
674  pt2d(1 :nldi-1, jj ) = pt2d(nldi , nlej)
675  pt2d(nlei+1:nlci , jj ) = pt2d( nlei, nlej)
676  END DO
677  DO ji = nlci+1, jpi ! added column(s) (full)
678  pt2d(ji ,nldj :nlej ) = pt2d( nlei,nldj:nlej)
679  pt2d(ji ,1 :nldj-1) = pt2d( nlei,nldj )
680  pt2d(ji ,nlej+1:jpj ) = pt2d( nlei, nlej)
681  END DO
682  !
683  ELSE ! standard closed or cyclic treatment
684  !
685  ! ! East-West boundaries
686  IF( nbondi == 2 .AND. & ! Cyclic east-west
687  & (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) ) THEN
688  pt2d( 1 ,:) = pt2d(jpim1,:) ! west
689  pt2d(jpi,:) = pt2d( 2 ,:) ! east
690  ELSE ! closed
691  IF( .NOT. cd_type == 'F' ) pt2d( 1 :jpreci,:) = zland ! south except F-point
692  pt2d(nlci-jpreci+1:jpi ,:) = zland ! north
693  ENDIF
694  ! ! North-South boundaries (always closed)
695  IF( .NOT. cd_type == 'F' ) pt2d(:, 1 :jprecj) = zland !south except F-point
696  pt2d(:,nlcj-jprecj+1:jpj ) = zland ! north
697  !
698  ENDIF
699 
700  ! 2. East and west directions exchange
701  ! ------------------------------------
702  ! we play with the neighbours AND the row number because of the periodicity
703  !
704  SELECT CASE ( nbondi ) ! Read Dirichlet lateral conditions
705  CASE ( -1, 0, 1 ) ! all except 2 (i.e. closed case)
706  iihom = nlci-nreci
707  DO jl = 1, jpreci
708  t2ew(:,jl,1) = pt2d(jpreci+jl,:)
709  t2we(:,jl,1) = pt2d(iihom +jl,:)
710  END DO
711  END SELECT
712  !
713  ! ! Migrations
714  imigr = jpreci * jpj
715  !
716  SELECT CASE ( nbondi )
717  CASE ( -1 )
718  CALL mppsend( 2, t2we(1,1,1), imigr, noea, ml_req1 )
719  CALL mpprecv( 1, t2ew(1,1,2), imigr, noea )
720  IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err)
721  CASE ( 0 )
722  CALL mppsend( 1, t2ew(1,1,1), imigr, nowe, ml_req1 )
723  CALL mppsend( 2, t2we(1,1,1), imigr, noea, ml_req2 )
724  CALL mpprecv( 1, t2ew(1,1,2), imigr, noea )
725  CALL mpprecv( 2, t2we(1,1,2), imigr, nowe )
726  IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err)
727  IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err)
728  CASE ( 1 )
729  CALL mppsend( 1, t2ew(1,1,1), imigr, nowe, ml_req1 )
730  CALL mpprecv( 2, t2we(1,1,2), imigr, nowe )
731  IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err)
732  END SELECT
733  !
734  ! ! Write Dirichlet lateral conditions
735  iihom = nlci - jpreci
736  !
737  SELECT CASE ( nbondi )
738  CASE ( -1 )
739  DO jl = 1, jpreci
740  pt2d(iihom+jl,:) = t2ew(:,jl,2)
741  END DO
742  CASE ( 0 )
743  DO jl = 1, jpreci
744  pt2d(jl ,:) = t2we(:,jl,2)
745  pt2d(iihom+jl,:) = t2ew(:,jl,2)
746  END DO
747  CASE ( 1 )
748  DO jl = 1, jpreci
749  pt2d(jl ,:) = t2we(:,jl,2)
750  END DO
751  END SELECT
752 
753 
754  ! 3. North and south directions
755  ! -----------------------------
756  ! always closed : we play only with the neighbours
757  !
758  IF( nbondj /= 2 ) THEN ! Read Dirichlet lateral conditions
759  ijhom = nlcj-nrecj
760  DO jl = 1, jprecj
761  t2sn(:,jl,1) = pt2d(:,ijhom +jl)
762  t2ns(:,jl,1) = pt2d(:,jprecj+jl)
763  END DO
764  ENDIF
765  !
766  ! ! Migrations
767  imigr = jprecj * jpi
768  !
769  SELECT CASE ( nbondj )
770  CASE ( -1 )
771  CALL mppsend( 4, t2sn(1,1,1), imigr, nono, ml_req1 )
772  CALL mpprecv( 3, t2ns(1,1,2), imigr, nono )
773  IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err)
774  CASE ( 0 )
775  CALL mppsend( 3, t2ns(1,1,1), imigr, noso, ml_req1 )
776  CALL mppsend( 4, t2sn(1,1,1), imigr, nono, ml_req2 )
777  CALL mpprecv( 3, t2ns(1,1,2), imigr, nono )
778  CALL mpprecv( 4, t2sn(1,1,2), imigr, noso )
779  IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err)
780  IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err)
781  CASE ( 1 )
782  CALL mppsend( 3, t2ns(1,1,1), imigr, noso, ml_req1 )
783  CALL mpprecv( 4, t2sn(1,1,2), imigr, noso )
784  IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err)
785  END SELECT
786  !
787  ! ! Write Dirichlet lateral conditions
788  ijhom = nlcj - jprecj
789  !
790  SELECT CASE ( nbondj )
791  CASE ( -1 )
792  DO jl = 1, jprecj
793  pt2d(:,ijhom+jl) = t2ns(:,jl,2)
794  END DO
795  CASE ( 0 )
796  DO jl = 1, jprecj
797  pt2d(:,jl ) = t2sn(:,jl,2)
798  pt2d(:,ijhom+jl) = t2ns(:,jl,2)
799  END DO
800  CASE ( 1 )
801  DO jl = 1, jprecj
802  pt2d(:,jl ) = t2sn(:,jl,2)
803  END DO
804  END SELECT
805 
806 
807  ! 4. north fold treatment
808  ! -----------------------
809  !
810  IF( npolj /= 0 .AND. .NOT. present(cd_mpp) ) THEN
811  !
812  SELECT CASE ( jpni )
813  CASE ( 1 ) ; CALL lbc_nfd( pt2d, cd_type, psgn ) ! only 1 northern proc, no mpp
814  CASE default ; CALL mpp_lbc_north( pt2d, cd_type, psgn ) ! for all northern procs.
815  END SELECT
816  !
817  ENDIF
818  !
819  END SUBROUTINE mpp_lnk_2d
820 
821 
822  SUBROUTINE mpp_lnk_3d_gather( ptab1, cd_type1, ptab2, cd_type2, psgn )
823  !!----------------------------------------------------------------------
824  !! *** routine mpp_lnk_3d_gather ***
825  !!
826  !! ** Purpose : Message passing management for two 3D arrays
827  !!
828  !! ** Method : Use the mppsend and mpprecv functions for passing data
829  !! between processors of neighboring subdomains.
830  !! domain parameters
831  !! nlci : first dimension of the local subdomain
832  !! nlcj : second dimension of the local subdomain
833  !! nbondi : mark for "east-west local boundary"
834  !! nbondj : mark for "north-south local boundary"
835  !! noea : number for local neighboring processors
836  !! nowe : number for local neighboring processors
837  !! noso : number for local neighboring processors
838  !! nono : number for local neighboring processors
839  !!
840  !! ** Action : ptab1 and ptab2 with updated values at their periphery
841  !!
842  !!----------------------------------------------------------------------
843  REAL(wp), DIMENSION(jpi,jpj,jpk), INTENT(inout) :: ptab1 ! first and second 3D array on which
844  REAL(wp), DIMENSION(jpi,jpj,jpk), INTENT(inout) :: ptab2 ! the boundary condition is applied
845  CHARACTER(len=1) , INTENT(in ) :: cd_type1 ! nature of ptab1 and ptab2 arrays
846  CHARACTER(len=1) , INTENT(in ) :: cd_type2 ! i.e. grid-points = T , U , V , F or W points
847  REAL(wp) , INTENT(in ) :: psgn ! =-1 the sign change across the north fold boundary
848  !! ! = 1. , the sign is kept
849  INTEGER :: jl ! dummy loop indices
850  INTEGER :: imigr, iihom, ijhom ! temporary integers
851  INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend
852  INTEGER, DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for key_mpi_isend
853  !!----------------------------------------------------------------------
854 
855  ! 1. standard boundary treatment
856  ! ------------------------------
857  ! ! East-West boundaries
858  ! !* Cyclic east-west
859  IF( nbondi == 2 .AND. (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) ) THEN
860  ptab1( 1 ,:,:) = ptab1(jpim1,:,:)
861  ptab1(jpi,:,:) = ptab1( 2 ,:,:)
862  ptab2( 1 ,:,:) = ptab2(jpim1,:,:)
863  ptab2(jpi,:,:) = ptab2( 2 ,:,:)
864  ELSE !* closed
865  IF( .NOT. cd_type1 == 'F' ) ptab1( 1 :jpreci,:,:) = 0.e0 ! south except at F-point
866  IF( .NOT. cd_type2 == 'F' ) ptab2( 1 :jpreci,:,:) = 0.e0
867  ptab1(nlci-jpreci+1:jpi ,:,:) = 0.e0 ! north
868  ptab2(nlci-jpreci+1:jpi ,:,:) = 0.e0
869  ENDIF
870 
871 
872  ! ! North-South boundaries
873  IF( .NOT. cd_type1 == 'F' ) ptab1(:, 1 :jprecj,:) = 0.e0 ! south except at F-point
874  IF( .NOT. cd_type2 == 'F' ) ptab2(:, 1 :jprecj,:) = 0.e0
875  ptab1(:,nlcj-jprecj+1:jpj ,:) = 0.e0 ! north
876  ptab2(:,nlcj-jprecj+1:jpj ,:) = 0.e0
877 
878 
879  ! 2. East and west directions exchange
880  ! ------------------------------------
881  ! we play with the neighbours AND the row number because of the periodicity
882  !
883  SELECT CASE ( nbondi ) ! Read Dirichlet lateral conditions
884  CASE ( -1, 0, 1 ) ! all except 2 (i.e. closed case)
885  iihom = nlci-nreci
886  DO jl = 1, jpreci
887  t4ew(:,jl,:,1,1) = ptab1(jpreci+jl,:,:)
888  t4we(:,jl,:,1,1) = ptab1(iihom +jl,:,:)
889  t4ew(:,jl,:,2,1) = ptab2(jpreci+jl,:,:)
890  t4we(:,jl,:,2,1) = ptab2(iihom +jl,:,:)
891  END DO
892  END SELECT
893  !
894  ! ! Migrations
895  imigr = jpreci * jpj * jpk *2
896  !
897  SELECT CASE ( nbondi )
898  CASE ( -1 )
899  CALL mppsend( 2, t4we(1,1,1,1,1), imigr, noea, ml_req1 )
900  CALL mpprecv( 1, t4ew(1,1,1,1,2), imigr, noea )
901  IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err)
902  CASE ( 0 )
903  CALL mppsend( 1, t4ew(1,1,1,1,1), imigr, nowe, ml_req1 )
904  CALL mppsend( 2, t4we(1,1,1,1,1), imigr, noea, ml_req2 )
905  CALL mpprecv( 1, t4ew(1,1,1,1,2), imigr, noea )
906  CALL mpprecv( 2, t4we(1,1,1,1,2), imigr, nowe )
907  IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err)
908  IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err)
909  CASE ( 1 )
910  CALL mppsend( 1, t4ew(1,1,1,1,1), imigr, nowe, ml_req1 )
911  CALL mpprecv( 2, t4we(1,1,1,1,2), imigr, nowe )
912  IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err)
913  END SELECT
914  !
915  ! ! Write Dirichlet lateral conditions
916  iihom = nlci - jpreci
917  !
918  SELECT CASE ( nbondi )
919  CASE ( -1 )
920  DO jl = 1, jpreci
921  ptab1(iihom+jl,:,:) = t4ew(:,jl,:,1,2)
922  ptab2(iihom+jl,:,:) = t4ew(:,jl,:,2,2)
923  END DO
924  CASE ( 0 )
925  DO jl = 1, jpreci
926  ptab1(jl ,:,:) = t4we(:,jl,:,1,2)
927  ptab1(iihom+jl,:,:) = t4ew(:,jl,:,1,2)
928  ptab2(jl ,:,:) = t4we(:,jl,:,2,2)
929  ptab2(iihom+jl,:,:) = t4ew(:,jl,:,2,2)
930  END DO
931  CASE ( 1 )
932  DO jl = 1, jpreci
933  ptab1(jl ,:,:) = t4we(:,jl,:,1,2)
934  ptab2(jl ,:,:) = t4we(:,jl,:,2,2)
935  END DO
936  END SELECT
937 
938 
939  ! 3. North and south directions
940  ! -----------------------------
941  ! always closed : we play only with the neighbours
942  !
943  IF( nbondj /= 2 ) THEN ! Read Dirichlet lateral conditions
944  ijhom = nlcj - nrecj
945  DO jl = 1, jprecj
946  t4sn(:,jl,:,1,1) = ptab1(:,ijhom +jl,:)
947  t4ns(:,jl,:,1,1) = ptab1(:,jprecj+jl,:)
948  t4sn(:,jl,:,2,1) = ptab2(:,ijhom +jl,:)
949  t4ns(:,jl,:,2,1) = ptab2(:,jprecj+jl,:)
950  END DO
951  ENDIF
952  !
953  ! ! Migrations
954  imigr = jprecj * jpi * jpk * 2
955  !
956  SELECT CASE ( nbondj )
957  CASE ( -1 )
958  CALL mppsend( 4, t4sn(1,1,1,1,1), imigr, nono, ml_req1 )
959  CALL mpprecv( 3, t4ns(1,1,1,1,2), imigr, nono )
960  IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err)
961  CASE ( 0 )
962  CALL mppsend( 3, t4ns(1,1,1,1,1), imigr, noso, ml_req1 )
963  CALL mppsend( 4, t4sn(1,1,1,1,1), imigr, nono, ml_req2 )
964  CALL mpprecv( 3, t4ns(1,1,1,1,2), imigr, nono )
965  CALL mpprecv( 4, t4sn(1,1,1,1,2), imigr, noso )
966  IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err)
967  IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err)
968  CASE ( 1 )
969  CALL mppsend( 3, t4ns(1,1,1,1,1), imigr, noso, ml_req1 )
970  CALL mpprecv( 4, t4sn(1,1,1,1,2), imigr, noso )
971  IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err)
972  END SELECT
973  !
974  ! ! Write Dirichlet lateral conditions
975  ijhom = nlcj - jprecj
976  !
977  SELECT CASE ( nbondj )
978  CASE ( -1 )
979  DO jl = 1, jprecj
980  ptab1(:,ijhom+jl,:) = t4ns(:,jl,:,1,2)
981  ptab2(:,ijhom+jl,:) = t4ns(:,jl,:,2,2)
982  END DO
983  CASE ( 0 )
984  DO jl = 1, jprecj
985  ptab1(:,jl ,:) = t4sn(:,jl,:,1,2)
986  ptab1(:,ijhom+jl,:) = t4ns(:,jl,:,1,2)
987  ptab2(:,jl ,:) = t4sn(:,jl,:,2,2)
988  ptab2(:,ijhom+jl,:) = t4ns(:,jl,:,2,2)
989  END DO
990  CASE ( 1 )
991  DO jl = 1, jprecj
992  ptab1(:,jl,:) = t4sn(:,jl,:,1,2)
993  ptab2(:,jl,:) = t4sn(:,jl,:,2,2)
994  END DO
995  END SELECT
996 
997 
998  ! 4. north fold treatment
999  ! -----------------------
1000  IF( npolj /= 0 ) THEN
1001  !
1002  SELECT CASE ( jpni )
1003  CASE ( 1 )
1004  CALL lbc_nfd( ptab1, cd_type1, psgn ) ! only for northern procs.
1005  CALL lbc_nfd( ptab2, cd_type2, psgn )
1006  CASE default
1007  CALL mpp_lbc_north( ptab1, cd_type1, psgn ) ! for all northern procs.
1008  CALL mpp_lbc_north(ptab2, cd_type2, psgn)
1009  END SELECT
1010  !
1011  ENDIF
1012  !
1013  END SUBROUTINE mpp_lnk_3d_gather
1014 
1015 
1016  SUBROUTINE mpp_lnk_2d_e( pt2d, cd_type, psgn )
1017  !!----------------------------------------------------------------------
1018  !! *** routine mpp_lnk_2d_e ***
1019  !!
1020  !! ** Purpose : Message passing management for a 2d array (with extra halo)
1021  !!
1022  !! ** Method : Use the mppsend and mpprecv functions for passing data
1023  !! between processors of neighboring subdomains.
1024  !! domain parameters
1025  !! nlci : first dimension of the local subdomain
1026  !! nlcj : second dimension of the local subdomain
1027  !! jpr2di : number of rows for extra outer halo
1028  !! jpr2dj : number of columns for extra outer halo
1029  !! nbondi : mark for "east-west local boundary"
1030  !! nbondj : mark for "north-south local boundary"
1031  !! noea : number for local neighboring processors
1032  !! nowe : number for local neighboring processors
1033  !! noso : number for local neighboring processors
1034  !! nono : number for local neighboring processors
1035  !!
1036  !!----------------------------------------------------------------------
1037  REAL(wp), DIMENSION(1-jpr2di:jpi+jpr2di,1-jpr2dj:jpj+jpr2dj), INTENT(inout) :: pt2d ! 2D array with extra halo
1038  CHARACTER(len=1) , INTENT(in ) :: cd_type ! nature of ptab array grid-points
1039  ! ! = T , U , V , F , W and I points
1040  REAL(wp) , INTENT(in ) :: psgn ! =-1 the sign change across the
1041  !! ! north boundary, = 1. otherwise
1042  INTEGER :: jl ! dummy loop indices
1043  INTEGER :: imigr, iihom, ijhom ! temporary integers
1044  INTEGER :: ipreci, iprecj ! temporary integers
1045  INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend
1046  INTEGER, DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for key_mpi_isend
1047  !!----------------------------------------------------------------------
1048 
1049  ipreci = jpreci + jpr2di ! take into account outer extra 2D overlap area
1050  iprecj = jprecj + jpr2dj
1051 
1052 
1053  ! 1. standard boundary treatment
1054  ! ------------------------------
1055  ! Order matters Here !!!!
1056  !
1057  ! !* North-South boundaries (always closed)
1058  IF( .NOT. cd_type == 'F' ) pt2d(:, 1-jpr2dj : jprecj ) = 0.e0 ! south except at F-point
1059  pt2d(:,nlcj-jprecj+1:jpj+jpr2dj) = 0.e0 ! north
1060 
1061  ! ! East-West boundaries
1062  ! !* Cyclic east-west
1063  IF( nbondi == 2 .AND. (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) ) THEN
1064  pt2d(1-jpr2di: 1 ,:) = pt2d(jpim1-jpr2di: jpim1 ,:) ! east
1065  pt2d( jpi :jpi+jpr2di,:) = pt2d( 2 :2+jpr2di,:) ! west
1066  !
1067  ELSE !* closed
1068  IF( .NOT. cd_type == 'F' ) pt2d( 1-jpr2di :jpreci ,:) = 0.e0 ! south except at F-point
1069  pt2d(nlci-jpreci+1:jpi+jpr2di,:) = 0.e0 ! north
1070  ENDIF
1071  !
1072 
1073  ! north fold treatment
1074  ! -----------------------
1075  IF( npolj /= 0 ) THEN
1076  !
1077  SELECT CASE ( jpni )
1078  CASE ( 1 ) ; CALL lbc_nfd( pt2d(1:jpi,1:jpj+jpr2dj), cd_type, psgn, pr2dj=jpr2dj )
1079  CASE default ; CALL mpp_lbc_north_e( pt2d , cd_type, psgn )
1080  END SELECT
1081  !
1082  ENDIF
1083 
1084  ! 2. East and west directions exchange
1085  ! ------------------------------------
1086  ! we play with the neighbours AND the row number because of the periodicity
1087  !
1088  SELECT CASE ( nbondi ) ! Read Dirichlet lateral conditions
1089  CASE ( -1, 0, 1 ) ! all except 2 (i.e. closed case)
1090  iihom = nlci-nreci-jpr2di
1091  DO jl = 1, ipreci
1092  tr2ew(:,jl,1) = pt2d(jpreci+jl,:)
1093  tr2we(:,jl,1) = pt2d(iihom +jl,:)
1094  END DO
1095  END SELECT
1096  !
1097  ! ! Migrations
1098  imigr = ipreci * ( jpj + 2*jpr2dj)
1099  !
1100  SELECT CASE ( nbondi )
1101  CASE ( -1 )
1102  CALL mppsend( 2, tr2we(1-jpr2dj,1,1), imigr, noea, ml_req1 )
1103  CALL mpprecv( 1, tr2ew(1-jpr2dj,1,2), imigr, noea )
1104  IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err)
1105  CASE ( 0 )
1106  CALL mppsend( 1, tr2ew(1-jpr2dj,1,1), imigr, nowe, ml_req1 )
1107  CALL mppsend( 2, tr2we(1-jpr2dj,1,1), imigr, noea, ml_req2 )
1108  CALL mpprecv( 1, tr2ew(1-jpr2dj,1,2), imigr, noea )
1109  CALL mpprecv( 2, tr2we(1-jpr2dj,1,2), imigr, nowe )
1110  IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err)
1111  IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err)
1112  CASE ( 1 )
1113  CALL mppsend( 1, tr2ew(1-jpr2dj,1,1), imigr, nowe, ml_req1 )
1114  CALL mpprecv( 2, tr2we(1-jpr2dj,1,2), imigr, nowe )
1115  IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err)
1116  END SELECT
1117  !
1118  ! ! Write Dirichlet lateral conditions
1119  iihom = nlci - jpreci
1120  !
1121  SELECT CASE ( nbondi )
1122  CASE ( -1 )
1123  DO jl = 1, ipreci
1124  pt2d(iihom+jl,:) = tr2ew(:,jl,2)
1125  END DO
1126  CASE ( 0 )
1127  DO jl = 1, ipreci
1128  pt2d(jl-jpr2di,:) = tr2we(:,jl,2)
1129  pt2d( iihom+jl,:) = tr2ew(:,jl,2)
1130  END DO
1131  CASE ( 1 )
1132  DO jl = 1, ipreci
1133  pt2d(jl-jpr2di,:) = tr2we(:,jl,2)
1134  END DO
1135  END SELECT
1136 
1137 
1138  ! 3. North and south directions
1139  ! -----------------------------
1140  ! always closed : we play only with the neighbours
1141  !
1142  IF( nbondj /= 2 ) THEN ! Read Dirichlet lateral conditions
1143  ijhom = nlcj-nrecj-jpr2dj
1144  DO jl = 1, iprecj
1145  tr2sn(:,jl,1) = pt2d(:,ijhom +jl)
1146  tr2ns(:,jl,1) = pt2d(:,jprecj+jl)
1147  END DO
1148  ENDIF
1149  !
1150  ! ! Migrations
1151  imigr = iprecj * ( jpi + 2*jpr2di )
1152  !
1153  SELECT CASE ( nbondj )
1154  CASE ( -1 )
1155  CALL mppsend( 4, tr2sn(1-jpr2di,1,1), imigr, nono, ml_req1 )
1156  CALL mpprecv( 3, tr2ns(1-jpr2di,1,2), imigr, nono )
1157  IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err)
1158  CASE ( 0 )
1159  CALL mppsend( 3, tr2ns(1-jpr2di,1,1), imigr, noso, ml_req1 )
1160  CALL mppsend( 4, tr2sn(1-jpr2di,1,1), imigr, nono, ml_req2 )
1161  CALL mpprecv( 3, tr2ns(1-jpr2di,1,2), imigr, nono )
1162  CALL mpprecv( 4, tr2sn(1-jpr2di,1,2), imigr, noso )
1163  IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err)
1164  IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err)
1165  CASE ( 1 )
1166  CALL mppsend( 3, tr2ns(1-jpr2di,1,1), imigr, noso, ml_req1 )
1167  CALL mpprecv( 4, tr2sn(1-jpr2di,1,2), imigr, noso )
1168  IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err)
1169  END SELECT
1170  !
1171  ! ! Write Dirichlet lateral conditions
1172  ijhom = nlcj - jprecj
1173  !
1174  SELECT CASE ( nbondj )
1175  CASE ( -1 )
1176  DO jl = 1, iprecj
1177  pt2d(:,ijhom+jl) = tr2ns(:,jl,2)
1178  END DO
1179  CASE ( 0 )
1180  DO jl = 1, iprecj
1181  pt2d(:,jl-jpr2dj) = tr2sn(:,jl,2)
1182  pt2d(:,ijhom+jl ) = tr2ns(:,jl,2)
1183  END DO
1184  CASE ( 1 )
1185  DO jl = 1, iprecj
1186  pt2d(:,jl-jpr2dj) = tr2sn(:,jl,2)
1187  END DO
1188  END SELECT
1189 
1190  END SUBROUTINE mpp_lnk_2d_e
1191 
1192 
1193  SUBROUTINE mppsend( ktyp, pmess, kbytes, kdest, md_req )
1194  !!----------------------------------------------------------------------
1195  !! *** routine mppsend ***
1196  !!
1197  !! ** Purpose : Send a message passing array
1198  !!
1199  !!----------------------------------------------------------------------
1200  REAL(wp), INTENT(inout) :: pmess(*) ! array of real
1201  INTEGER , INTENT(in ) :: kbytes ! size of the array pmess
1202  INTEGER , INTENT(in ) :: kdest ! receive process number
1203  INTEGER , INTENT(in ) :: ktyp ! tag of the message
1204  INTEGER , INTENT(in ) :: md_req ! argument for isend
1205  !!
1206  INTEGER :: iflag
1207  !!----------------------------------------------------------------------
1208  !
1209  SELECT CASE ( cn_mpi_send )
1210  CASE ( 'S' ) ! Standard mpi send (blocking)
1211  CALL mpi_send( pmess, kbytes, mpi_double_precision, kdest , ktyp, mpi_comm_opa , iflag )
1212  CASE ( 'B' ) ! Buffer mpi send (blocking)
1213  CALL mpi_bsend( pmess, kbytes, mpi_double_precision, kdest , ktyp, mpi_comm_opa , iflag )
1214  CASE ( 'I' ) ! Immediate mpi send (non-blocking send)
1215  ! be careful, one more argument here: the MPI request identifier.
1216  CALL mpi_isend( pmess, kbytes, mpi_double_precision, kdest , ktyp, mpi_comm_opa, md_req, iflag )
1217  END SELECT
1218  !
1219  END SUBROUTINE mppsend
1220 
1221 
1222  SUBROUTINE mpprecv( ktyp, pmess, kbytes, ksource )
1223  !!----------------------------------------------------------------------
1224  !! *** routine mpprecv ***
1225  !!
1226  !! ** Purpose : Receive a message passing array
1227  !!
1228  !!----------------------------------------------------------------------
1229  REAL(wp), INTENT(inout) :: pmess(*) ! array of real
1230  INTEGER , INTENT(in ) :: kbytes ! size of the array pmess
1231  INTEGER , INTENT(in ) :: ktyp ! Tag of the received message
1232  INTEGER, OPTIONAL, INTENT(in) :: ksource ! source process number
1233  !!
1234  INTEGER :: istatus(mpi_status_size)
1235  INTEGER :: iflag
1236  INTEGER :: use_source
1237  !!----------------------------------------------------------------------
1238  !
1239 
1240  ! If a specific process number has been passed to the receive call,
1241  ! use that one. Default is to use mpi_any_source
1242  use_source=mpi_any_source
1243  if(present(ksource)) then
1244  use_source=ksource
1245  end if
1246 
1247  CALL mpi_recv( pmess, kbytes, mpi_double_precision, use_source, ktyp, mpi_comm_opa, istatus, iflag )
1248  !
1249  END SUBROUTINE mpprecv
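 ! Illustrative sketch (not part of the original source): mppsend and mpprecv
 ! are used in matched pairs with agreed tags, as in mpp_lnk_3d above; 'ireq',
 ! 'istat' and 'ierr' are assumptions for illustration only.
 !   CALL mppsend( 2, t3we(1,1,1,1), imigr, noea, ireq )   ! send to the eastern neighbour (tag 2)
 !   CALL mpprecv( 1, t3ew(1,1,1,2), imigr, noea )         ! receive from it (tag 1)
 !   IF( l_isend )   CALL mpi_wait( ireq, istat, ierr )    ! complete the isend when cn_mpi_send='I'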
1250 
1251 
1252  SUBROUTINE mppgather( ptab, kp, pio )
1253  !!----------------------------------------------------------------------
1254  !! *** routine mppgather ***
1255  !!
1256  !! ** Purpose : Transfer between a local subdomain array and a work
1257  !! array which is distributed along the vertical levels.
1258  !!
1259  !!----------------------------------------------------------------------
1260  REAL(wp), DIMENSION(jpi,jpj), INTENT(in ) :: ptab ! subdomain input array
1261  INTEGER , INTENT(in ) :: kp ! rank of the root (gathering) process
1262  REAL(wp), DIMENSION(jpi,jpj,jpnij), INTENT( out) :: pio ! gathered output array
1263  !!
1264  INTEGER :: itaille, ierror ! temporary integer
1265  !!---------------------------------------------------------------------
1266  !
1267  itaille = jpi * jpj
1268  CALL mpi_gather( ptab, itaille, mpi_double_precision, pio, itaille , &
1269  & mpi_double_precision, kp , mpi_comm_opa, ierror )
1270  !
1271  END SUBROUTINE mppgather
1272 
1273 
1274  SUBROUTINE mppscatter( pio, kp, ptab )
1275  !!----------------------------------------------------------------------
1276  !! *** routine mppscatter ***
1277  !!
1278  !! ** Purpose : Transfer between a work array which is distributed
1279  !! along the vertical levels and the local subdomain array.
1280  !!
1281  !!----------------------------------------------------------------------
1282  REAL(wp), DIMENSION(jpi,jpj,jpnij) :: pio ! output array
1283  INTEGER :: kp ! rank of the root (scattering) process
1284  REAL(wp), DIMENSION(jpi,jpj) :: ptab ! subdomain array input
1285  !!
1286  INTEGER :: itaille, ierror ! temporary integer
1287  !!---------------------------------------------------------------------
1288  !
1289  itaille=jpi*jpj
1290  !
1291  CALL mpi_scatter( pio, itaille, mpi_double_precision, ptab, itaille , &
1292  & mpi_double_precision, kp , mpi_comm_opa, ierror )
1293  !
1294  END SUBROUTINE mppscatter
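 ! Illustrative sketch (not part of the original source): gather every
 ! subdomain field onto one process, then redistribute; names are assumptions.
 !   REAL(wp) :: zloc(jpi,jpj), zglo(jpi,jpj,jpnij)
 !   CALL mppgather ( zloc, 0, zglo )   ! all processes send zloc to rank 0
 !   CALL mppscatter( zglo, 0, zloc )   ! rank 0 sends one slice back to each process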
1295 #endif
1296 
1297  SUBROUTINE mppmax_a_int( ktab, kdim, kcom )
1298  !!----------------------------------------------------------------------
1299  !! *** routine mppmax_a_int ***
1300  !!
1301  !! ** Purpose : Find maximum value in an integer layout array
1302  !!
1303  !!----------------------------------------------------------------------
1304  INTEGER , INTENT(in ) :: kdim ! size of array
1305  INTEGER , INTENT(inout), DIMENSION(kdim) :: ktab ! input array
1306  INTEGER , INTENT(in ), OPTIONAL :: kcom !
1307  !!
1308  INTEGER :: ierror, localcomm ! temporary integer
1309  INTEGER, DIMENSION(kdim) :: iwork
1310  !!----------------------------------------------------------------------
1311  !
1312 #if !defined in_surfex || defined SFX_MPI
1313  localcomm = mpi_comm_opa
1314  IF( present(kcom) ) localcomm = kcom
1315  !
1316 #if !defined in_arpege
1317 !$OMP SINGLE
1318  CALL mpi_allreduce( ktab, iwork, kdim, mpi_integer, mpi_max, localcomm, ierror )
1319 !$OMP END SINGLE
1320  !
1321  ktab(:) = iwork(:)
1322 #else
1323  CALL abor1_sfx("lib_mpp:mppmax_a_int : Cannot yet take the max of an integer array in Arpege")
1324 #endif
1325 #endif
1326  !
1327  END SUBROUTINE mppmax_a_int
1328 
1329 
1330  SUBROUTINE mppmax_int( ktab, kcom )
1331  !!----------------------------------------------------------------------
1332  !! *** routine mppmax_int ***
1333  !!
1334  !! ** Purpose : Find the maximum of an integer over all processes
1335  !!
1336  !!----------------------------------------------------------------------
1337  INTEGER, INTENT(inout) :: ktab ! value to reduce (global maximum on return)
1338  INTEGER, INTENT(in ), OPTIONAL :: kcom ! optional communicator
1339  !!
1340  INTEGER :: ierror, iwork, localcomm ! temporary integer
1341 #ifdef in_arpege
1342  INTEGER(KIND=JPIB) :: jb
1343 #endif
1344  !!----------------------------------------------------------------------
1345  !
1346 #if !defined in_surfex || defined SFX_MPI
1347  localcomm = mpi_comm_opa
1348  IF( present(kcom) ) localcomm = kcom
1349  !
1350 #if !defined in_arpege
1351 !$OMP SINGLE
1352  CALL mpi_allreduce( ktab, iwork, 1, mpi_integer, mpi_max, localcomm, ierror)
1353 !$OMP END SINGLE
1354  !
1355  ktab = iwork
1356 #else
1357  jb=ktab
1358 ! Commented out for now - does not work !
1359 ! CALL MPL_ALLREDUCE(jb,'MAX')
1360  ktab=jb
1361 #endif
1362 #endif
1363  !
1364  END SUBROUTINE mppmax_int
1365 
1366 
1367  SUBROUTINE mppmin_a_int( ktab, kdim, kcom )
1368  !!----------------------------------------------------------------------
1369  !! *** routine mppmin_a_int ***
1370  !!
1371  !! ** Purpose : Find minimum value in an integer layout array
1372  !!
1373  !!----------------------------------------------------------------------
1374  INTEGER , INTENT( in ) :: kdim ! size of array
1375  INTEGER , INTENT(inout), DIMENSION(kdim) :: ktab ! input array
1376  INTEGER , INTENT( in ), OPTIONAL :: kcom ! optional communicator
1377  !!
1378  INTEGER :: ierror, localcomm ! temporary integer
1379  INTEGER, DIMENSION(kdim) :: iwork
1380  !!----------------------------------------------------------------------
1381  !
1382 #if !defined in_surfex || defined SFX_MPI
1383  localcomm = mpi_comm_opa
1384  IF( present(kcom) ) localcomm = kcom
1385  !
1386 #if !defined in_arpege
1387 !$OMP SINGLE
1388  CALL mpi_allreduce( ktab, iwork, kdim, mpi_integer, mpi_min, localcomm, ierror )
1389 !$OMP END SINGLE
1390  !
1391  ktab(:) = iwork(:)
1392 #else
1393  CALL abor1_sfx("lib_mpp:mppmin_a_int : Cannot yet take the min of an integer array in Arpege")
1394 #endif
1395 #endif
1396  !
1397  END SUBROUTINE mppmin_a_int
1398 
1399 
1400  SUBROUTINE mppmin_int( ktab, kcom )
1401  !!----------------------------------------------------------------------
1402  !! *** routine mppmin_int ***
1403  !!
1404  !! ** Purpose : Find the minimum of an integer over all processes
1405  !!
1406  !!----------------------------------------------------------------------
1407  INTEGER, INTENT(inout) :: ktab ! value to reduce (global minimum on return)
1408  INTEGER , INTENT( in ), OPTIONAL :: kcom ! optional communicator
1409  !!
1410  INTEGER :: ierror, iwork, localcomm
1411 #ifdef in_arpege
1412  INTEGER(KIND=JPIB) :: jb
1413 #endif
1414  !!----------------------------------------------------------------------
1415  !
1416 #if !defined in_surfex || defined SFX_MPI
1417  localcomm = mpi_comm_opa
1418  IF( present(kcom) ) localcomm = kcom
1419  !
1420 #if !defined in_arpege
1421 !$OMP SINGLE
1422  CALL mpi_allreduce( ktab, iwork, 1, mpi_integer, mpi_min, localcomm, ierror )
1423 !$OMP END SINGLE
1424  !
1425  ktab = iwork
1426 #else
1427  jb=ktab
1428 ! Commented out for now - does not work !
1429 ! CALL MPL_ALLREDUCE(jb,'MIN')
1430  ktab=jb
1431 #endif
1432 #endif
1433  !
1434  END SUBROUTINE mppmin_int
1435 
1436 
1437  SUBROUTINE mppsum_a_int( ktab, kdim )
1438  !!----------------------------------------------------------------------
1439  !! *** routine mppsum_a_int ***
1440  !!
1441  !! ** Purpose : Global integer sum, 1D array case
1442  !!
1443  !!----------------------------------------------------------------------
1444  INTEGER, INTENT(in ) :: kdim ! size of the array
1445  INTEGER, INTENT(inout), DIMENSION (kdim) :: ktab ! input array, overwritten with the global sum
1446  !!
1447  INTEGER :: ierror
1448  INTEGER, DIMENSION (kdim) :: iwork
1449  !!----------------------------------------------------------------------
1450  !
1451 #if !defined in_surfex || defined SFX_MPI
1452 #if !defined in_arpege
1453 !$OMP SINGLE
1454  CALL mpi_allreduce( ktab, iwork, kdim, mpi_integer, mpi_sum, mpi_comm_opa, ierror )
1455 !$OMP END SINGLE
1456  !
1457  ktab(:) = iwork(:)
1458 #else
1459  CALL abor1_sfx("lib_mpp:mppsum_a_int : Cannot yet sum an integer array in Arpege")
1460 #endif
1461 #endif
1462  !
1463  END SUBROUTINE mppsum_a_int
1464 
1465 
1466  SUBROUTINE mppsum_int( ktab )
1467  !!----------------------------------------------------------------------
1468  !! *** routine mppsum_int ***
1469  !!
1470  !! ** Purpose : Global integer sum
1471  !!
1472  !!----------------------------------------------------------------------
1473  INTEGER, INTENT(inout) :: ktab
1474  !!
1475  INTEGER :: ierror, iwork
1476 #ifdef in_arpege
1477  INTEGER(KIND=JPIB) :: jb
1478 #endif
1479  !!----------------------------------------------------------------------
1480  !
1481 #if !defined in_surfex || defined SFX_MPI
1482 #if !defined in_arpege
1483 !$OMP SINGLE
1484  CALL mpi_allreduce( ktab, iwork, 1, mpi_integer, mpi_sum, mpi_comm_opa, ierror)
1485 !$OMP END SINGLE
1486  !
1487  ktab = iwork
1488 #else
1489  jb=ktab
1490 ! Commented out for now - does not work !
1491 ! CALL MPL_ALLREDUCE(jb,'SUM')
1492  ktab=jb
1493 #endif
1494 #endif
1495  !
1496  END SUBROUTINE mppsum_int
1497 
1498 
1499  SUBROUTINE mppmax_a_real( ptab, kdim, kcom )
1500  !!----------------------------------------------------------------------
1501  !! *** routine mppmax_a_real ***
1502  !!
1503  !! ** Purpose : Maximum of REAL, array case
1504  !!
1505  !!----------------------------------------------------------------------
1506  INTEGER , INTENT(in ) :: kdim
1507  REAL(wp), INTENT(inout), DIMENSION(kdim) :: ptab
1508  INTEGER , INTENT(in ), OPTIONAL :: kcom
1509  !!
1510  INTEGER :: ierror, localcomm
1511  REAL(wp), DIMENSION(kdim) :: zwork
1512  !!----------------------------------------------------------------------
1513  !
1514 #if !defined in_surfex || defined SFX_MPI
1515  localcomm = mpi_comm_opa
1516  IF( present(kcom) ) localcomm = kcom
1517  !
1518 #if !defined in_arpege
1519 !$OMP SINGLE
1520  CALL mpi_allreduce( ptab, zwork, kdim, mpi_double_precision, mpi_max, localcomm, ierror )
1521 !$OMP END SINGLE
1522  !
1523  ptab(:) = zwork(:)
1524 #else
1525  CALL abor1_sfx("lib_mpp:mppmax_a_real : Cannot yet make a max on a real array in Arpege")
1526 #endif
1527 #endif
1528  END SUBROUTINE mppmax_a_real
1529 
1530 
1531  SUBROUTINE mppmax_real( ptab, kcom )
1532  !!----------------------------------------------------------------------
1533  !! *** routine mppmax_real ***
1534  !!
1535  !! ** Purpose : Maximum of REAL, scalar case
1536  !!
1537  !!----------------------------------------------------------------------
1538  REAL(wp), INTENT(inout) :: ptab ! value to be reduced (max over all processes)
1539  INTEGER , INTENT(in ), OPTIONAL :: kcom ! optional MPI communicator
1540  !!
1541  INTEGER :: ierror, localcomm
1542  REAL(wp) :: zwork
1543 #ifdef in_arpege
1544  REAL(KIND=JPRB) :: rb
1545 #endif
1546  !!----------------------------------------------------------------------
1547  !
1548 #if !defined in_surfex || defined SFX_MPI
1549  localcomm = mpi_comm_opa
1550  IF( present(kcom) ) localcomm = kcom
1551  !
1552 #if !defined in_arpege
1553 !$OMP SINGLE
1554  CALL mpi_allreduce( ptab, zwork, 1, mpi_double_precision, mpi_max, localcomm, ierror )
1555 !$OMP END SINGLE
1556  !
1557  ptab = zwork
1558 #else
1559  rb=ptab
1560 ! Commented out for now - does not work !
1561 ! CALL MPL_ALLREDUCE(rb,'MAX',LDREPROD=.TRUE.)
1562  ptab=rb
1563 #endif
1564 #endif
1565  END SUBROUTINE mppmax_real
1566 
1567 
1568  SUBROUTINE mppmin_a_real( ptab, kdim, kcom )
1569  !!----------------------------------------------------------------------
1570  !! *** routine mppmin_a_real ***
1571  !!
1572  !! ** Purpose : Minimum of REAL, array case
1573  !!
1574  !!-----------------------------------------------------------------------
1575  INTEGER , INTENT(in ) :: kdim
1576  REAL(wp), INTENT(inout), DIMENSION(kdim) :: ptab
1577  INTEGER , INTENT(in ), OPTIONAL :: kcom
1578  !!
1579  INTEGER :: ierror, localcomm
1580  REAL(wp), DIMENSION(kdim) :: zwork
1581  !!-----------------------------------------------------------------------
1582  !
1583 #if !defined in_surfex || defined SFX_MPI
1584  localcomm = mpi_comm_opa
1585  IF( present(kcom) ) localcomm = kcom
1586  !
1587 #if !defined in_arpege
1588 !$OMP SINGLE
1589  CALL mpi_allreduce( ptab, zwork, kdim, mpi_double_precision, mpi_min, localcomm, ierror )
1590 !$OMP END SINGLE
1591  !
1592  ptab(:) = zwork(:)
1593 #else
1594  CALL abor1_sfx("lib_mpp:mppmin_a_real : Cannot yet make a min on a real array in Arpege")
1595 #endif
1596 #endif
1597  END SUBROUTINE mppmin_a_real
1598 
1599 
1600  SUBROUTINE mppmin_real( ptab, kcom )
1601  !!----------------------------------------------------------------------
1602  !! *** routine mppmin_real ***
1603  !!
1604  !! ** Purpose : Minimum of REAL, scalar case
1605  !!
1606  !!-----------------------------------------------------------------------
1607  REAL(wp), INTENT(inout) :: ptab !
1608  INTEGER , INTENT(in ), OPTIONAL :: kcom
1609  !!
1610  INTEGER :: ierror
1611  REAL(wp) :: zwork
1612  INTEGER :: localcomm
1613 #ifdef in_arpege
1614  REAL(KIND=JPRB) :: rb
1615 #endif
1616  !!-----------------------------------------------------------------------
1617  !
1618 #if !defined in_surfex || defined SFX_MPI
1619  localcomm = mpi_comm_opa
1620  IF( present(kcom) ) localcomm = kcom
1621  !
1622 #if !defined in_arpege
1623 !$OMP SINGLE
1624  CALL mpi_allreduce( ptab, zwork, 1, mpi_double_precision, mpi_min, localcomm, ierror )
1625 !$OMP END SINGLE
1626  !
1627  ptab = zwork
1628 #else
1629  rb=ptab
1630 ! Commented out for now - does not work !
1631 ! CALL MPL_ALLREDUCE(rb,'MIN',LDREPROD=.TRUE.)
1632  ptab=rb
1633 #endif
1634 #endif
1635  END SUBROUTINE mppmin_real
1636 
1637 
1638  SUBROUTINE mppsum_a_real( ptab, kdim, kcom )
1639  !!----------------------------------------------------------------------
1640  !! *** routine mppsum_a_real ***
1641  !!
1642  !! ** Purpose : global sum, REAL ARRAY argument case
1643  !!
1644  !!-----------------------------------------------------------------------
1645  INTEGER , INTENT( in ) :: kdim ! size of ptab
1646  REAL(wp), DIMENSION(kdim), INTENT( inout ) :: ptab ! input array
1647  INTEGER , INTENT( in ), OPTIONAL :: kcom
1648  !!
1649  INTEGER :: ierror ! temporary integer
1650  INTEGER :: localcomm
1651  REAL(wp), DIMENSION(kdim) :: zwork ! temporary workspace
1652  !!-----------------------------------------------------------------------
1653  !
1654 #if !defined in_surfex || defined SFX_MPI
1655  localcomm = mpi_comm_opa
1656  IF( present(kcom) ) localcomm = kcom
1657  !
1658 #if !defined in_arpege
1659 !$OMP SINGLE
1660  CALL mpi_allreduce( ptab, zwork, kdim, mpi_double_precision, mpi_sum, localcomm, ierror )
1661 !$OMP END SINGLE
1662  !
1663  ptab(:) = zwork(:)
1664 #else
1665  CALL abor1_sfx("lib_mpp:mppsum_a_real : Cannot yet make a sum on a real array in Arpege")
1666 #endif
1667 #endif
1668  END SUBROUTINE mppsum_a_real
1669 
1670 
1671  SUBROUTINE mppsum_real( ptab, kcom )
1672  !!----------------------------------------------------------------------
1673  !! *** routine mppsum_real ***
1674  !!
1675  !! ** Purpose : global sum, SCALAR argument case
1676  !!
1677  !!-----------------------------------------------------------------------
1678  REAL(wp), INTENT(inout) :: ptab ! input scalar
1679  INTEGER , INTENT(in ), OPTIONAL :: kcom
1680  !!
1681  INTEGER :: ierror, localcomm
1682  REAL(wp) :: zwork
1683 #ifdef in_arpege
1684  REAL(KIND=JPRB) :: rb
1685 #endif
1686  !!-----------------------------------------------------------------------
1687  !
1688 #if !defined in_surfex || defined SFX_MPI
1689  localcomm = mpi_comm_opa
1690  IF( present(kcom) ) localcomm = kcom
1691  !
1692 #if !defined in_arpege
1693 !$OMP SINGLE
1694  CALL mpi_allreduce( ptab, zwork, 1, mpi_double_precision, mpi_sum, localcomm, ierror )
1695 !$OMP END SINGLE
1696  !
1697  ptab = zwork
1698 #else
1699  rb=ptab
1700 ! Commented out for now - does not work !
1701 ! CALL MPL_ALLREDUCE(rb,'SUM',LDREPROD=.TRUE.)
1702  ptab=rb
1703 #endif
1704 #endif
1705  END SUBROUTINE mppsum_real
1706 
1707 #if ! defined in_surfex
1708  SUBROUTINE mppsum_realdd( ytab, kcom )
1709  !!----------------------------------------------------------------------
1710  !! *** routine mppsum_realdd ***
1711  !!
1712  !! ** Purpose : global sum in Massively Parallel Processing
1713  !! SCALAR argument case for double-double precision
1714  !!
1715  !!-----------------------------------------------------------------------
1716  COMPLEX(wp), INTENT(inout) :: ytab ! input scalar
1717  INTEGER , INTENT( in ), OPTIONAL :: kcom
1718 
1719  !! * Local variables (MPI version)
1720  INTEGER :: ierror
1721  INTEGER :: localcomm
1722  COMPLEX(wp) :: zwork
1723 
1724  localcomm = mpi_comm_opa
1725  IF( present(kcom) ) localcomm = kcom
1726 
1727  ! reduce local sums into global sum
1728 !$OMP SINGLE
1729  CALL mpi_allreduce(ytab, zwork, 1, mpi_double_complex, &
1730  mpi_sumdd,localcomm,ierror)
1731 !$OMP END SINGLE
1732  ytab = zwork
1733 
1734  END SUBROUTINE mppsum_realdd
1735 
1736 
1737  SUBROUTINE mppsum_a_realdd( ytab, kdim, kcom )
1738  !!----------------------------------------------------------------------
1739  !! *** routine mppsum_a_realdd ***
1740  !!
1741  !! ** Purpose : global sum in Massively Parallel Processing
1742  !! COMPLEX ARRAY case for double-double precision
1743  !!
1744  !!-----------------------------------------------------------------------
1745  INTEGER , INTENT( in ) :: kdim ! size of ytab
1746  COMPLEX(wp), DIMENSION(kdim), INTENT( inout ) :: ytab ! input array
1747  INTEGER , INTENT( in ), OPTIONAL :: kcom
1748 
1749  !! * Local variables (MPI version)
1750  INTEGER :: ierror ! temporary integer
1751  INTEGER :: localcomm
1752  COMPLEX(wp), DIMENSION(kdim) :: zwork ! temporary workspace
1753 
1754  localcomm = mpi_comm_opa
1755  IF( present(kcom) ) localcomm = kcom
1756 !$OMP SINGLE
1757  CALL mpi_allreduce(ytab, zwork, kdim, mpi_double_complex, &
1758  mpi_sumdd,localcomm,ierror)
1759 !$OMP END SINGLE
1760  ytab(:) = zwork(:)
1761 
1762  END SUBROUTINE mppsum_a_realdd
1763 
1764 
1765  SUBROUTINE mpp_minloc2d( ptab, pmask, pmin, ki,kj )
1766  !!------------------------------------------------------------------------
1767  !! *** routine mpp_minloc ***
1768  !!
1769  !! ** Purpose : Compute the global minimum of an array ptab
1770  !! and also give its global position
1771  !!
1772  !! ** Method : Use MPI_ALLREDUCE with MPI_MINLOC
1773  !!
1774  !!--------------------------------------------------------------------------
1775  REAL(wp), DIMENSION (jpi,jpj), INTENT(in ) :: ptab ! Local 2D array
1776  REAL(wp), DIMENSION (jpi,jpj), INTENT(in ) :: pmask ! Local mask
1777  REAL(wp) , INTENT( out) :: pmin ! Global minimum of ptab
1778  INTEGER , INTENT( out) :: ki, kj ! index of minimum in global frame
1779  !!
1780  INTEGER , DIMENSION(2) :: ilocs
1781  INTEGER :: ierror
1782  REAL(wp) :: zmin ! local minimum
1783  REAL(wp), DIMENSION(2,1) :: zain, zaout
1784  !!-----------------------------------------------------------------------
1785  !
1786  zmin = minval( ptab(:,:) , mask= pmask == 1.e0 )
1787  ilocs = minloc( ptab(:,:) , mask= pmask == 1.e0 )
1788  !
1789  ki = ilocs(1) + nimpp - 1
1790  kj = ilocs(2) + njmpp - 1
1791  !
1792  zain(1,:)=zmin
1793  zain(2,:)=ki+10000.*kj
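      ! Pack the local minimum and its global (i,j) location into a 2-element vector so a
      ! single mpi_allreduce with mpi_minloc reduces value and position together; the
      ! location is encoded as ki + 10000*kj, which assumes the global i-index is < 10000.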
1794  !
1795  CALL mpi_allreduce( zain,zaout, 1, mpi_2double_precision,mpi_minloc,mpi_comm_opa,ierror)
1796  !
1797  pmin = zaout(1,1)
1798  kj = int(zaout(2,1)/10000.)
1799  ki = int(zaout(2,1) - 10000.*kj )
1800  !
1801  END SUBROUTINE mpp_minloc2d
1802 
1803 
1804  SUBROUTINE mpp_minloc3d( ptab, pmask, pmin, ki, kj ,kk)
1805  !!------------------------------------------------------------------------
1806  !! *** routine mpp_minloc ***
1807  !!
1808  !! ** Purpose : Compute the global minimum of an array ptab
1809  !! and also give its global position
1810  !!
1811  !! ** Method : Use MPI_ALLREDUCE with MPI_MINLOC
1812  !!
1813  !!--------------------------------------------------------------------------
1814  REAL(wp), DIMENSION (jpi,jpj,jpk), INTENT(in ) :: ptab ! Local 3D array
1815  REAL(wp), DIMENSION (jpi,jpj,jpk), INTENT(in ) :: pmask ! Local mask
1816  REAL(wp) , INTENT( out) :: pmin ! Global minimum of ptab
1817  INTEGER , INTENT( out) :: ki, kj, kk ! index of minimum in global frame
1818  !!
1819  INTEGER :: ierror
1820  REAL(wp) :: zmin ! local minimum
1821  INTEGER , DIMENSION(3) :: ilocs
1822  REAL(wp), DIMENSION(2,1) :: zain, zaout
1823  !!-----------------------------------------------------------------------
1824  !
1825  zmin = minval( ptab(:,:,:) , mask= pmask == 1.e0 )
1826  ilocs = minloc( ptab(:,:,:) , mask= pmask == 1.e0 )
1827  !
1828  ki = ilocs(1) + nimpp - 1
1829  kj = ilocs(2) + njmpp - 1
1830  kk = ilocs(3)
1831  !
1832  zain(1,:)=zmin
1833  zain(2,:)=ki+10000.*kj+100000000.*kk
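      ! Same packing as the 2D case, extended to 3D: the location is encoded as
      ! ki + 10000*kj + 100000000*kk, assuming global i and j indices are < 10000.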
1834  !
1835  CALL mpi_allreduce( zain,zaout, 1, mpi_2double_precision,mpi_minloc,mpi_comm_opa,ierror)
1836  !
1837  pmin = zaout(1,1)
1838  kk = int( zaout(2,1) / 100000000. )
1839  kj = int( zaout(2,1) - kk * 100000000. ) / 10000
1840  ki = int( zaout(2,1) - kk * 100000000. -kj * 10000. )
1841  !
1842  END SUBROUTINE mpp_minloc3d
1843 
1844 
1845  SUBROUTINE mpp_maxloc2d( ptab, pmask, pmax, ki, kj )
1846  !!------------------------------------------------------------------------
1847  !! *** routine mpp_maxloc ***
1848  !!
1849  !! ** Purpose : Compute the global maximum of an array ptab
1850  !! and also give its global position
1851  !!
1852  !! ** Method : Use MPI_ALLREDUCE with MPI_MAXLOC
1853  !!
1854  !!--------------------------------------------------------------------------
1855  REAL(wp), DIMENSION (jpi,jpj), INTENT(in ) :: ptab ! Local 2D array
1856  REAL(wp), DIMENSION (jpi,jpj), INTENT(in ) :: pmask ! Local mask
1857  REAL(wp) , INTENT( out) :: pmax ! Global maximum of ptab
1858  INTEGER , INTENT( out) :: ki, kj ! index of maximum in global frame
1859  !!
1860  INTEGER :: ierror
1861  INTEGER, DIMENSION (2) :: ilocs
1862  REAL(wp) :: zmax ! local maximum
1863  REAL(wp), DIMENSION(2,1) :: zain, zaout
1864  !!-----------------------------------------------------------------------
1865  !
1866  zmax = maxval( ptab(:,:) , mask= pmask == 1.e0 )
1867  ilocs = maxloc( ptab(:,:) , mask= pmask == 1.e0 )
1868  !
1869  ki = ilocs(1) + nimpp - 1
1870  kj = ilocs(2) + njmpp - 1
1871  !
1872  zain(1,:) = zmax
1873  zain(2,:) = ki + 10000. * kj
1874  !
1875  CALL mpi_allreduce( zain,zaout, 1, mpi_2double_precision,mpi_maxloc,mpi_comm_opa,ierror)
1876  !
1877  pmax = zaout(1,1)
1878  kj = int( zaout(2,1) / 10000. )
1879  ki = int( zaout(2,1) - 10000.* kj )
1880  !
1881  END SUBROUTINE mpp_maxloc2d
1882 
1883 
1884  SUBROUTINE mpp_maxloc3d( ptab, pmask, pmax, ki, kj, kk )
1885  !!------------------------------------------------------------------------
1886  !! *** routine mpp_maxloc ***
1887  !!
1888  !! ** Purpose : Compute the global maximum of an array ptab
1889  !! and also give its global position
1890  !!
1891  !! ** Method : Use MPI_ALLREDUCE with MPI_MAXLOC
1892  !!
1893  !!--------------------------------------------------------------------------
1894  REAL(wp), DIMENSION (jpi,jpj,jpk), INTENT(in ) :: ptab ! Local 3D array
1895  REAL(wp), DIMENSION (jpi,jpj,jpk), INTENT(in ) :: pmask ! Local mask
1896  REAL(wp) , INTENT( out) :: pmax ! Global maximum of ptab
1897  INTEGER , INTENT( out) :: ki, kj, kk ! index of maximum in global frame
1898  !!
1899  REAL(wp) :: zmax ! local maximum
1900  REAL(wp), DIMENSION(2,1) :: zain, zaout
1901  INTEGER , DIMENSION(3) :: ilocs
1902  INTEGER :: ierror
1903  !!-----------------------------------------------------------------------
1904  !
1905  zmax = maxval( ptab(:,:,:) , mask= pmask == 1.e0 )
1906  ilocs = maxloc( ptab(:,:,:) , mask= pmask == 1.e0 )
1907  !
1908  ki = ilocs(1) + nimpp - 1
1909  kj = ilocs(2) + njmpp - 1
1910  kk = ilocs(3)
1911  !
1912  zain(1,:)=zmax
1913  zain(2,:)=ki+10000.*kj+100000000.*kk
1914  !
1915  CALL mpi_allreduce( zain,zaout, 1, mpi_2double_precision,mpi_maxloc,mpi_comm_opa,ierror)
1916  !
1917  pmax = zaout(1,1)
1918  kk = int( zaout(2,1) / 100000000. )
1919  kj = int( zaout(2,1) - kk * 100000000. ) / 10000
1920  ki = int( zaout(2,1) - kk * 100000000. -kj * 10000. )
1921  !
1922  END SUBROUTINE mpp_maxloc3d
1923 
1924 
1925  SUBROUTINE mppsync()
1926  !!----------------------------------------------------------------------
1927  !! *** routine mppsync ***
1928  !!
1929  !! ** Purpose : Massively parallel processors, synchronous barrier
1930  !!
1931  !!-----------------------------------------------------------------------
1932  INTEGER :: ierror
1933  !!-----------------------------------------------------------------------
1934  !
1935  CALL mpi_barrier( mpi_comm_opa, ierror )
1936  !
1937  END SUBROUTINE mppsync
1938 
1939 
1940  SUBROUTINE mppstop
1941  !!----------------------------------------------------------------------
1942  !! *** routine mppstop ***
1943  !!
1944  !! ** Purpose : Stop the massively parallel processing method (finalize MPI)
1945  !!
1946  !!----------------------------------------------------------------------
1947  INTEGER :: info
1948  !!----------------------------------------------------------------------
1949  !
1950  CALL mppsync
1951  CALL mpi_finalize( info )
1952  !
1953  END SUBROUTINE mppstop
1954 
1955 
1956  SUBROUTINE mppobc( ptab, kd1, kd2, kl, kk, ktype, kij , kumout)
1957  !!----------------------------------------------------------------------
1958  !! *** routine mppobc ***
1959  !!
1960  !! ** Purpose : Message passing management for open boundary
1961  !! condition arrays
1962  !!
1963  !! ** Method : Use mppsend and mpprecv function for passing mask
1964  !! between processors following neighboring subdomains.
1965  !! domain parameters
1966  !! nlci : first dimension of the local subdomain
1967  !! nlcj : second dimension of the local subdomain
1968  !! nbondi : mark for "east-west local boundary"
1969  !! nbondj : mark for "north-south local boundary"
1970  !! noea : number for local neighboring processors
1971  !! nowe : number for local neighboring processors
1972  !! noso : number for local neighboring processors
1973  !! nono : number for local neighboring processors
1974  !!
1975  !!----------------------------------------------------------------------
1976  USE wrk_nemo ! Memory allocation
1977  !
1978  INTEGER , INTENT(in ) :: kd1, kd2 ! starting and ending indices
1979  INTEGER , INTENT(in ) :: kl ! index of open boundary
1980  INTEGER , INTENT(in ) :: kk ! vertical dimension
1981  INTEGER , INTENT(in ) :: ktype ! define north/south or east/west cdt
1982  ! ! = 1 north/south ; = 2 east/west
1983  INTEGER , INTENT(in ) :: kij ! horizontal dimension
1984  INTEGER , INTENT(in ) :: kumout ! ocean.output logical unit
1985  REAL(wp), INTENT(inout), DIMENSION(kij,kk) :: ptab ! variable array
1986  !
1987  INTEGER :: ji, jj, jk, jl ! dummy loop indices
1988  INTEGER :: iipt0, iipt1, ilpt1 ! local integers
1989  INTEGER :: ijpt0, ijpt1 ! - -
1990  INTEGER :: imigr, iihom, ijhom ! - -
1991  INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend
1992  INTEGER :: ml_stat(mpi_status_size) ! for key_mpi_isend
1993  REAL(wp), POINTER, DIMENSION(:,:) :: ztab ! temporary workspace
1994  !!----------------------------------------------------------------------
1995 
1996  CALL wrk_alloc( jpi,jpj, ztab )
1997 
1998  ! boundary condition initialization
1999  ! ---------------------------------
2000  ztab(:,:) = 0.e0
2001  !
2002  IF( ktype==1 ) THEN ! north/south boundaries
2003  iipt0 = max( 1, min(kd1 - nimpp+1, nlci ) )
2004  iipt1 = max( 0, min(kd2 - nimpp+1, nlci - 1 ) )
2005  ilpt1 = max( 1, min(kd2 - nimpp+1, nlci ) )
2006  ijpt0 = max( 1, min(kl - njmpp+1, nlcj ) )
2007  ijpt1 = max( 0, min(kl - njmpp+1, nlcj - 1 ) )
2008  ELSEIF( ktype==2 ) THEN ! east/west boundaries
2009  iipt0 = max( 1, min(kl - nimpp+1, nlci ) )
2010  iipt1 = max( 0, min(kl - nimpp+1, nlci - 1 ) )
2011  ijpt0 = max( 1, min(kd1 - njmpp+1, nlcj ) )
2012  ijpt1 = max( 0, min(kd2 - njmpp+1, nlcj - 1 ) )
2013  ilpt1 = max( 1, min(kd2 - njmpp+1, nlcj ) )
2014  ELSE
2015  WRITE(kumout, cform_err)
2016  WRITE(kumout,*) 'mppobc : bad ktype'
2017  CALL mppstop
2018  ENDIF
2019 
2020  ! Communication level by level
2021  ! ----------------------------
2022 !!gm Remark : this is very time consuming!!!
2023  ! ! ------------------------ !
2024  DO jk = 1, kk ! Loop over the levels !
2025  ! ! ------------------------ !
2026  !
2027  IF( ktype == 1 ) THEN ! north/south boundaries
2028  DO jj = ijpt0, ijpt1
2029  DO ji = iipt0, iipt1
2030  ztab(ji,jj) = ptab(ji,jk)
2031  END DO
2032  END DO
2033  ELSEIF( ktype == 2 ) THEN ! east/west boundaries
2034  DO jj = ijpt0, ijpt1
2035  DO ji = iipt0, iipt1
2036  ztab(ji,jj) = ptab(jj,jk)
2037  END DO
2038  END DO
2039  ENDIF
2040 
2041 
2042  ! 1. East and west directions
2043  ! ---------------------------
2044  !
2045  IF( nbondi /= 2 ) THEN ! Read Dirichlet lateral conditions
2046  iihom = nlci-nreci
2047  DO jl = 1, jpreci
2048  t2ew(:,jl,1) = ztab(jpreci+jl,:)
2049  t2we(:,jl,1) = ztab(iihom +jl,:)
2050  END DO
2051  ENDIF
2052  !
2053  ! ! Migrations
2054  imigr=jpreci*jpj
2055  !
2056  IF( nbondi == -1 ) THEN
2057  CALL mppsend( 2, t2we(1,1,1), imigr, noea, ml_req1 )
2058  CALL mpprecv( 1, t2ew(1,1,2), imigr, noea )
2059  IF(l_isend) CALL mpi_wait( ml_req1, ml_stat, ml_err )
2060  ELSEIF( nbondi == 0 ) THEN
2061  CALL mppsend( 1, t2ew(1,1,1), imigr, nowe, ml_req1 )
2062  CALL mppsend( 2, t2we(1,1,1), imigr, noea, ml_req2 )
2063  CALL mpprecv( 1, t2ew(1,1,2), imigr, noea )
2064  CALL mpprecv( 2, t2we(1,1,2), imigr, nowe )
2065  IF(l_isend) CALL mpi_wait( ml_req1, ml_stat, ml_err )
2066  IF(l_isend) CALL mpi_wait( ml_req2, ml_stat, ml_err )
2067  ELSEIF( nbondi == 1 ) THEN
2068  CALL mppsend( 1, t2ew(1,1,1), imigr, nowe, ml_req1 )
2069  CALL mpprecv( 2, t2we(1,1,2), imigr, nowe )
2070  IF(l_isend) CALL mpi_wait( ml_req1, ml_stat, ml_err )
2071  ENDIF
2072  !
2073  ! ! Write Dirichlet lateral conditions
2074  iihom = nlci-jpreci
2075  !
2076  IF( nbondi == 0 .OR. nbondi == 1 ) THEN
2077  DO jl = 1, jpreci
2078  ztab(jl,:) = t2we(:,jl,2)
2079  END DO
2080  ENDIF
2081  IF( nbondi == -1 .OR. nbondi == 0 ) THEN
2082  DO jl = 1, jpreci
2083  ztab(iihom+jl,:) = t2ew(:,jl,2)
2084  END DO
2085  ENDIF
2086 
2087 
2088  ! 2. North and south directions
2089  ! -----------------------------
2090  !
2091  IF( nbondj /= 2 ) THEN ! Read Dirichlet lateral conditions
2092  ijhom = nlcj-nrecj
2093  DO jl = 1, jprecj
2094  t2sn(:,jl,1) = ztab(:,ijhom +jl)
2095  t2ns(:,jl,1) = ztab(:,jprecj+jl)
2096  END DO
2097  ENDIF
2098  !
2099  ! ! Migrations
2100  imigr = jprecj * jpi
2101  !
2102  IF( nbondj == -1 ) THEN
2103  CALL mppsend( 4, t2sn(1,1,1), imigr, nono, ml_req1 )
2104  CALL mpprecv( 3, t2ns(1,1,2), imigr, nono )
2105  IF(l_isend) CALL mpi_wait( ml_req1, ml_stat, ml_err )
2106  ELSEIF( nbondj == 0 ) THEN
2107  CALL mppsend( 3, t2ns(1,1,1), imigr, noso, ml_req1 )
2108  CALL mppsend( 4, t2sn(1,1,1), imigr, nono, ml_req2 )
2109  CALL mpprecv( 3, t2ns(1,1,2), imigr, nono )
2110  CALL mpprecv( 4, t2sn(1,1,2), imigr, noso )
2111  IF( l_isend ) CALL mpi_wait( ml_req1, ml_stat, ml_err )
2112  IF( l_isend ) CALL mpi_wait( ml_req2, ml_stat, ml_err )
2113  ELSEIF( nbondj == 1 ) THEN
2114  CALL mppsend( 3, t2ns(1,1,1), imigr, noso, ml_req1 )
2115  CALL mpprecv( 4, t2sn(1,1,2), imigr, noso)
2116  IF( l_isend ) CALL mpi_wait( ml_req1, ml_stat, ml_err )
2117  ENDIF
2118  !
2119  ! ! Write Dirichlet lateral conditions
2120  ijhom = nlcj - jprecj
2121  IF( nbondj == 0 .OR. nbondj == 1 ) THEN
2122  DO jl = 1, jprecj
2123  ztab(:,jl) = t2sn(:,jl,2)
2124  END DO
2125  ENDIF
2126  IF( nbondj == 0 .OR. nbondj == -1 ) THEN
2127  DO jl = 1, jprecj
2128  ztab(:,ijhom+jl) = t2ns(:,jl,2)
2129  END DO
2130  ENDIF
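      ! Copy the exchanged boundary values back into ptab, but only where the requested
      ! open-boundary segment actually overlaps this processor's subdomain.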
2131  IF( ktype==1 .AND. kd1 <= jpi+nimpp-1 .AND. nimpp <= kd2 ) THEN
2132  DO jj = ijpt0, ijpt1 ! north/south boundaries
2133  DO ji = iipt0,ilpt1
2134  ptab(ji,jk) = ztab(ji,jj)
2135  END DO
2136  END DO
2137  ELSEIF( ktype==2 .AND. kd1 <= jpj+njmpp-1 .AND. njmpp <= kd2 ) THEN
2138  DO jj = ijpt0, ilpt1 ! east/west boundaries
2139  DO ji = iipt0,iipt1
2140  ptab(jj,jk) = ztab(ji,jj)
2141  END DO
2142  END DO
2143  ENDIF
2144  !
2145  END DO
2146  !
2147  CALL wrk_dealloc( jpi,jpj, ztab )
2148  !
2149  END SUBROUTINE mppobc
2150 
2151 
2152 ! SUBROUTINE mpp_comm_free( kcom )
2153 ! !!----------------------------------------------------------------------
2154 ! !!----------------------------------------------------------------------
2155 ! INTEGER, INTENT(in) :: kcom
2156 ! !!
2157 ! INTEGER :: ierr
2158 ! !!----------------------------------------------------------------------
2159 ! !
2160 ! CALL MPI_COMM_FREE(kcom, ierr)
2161 ! !
2162 ! END SUBROUTINE mpp_comm_free
2163 
2164 
2165  SUBROUTINE mpp_ini_ice( pindic, kumout )
2166  !!----------------------------------------------------------------------
2167  !! *** routine mpp_ini_ice ***
2168  !!
2169  !! ** Purpose : Initialize special communicator for ice areas
2170  !! condition together with global variables needed in the ddmpp folding
2171  !!
2172  !! ** Method : - Look for ice processors in ice routines
2173  !! - Put their number in nrank_ice
2174  !! - Create groups for the world processors and the ice processors
2175  !! - Create a communicator for ice processors
2176  !!
2177  !! ** output
2178  !! njmppmax = njmpp for northern procs
2179  !! ndim_rank_ice = number of processors with ice
2180  !! nrank_ice (ndim_rank_ice) = ice processors
2181  !! ngrp_iworld = group ID for the world processors
2182  !! ngrp_ice = group ID for the ice processors
2183  !! ncomm_ice = communicator for the ice procs.
2184  !! n_ice_root = number (in the world) of proc 0 in the ice comm.
2185  !!
2186  !!----------------------------------------------------------------------
2187  INTEGER, INTENT(in) :: pindic
2188  INTEGER, INTENT(in) :: kumout ! ocean.output logical unit
2189  !!
2190  INTEGER :: jjproc
2191  INTEGER :: ii, ierr
2192  INTEGER, ALLOCATABLE, DIMENSION(:) :: kice
2193  INTEGER, ALLOCATABLE, DIMENSION(:) :: zwork
2194  !!----------------------------------------------------------------------
2195  !
2196  ! Since this is just an init routine and these arrays are of length jpnij,
2197  ! don't use the wrk_nemo module - just allocate and deallocate them here.
2198  ALLOCATE( kice(jpnij), zwork(jpnij), stat=ierr )
2199  IF( ierr /= 0 ) THEN
2200  WRITE(kumout, cform_err)
2201  WRITE(kumout,*) 'mpp_ini_ice : failed to allocate 2, 1D arrays (jpnij in length)'
2202  CALL mppstop
2203  ENDIF
2204 
2205  ! Look for how many procs with sea-ice
2206  !
2207  kice = 0
2208  DO jjproc = 1, jpnij
2209  IF( jjproc == narea .AND. pindic .GT. 0 ) kice(jjproc) = 1
2210  END DO
2211  !
2212  zwork = 0
2213  CALL mpi_allreduce( kice, zwork, jpnij, mpi_integer, mpi_sum, mpi_comm_opa, ierr )
2214  ndim_rank_ice = sum( zwork )
2215 
2216  ! Allocate the right size to nrank_ice
2217  IF( ALLOCATED ( nrank_ice ) ) DEALLOCATE( nrank_ice )
2218  ALLOCATE( nrank_ice(ndim_rank_ice) )
2219  !
2220  ii = 0
2221  nrank_ice = 0
2222  DO jjproc = 1, jpnij
2223  IF( zwork(jjproc) == 1) THEN
2224  ii = ii + 1
2225  nrank_ice(ii) = jjproc -1
2226  ENDIF
2227  END DO
2228 
2229  ! Create the world group
2230  CALL mpi_comm_group( mpi_comm_opa, ngrp_iworld, ierr )
2231 
2232  ! Create the ice group from the world group
2233  CALL mpi_group_incl( ngrp_iworld, ndim_rank_ice, nrank_ice, ngrp_ice, ierr )
2234 
2235  ! Create the ice communicator , ie the pool of procs with sea-ice
2236  CALL mpi_comm_create( mpi_comm_opa, ngrp_ice, ncomm_ice, ierr )
2237 
2238  ! Find the rank (in the world group) of proc 0 of the ice communicator
2239  ! The following line seems to be useless; we just comment it out and keep it as a reminder
2240  ! CALL MPI_GROUP_TRANSLATE_RANKS(ngrp_ice,1,0,ngrp_iworld,n_ice_root,ierr)
2241  !
2242  CALL mpi_group_free(ngrp_ice, ierr)
2243  CALL mpi_group_free(ngrp_iworld, ierr)
2244 
2245  DEALLOCATE(kice, zwork)
2246  !
2247  END SUBROUTINE mpp_ini_ice
2248 
2249 
2250  SUBROUTINE mpp_ini_znl( kumout )
2251  !!----------------------------------------------------------------------
2252  !! *** routine mpp_ini_znl ***
2253  !!
2254  !! ** Purpose : Initialize special communicator for computing zonal sum
2255  !!
2256  !! ** Method : - Look for processors in the same row
2257  !! - Put their number in nrank_znl
2258  !! - Create group for the znl processors
2259  !! - Create a communicator for znl processors
2260  !! - Determine if processor should write znl files
2261  !!
2262  !! ** output
2263  !! ndim_rank_znl = number of processors on the same row
2264  !! ngrp_znl = group ID for the znl processors
2265  !! ncomm_znl = communicator for the znl procs.
2266  !! n_znl_root = number (in the world) of proc 0 in the znl comm.
2267  !!
2268  !!----------------------------------------------------------------------
2269  INTEGER, INTENT(in) :: kumout ! ocean.output logical unit
2270  !
2271  INTEGER :: jproc ! dummy loop integer
2272  INTEGER :: ierr, ii ! local integer
2273  INTEGER, ALLOCATABLE, DIMENSION(:) :: kwork
2274  !!----------------------------------------------------------------------
2275  !-$$ WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - ngrp_world : ', ngrp_world
2276  !-$$ WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - mpi_comm_world : ', mpi_comm_world
2277  !-$$ WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - mpi_comm_opa : ', mpi_comm_opa
2278  !
2279  ALLOCATE( kwork(jpnij), stat=ierr )
2280  IF( ierr /= 0 ) THEN
2281  WRITE(kumout, cform_err)
2282  WRITE(kumout,*) 'mpp_ini_znl : failed to allocate 1D array of length jpnij'
2283  CALL mppstop
2284  ENDIF
2285 
2286  IF( jpnj == 1 ) THEN
2287  ngrp_znl = ngrp_world
2288  ncomm_znl = mpi_comm_opa
2289  ELSE
2290  !
2291  CALL mpi_allgather( njmpp, 1, mpi_integer, kwork, 1, mpi_integer, mpi_comm_opa, ierr )
2292  !-$$ WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - kwork pour njmpp : ', kwork
2293  !-$$ CALL flush(numout)
2294  !
2295  ! Count number of processors on the same row
2296  ndim_rank_znl = 0
2297  DO jproc=1,jpnij
2298  IF ( kwork(jproc) == njmpp ) THEN
2299  ndim_rank_znl = ndim_rank_znl + 1
2300  ENDIF
2301  END DO
2302  !-$$ WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - ndim_rank_znl : ', ndim_rank_znl
2303  !-$$ CALL flush(numout)
2304  ! Allocate the right size to nrank_znl
2305  IF (ALLOCATED (nrank_znl)) DEALLOCATE(nrank_znl)
2306  ALLOCATE(nrank_znl(ndim_rank_znl))
2307  ii = 0
2308  nrank_znl(:) = 0
2309  DO jproc=1,jpnij
2310  IF ( kwork(jproc) == njmpp) THEN
2311  ii = ii + 1
2312  nrank_znl(ii) = jproc -1
2313  ENDIF
2314  END DO
2315  !-$$ WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - nrank_znl : ', nrank_znl
2316  !-$$ CALL flush(numout)
2317 
2318  ! Create the opa group
2319  CALL mpi_comm_group(mpi_comm_opa,ngrp_opa,ierr)
2320  !-$$ WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - ngrp_opa : ', ngrp_opa
2321  !-$$ CALL flush(numout)
2322 
2323  ! Create the znl group from the opa group
2324  CALL mpi_group_incl( ngrp_opa, ndim_rank_znl, nrank_znl, ngrp_znl, ierr )
2325  !-$$ WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - ngrp_znl ', ngrp_znl
2326  !-$$ CALL flush(numout)
2327 
2328  ! Create the znl communicator from the opa communicator, ie the pool of procs in the same row
2329  CALL mpi_comm_create( mpi_comm_opa, ngrp_znl, ncomm_znl, ierr )
2330  !-$$ WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - ncomm_znl ', ncomm_znl
2331  !-$$ CALL flush(numout)
2332  !
2333  END IF
2334 
2335  ! Determine whether this processor is the first (starting from i=1) on its row
2336  IF ( jpni == 1 ) THEN
2337  l_znl_root = .true.
2338  ELSE
2339  l_znl_root = .false.
2340  kwork(1) = nimpp
2341  CALL mpp_min( kwork(1), kcom = ncomm_znl)
2342  IF ( nimpp == kwork(1)) l_znl_root = .true.
2343  END IF
2344 
2345  DEALLOCATE(kwork)
2346 
2347  END SUBROUTINE mpp_ini_znl
2348 
2349 
2350  SUBROUTINE mpp_ini_north
2351  !!----------------------------------------------------------------------
2352  !! *** routine mpp_ini_north ***
2353  !!
2354  !! ** Purpose : Initialize special communicator for north folding
2355  !! condition together with global variables needed in the mpp folding
2356  !!
2357  !! ** Method : - Look for northern processors
2358  !! - Put their number in nrank_north
2359  !! - Create groups for the world processors and the north processors
2360  !! - Create a communicator for northern processors
2361  !!
2362  !! ** output
2363  !! njmppmax = njmpp for northern procs
2364  !! ndim_rank_north = number of processors in the northern line
2365  !! nrank_north (ndim_rank_north) = number of the northern procs.
2366  !! ngrp_world = group ID for the world processors
2367  !! ngrp_north = group ID for the northern processors
2368  !! ncomm_north = communicator for the northern procs.
2369  !! north_root = number (in the world) of proc 0 in the northern comm.
2370  !!
2371  !!----------------------------------------------------------------------
2372  INTEGER :: ierr
2373  INTEGER :: jjproc
2374  INTEGER :: ii, ji
2375  !!----------------------------------------------------------------------
2376  !
2377  njmppmax = maxval( njmppt )
2378  !
2379  ! Look for how many procs on the northern boundary
2380  ndim_rank_north = 0
2381  DO jjproc = 1, jpnij
2382  IF( njmppt(jjproc) == njmppmax ) ndim_rank_north = ndim_rank_north + 1
2383  END DO
2384  !
2385  ! Allocate the right size to nrank_north
2386  IF (ALLOCATED (nrank_north)) DEALLOCATE(nrank_north)
2387  ALLOCATE( nrank_north(ndim_rank_north) )
2388 
2389  ! Fill the nrank_north array with proc. number of northern procs.
2390  ! Note : the rank start at 0 in MPI
2391  ii = 0
2392  DO ji = 1, jpnij
2393  IF ( njmppt(ji) == njmppmax ) THEN
2394  ii=ii+1
2395  nrank_north(ii)=ji-1
2396  END IF
2397  END DO
2398  !
2399  ! create the world group
2400  CALL mpi_comm_group( mpi_comm_opa, ngrp_world, ierr )
2401  !
2402  ! Create the North group from the world group
2403  CALL mpi_group_incl( ngrp_world, ndim_rank_north, nrank_north, ngrp_north, ierr )
2404  !
2405  ! Create the North communicator , ie the pool of procs in the north group
2406  CALL mpi_comm_create( mpi_comm_opa, ngrp_north, ncomm_north, ierr )
2407  !
2408  END SUBROUTINE mpp_ini_north
2409 
2410 
2411  SUBROUTINE mpp_lbc_north_3d( pt3d, cd_type, psgn )
2412  !!---------------------------------------------------------------------
2413  !! *** routine mpp_lbc_north_3d ***
2414  !!
2415  !! ** Purpose : Ensure proper north fold horizontal boundary condition
2416  !! in mpp configuration in case of jpni > 1
2417  !!
2418  !! ** Method : North fold condition and mpp with more than one proc
2419  !! in i-direction require a specific treatment. We gather
2420  !! the 4 northern lines of the global domain on 1 processor
2421  !! and apply lbc north-fold on this sub array. Then we
2422  !! scatter the north fold array back to the processors.
2423  !!
2424  !!----------------------------------------------------------------------
2425  REAL(wp), DIMENSION(jpi,jpj,jpk), INTENT(inout) :: pt3d ! 3D array on which the b.c. is applied
2426  CHARACTER(len=1) , INTENT(in ) :: cd_type ! nature of pt3d grid-points
2427  ! ! = T , U , V , F or W gridpoints
2428  REAL(wp) , INTENT(in ) :: psgn ! = -1. the sign change across the north fold
2429  !! ! = 1. , the sign is kept
2430  INTEGER :: ji, jj, jr
2431  INTEGER :: ierr, itaille, ildi, ilei, iilb
2432  INTEGER :: ijpj, ijpjm1, ij, iproc
2433  INTEGER, DIMENSION (jpmaxngh) :: ml_req_nf ! for mpi_isend when avoiding mpi_allgather
2434  INTEGER :: ml_err ! for mpi_isend when avoiding mpi_allgather
2435  INTEGER, DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for mpi_isend when avoiding mpi_allgather
2436  !!----------------------------------------------------------------------
2437  !
2438  ijpj = 4
2439  ityp = -1
2440  ijpjm1 = 3
2441  ztab(:,:,:) = 0.e0
2442  !
2443  DO jj = nlcj - ijpj +1, nlcj ! put in znorthloc the last 4 jlines of pt3d
2444  ij = jj - nlcj + ijpj
2445  znorthloc(:,ij,:) = pt3d(:,jj,:)
2446  END DO
2447  !
2448  ! ! Build in procs of ncomm_north the znorthgloio
2449  itaille = jpi * jpk * ijpj
2450  IF ( l_north_nogather ) THEN
2451  !
2452  ! Avoid the use of mpi_allgather by exchanging only with the processes already identified
2453  ! (in nemo_northcomms) as being involved in this process' northern boundary exchange
2454  !
2455  DO jj = nlcj-ijpj+1, nlcj ! First put local values into the global array
2456  ij = jj - nlcj + ijpj
2457  DO ji = 1, nlci
2458  ztab(ji+nimpp-1,ij,:) = pt3d(ji,jj,:)
2459  END DO
2460  END DO
2461 
2462  !
2463  ! Set the exchange type in order to access the correct list of active neighbours
2464  !
2465  SELECT CASE ( cd_type )
2466  CASE ( 'T' , 'W' )
2467  ityp = 1
2468  CASE ( 'U' )
2469  ityp = 2
2470  CASE ( 'V' )
2471  ityp = 3
2472  CASE ( 'F' )
2473  ityp = 4
2474  CASE ( 'I' )
2475  ityp = 5
2476  CASE default
2477  ityp = -1 ! Set a default value for unsupported types which
2478  ! will cause a fallback to the mpi_allgather method
2479  END SELECT
2480  IF ( ityp .gt. 0 ) THEN
2481 
2482  DO jr = 1,nsndto(ityp)
2483  CALL mppsend(5, znorthloc, itaille, isendto(jr,ityp), ml_req_nf(jr) )
2484  END DO
2485  DO jr = 1,nsndto(ityp)
2486  CALL mpprecv(5, zfoldwk, itaille, isendto(jr,ityp))
2487  iproc = isendto(jr,ityp) + 1
2488  ildi = nldit(iproc)
2489  ilei = nleit(iproc)
2490  iilb = nimppt(iproc)
2491  DO jj = 1, ijpj
2492  DO ji = ildi, ilei
2493  ztab(ji+iilb-1,jj,:) = zfoldwk(ji,jj,:)
2494  END DO
2495  END DO
2496  END DO
2497  IF (l_isend) THEN
2498  DO jr = 1,nsndto(ityp)
2499  CALL mpi_wait(ml_req_nf(jr), ml_stat, ml_err)
2500  END DO
2501  ENDIF
2502 
2503  ENDIF
2504 
2505  ENDIF
2506 
2507  IF ( ityp .lt. 0 ) THEN
2508  CALL mpi_allgather( znorthloc , itaille, mpi_double_precision, &
2509  & znorthgloio, itaille, mpi_double_precision, ncomm_north, ierr )
2510  !
2511  DO jr = 1, ndim_rank_north ! recover the global north array
2512  iproc = nrank_north(jr) + 1
2513  ildi = nldit(iproc)
2514  ilei = nleit(iproc)
2515  iilb = nimppt(iproc)
2516  DO jj = 1, ijpj
2517  DO ji = ildi, ilei
2518  ztab(ji+iilb-1,jj,:) = znorthgloio(ji,jj,:,jr)
2519  END DO
2520  END DO
2521  END DO
2522  ENDIF
2523  !
2524  ! The ztab array has been either:
2525  ! a. Fully populated by the mpi_allgather operation or
2526  ! b. Had the active points for this domain and northern neighbours populated
2527  ! by peer to peer exchanges
2528  ! Either way the array may be folded by lbc_nfd and the result for the span of
2529  ! this domain will be identical.
2530  !
2531  CALL lbc_nfd( ztab, cd_type, psgn ) ! North fold boundary condition
2532  !
2533  DO jj = nlcj-ijpj+1, nlcj ! Scatter back to pt3d
2534  ij = jj - nlcj + ijpj
2535  DO ji= 1, nlci
2536  pt3d(ji,jj,:) = ztab(ji+nimpp-1,ij,:)
2537  END DO
2538  END DO
2539  !
2540  END SUBROUTINE mpp_lbc_north_3d
2541 
2542 
2543  SUBROUTINE mpp_lbc_north_2d( pt2d, cd_type, psgn)
2544  !!---------------------------------------------------------------------
2545  !! *** routine mpp_lbc_north_2d ***
2546  !!
2547  !! ** Purpose : Ensure proper north fold horizontal boundary condition
2548  !! in mpp configuration in case of jpni > 1 (for 2d array)
2549  !!
2550  !! ** Method : North fold condition and mpp with more than one proc
2551  !! in i-direction require a specific treatment. We gather
2552  !! the 4 northern lines of the global domain on 1 processor
2553  !! and apply lbc north-fold on this sub array. Then we
2554  !! scatter the north fold array back to the processors.
2555  !!
2556  !!----------------------------------------------------------------------
2557  REAL(wp), DIMENSION(jpi,jpj), INTENT(inout) :: pt2d ! 2D array on which the b.c. is applied
2558  CHARACTER(len=1) , INTENT(in ) :: cd_type ! nature of pt2d grid-points
2559  ! ! = T , U , V , F or W gridpoints
2560  REAL(wp) , INTENT(in ) :: psgn ! = -1. the sign change across the north fold
2561  !! ! = 1. , the sign is kept
2562  INTEGER :: ji, jj, jr
2563  INTEGER :: ierr, itaille, ildi, ilei, iilb
2564  INTEGER :: ijpj, ijpjm1, ij, iproc
2565  INTEGER, DIMENSION (jpmaxngh) :: ml_req_nf ! for mpi_isend when avoiding mpi_allgather
2566  INTEGER :: ml_err ! for mpi_isend when avoiding mpi_allgather
2567  INTEGER, DIMENSION(MPI_STATUS_SIZE):: ml_stat ! for mpi_isend when avoiding mpi_allgather
2568  !!----------------------------------------------------------------------
2569  !
2570  ijpj = 4
2571  ityp = -1
2572  ijpjm1 = 3
2573  ztab_2d(:,:) = 0.e0
2574  !
2575  DO jj = nlcj-ijpj+1, nlcj ! put in znorthloc_2d the last 4 jlines of pt2d
2576  ij = jj - nlcj + ijpj
2577  znorthloc_2d(:,ij) = pt2d(:,jj)
2578  END DO
2579 
2580  ! ! Build in procs of ncomm_north the znorthgloio_2d
2581  itaille = jpi * ijpj
2582  IF ( l_north_nogather ) THEN
2583  !
2584  ! Avoid the use of mpi_allgather by exchanging only with the processes already identified
2585  ! (in nemo_northcomms) as being involved in this process' northern boundary exchange
2586  !
2587  DO jj = nlcj-ijpj+1, nlcj ! First put local values into the global array
2588  ij = jj - nlcj + ijpj
2589  DO ji = 1, nlci
2590  ztab_2d(ji+nimpp-1,ij) = pt2d(ji,jj)
2591  END DO
2592  END DO
2593 
2594  !
2595  ! Set the exchange type in order to access the correct list of active neighbours
2596  !
2597  SELECT CASE ( cd_type )
2598  CASE ( 'T' , 'W' )
2599  ityp = 1
2600  CASE ( 'U' )
2601  ityp = 2
2602  CASE ( 'V' )
2603  ityp = 3
2604  CASE ( 'F' )
2605  ityp = 4
2606  CASE ( 'I' )
2607  ityp = 5
2608  CASE default
2609  ityp = -1 ! Set a default value for unsupported types which
2610  ! will cause a fallback to the mpi_allgather method
2611  END SELECT
2612 
2613  IF ( ityp .gt. 0 ) THEN
2614 
2615  DO jr = 1,nsndto(ityp)
2616  CALL mppsend(5, znorthloc_2d, itaille, isendto(jr,ityp), ml_req_nf(jr) )
2617  END DO
2618  DO jr = 1,nsndto(ityp)
2619  CALL mpprecv(5, zfoldwk_2d, itaille, isendto(jr,ityp))
2620  iproc = isendto(jr,ityp) + 1
2621  ildi = nldit(iproc)
2622  ilei = nleit(iproc)
2623  iilb = nimppt(iproc)
2624  DO jj = 1, ijpj
2625  DO ji = ildi, ilei
2626  ztab_2d(ji+iilb-1,jj) = zfoldwk_2d(ji,jj)
2627  END DO
2628  END DO
2629  END DO
2630  IF (l_isend) THEN
2631  DO jr = 1,nsndto(ityp)
2632  CALL mpi_wait(ml_req_nf(jr), ml_stat, ml_err)
2633  END DO
2634  ENDIF
2635 
2636  ENDIF
2637 
2638  ENDIF
2639 
2640  IF ( ityp .lt. 0 ) THEN
2641  CALL mpi_allgather( znorthloc_2d , itaille, mpi_double_precision, &
2642  & znorthgloio_2d, itaille, mpi_double_precision, ncomm_north, ierr )
2643  !
2644  DO jr = 1, ndim_rank_north ! recover the global north array
2645  iproc = nrank_north(jr) + 1
2646  ildi = nldit(iproc)
2647  ilei = nleit(iproc)
2648  iilb = nimppt(iproc)
2649  DO jj = 1, ijpj
2650  DO ji = ildi, ilei
2651  ztab_2d(ji+iilb-1,jj) = znorthgloio_2d(ji,jj,jr)
2652  END DO
2653  END DO
2654  END DO
2655  ENDIF
2656  !
2657  ! The ztab array has been either:
2658  ! a. Fully populated by the mpi_allgather operation or
2659  ! b. Had the active points for this domain and northern neighbours populated
2660  ! by peer to peer exchanges
2661  ! Either way the array may be folded by lbc_nfd and the result for the span of
2662  ! this domain will be identical.
2663  !
2664  CALL lbc_nfd( ztab_2d, cd_type, psgn ) ! North fold boundary condition
2665  !
2666  !
2667  DO jj = nlcj-ijpj+1, nlcj ! Scatter back to pt2d
2668  ij = jj - nlcj + ijpj
2669  DO ji = 1, nlci
2670  pt2d(ji,jj) = ztab_2d(ji+nimpp-1,ij)
2671  END DO
2672  END DO
2673  !
2674  END SUBROUTINE mpp_lbc_north_2d
2675 
2676 
2677  SUBROUTINE mpp_lbc_north_e( pt2d, cd_type, psgn)
2678  !!---------------------------------------------------------------------
2679  !! *** routine mpp_lbc_north_e ***
2680  !!
2681  !! ** Purpose : Ensure proper north fold horizontal boundary condition
2682  !! in mpp configuration in case of jpni > 1 and for 2d
2683  !! array with outer extra halo
2684  !!
2685  !! ** Method : North fold condition and mpp with more than one proc
2686  !! in i-direction require a specific treatment. We gather
2687  !! the 4+2*jpr2dj northern lines of the global domain on 1
2688  !! processor and apply lbc north-fold on this sub array.
2689  !! Then we scatter the north fold array back to the processors.
2690  !!
2691  !!----------------------------------------------------------------------
2692  REAL(wp), DIMENSION(1-jpr2di:jpi+jpr2di,1-jpr2dj:jpj+jpr2dj), INTENT(inout) :: pt2d ! 2D array with extra halo
2693  CHARACTER(len=1) , INTENT(in ) :: cd_type ! nature of pt2d grid-points
2694  ! ! = T , U , V , F or W -points
2695  REAL(wp) , INTENT(in ) :: psgn ! = -1. the sign change across the
2696  !! ! north fold, = 1. otherwise
2697  INTEGER :: ji, jj, jr
2698  INTEGER :: ierr, itaille, ildi, ilei, iilb
2699  INTEGER :: ijpj, ij, iproc
2700  !!----------------------------------------------------------------------
2701  !
2702  ijpj=4
2703  ztab_e(:,:) = 0.e0
2704 
2705  ij=0
2706  ! put in znorthloc_e the last 4 jlines of pt2d
2707  DO jj = nlcj - ijpj + 1 - jpr2dj, nlcj +jpr2dj
2708  ij = ij + 1
2709  DO ji = 1, jpi
2710  znorthloc_e(ji,ij)=pt2d(ji,jj)
2711  END DO
2712  END DO
2713  !
2714  itaille = jpi * ( ijpj + 2 * jpr2dj )
2715  CALL mpi_allgather( znorthloc_e(1,1) , itaille, mpi_double_precision, &
2716  & znorthgloio_e(1,1,1), itaille, mpi_double_precision, ncomm_north, ierr )
2717  !
2718  DO jr = 1, ndim_rank_north ! recover the global north array
2719  iproc = nrank_north(jr) + 1
2720  ildi = nldit(iproc)
2721  ilei = nleit(iproc)
2722  iilb = nimppt(iproc)
2723  DO jj = 1, ijpj+2*jpr2dj
2724  DO ji = ildi, ilei
2725  ztab_e(ji+iilb-1,jj) = znorthgloio_e(ji,jj,jr)
2726  END DO
2727  END DO
2728  END DO
2729 
2730 
2731  ! 2. North-Fold boundary conditions
2732  ! ----------------------------------
2733  CALL lbc_nfd( ztab_e(:,:), cd_type, psgn, pr2dj = jpr2dj )
2734 
2735  ij = jpr2dj
2736  !! Scatter back to pt2d
2737  DO jj = nlcj - ijpj + 1 , nlcj +jpr2dj
2738  ij = ij +1
2739  DO ji= 1, nlci
2740  pt2d(ji,jj) = ztab_e(ji+nimpp-1,ij)
2741  END DO
2742  END DO
2743  !
2744  END SUBROUTINE mpp_lbc_north_e
2745 
2746 
2747  SUBROUTINE mpi_init_opa( ldtxt, ksft, code )
2748  !!---------------------------------------------------------------------
2749  !! *** routine mpi_init_opa ***
2750  !!
2751  !! ** Purpose :: export and attach a MPI buffer for bsend
2752  !!
2753  !! ** Method :: define the buffer size in the namelist; if 0, no buffer is attached
2754  !! and a classical mpi_init is done
2755  !!
2756  !! History :: 01/11 :: IDRIS initial version for IBM only
2757  !! 08/04 :: R. Benshila, generalisation
2758  !!---------------------------------------------------------------------
2759  CHARACTER(len=*),DIMENSION(:), INTENT( out) :: ldtxt
2760  INTEGER , INTENT(inout) :: ksft
2761  INTEGER , INTENT( out) :: code
2762  INTEGER :: ierr, ji
2763  LOGICAL :: mpi_was_called
2764  !!---------------------------------------------------------------------
2765  !
2766  CALL mpi_initialized( mpi_was_called, code ) ! MPI initialization
2767  IF ( code /= mpi_success ) THEN
2768  DO ji = 1, SIZE(ldtxt)
2769  IF( trim(ldtxt(ji)) /= '' ) WRITE(*,*) ldtxt(ji) ! control print of mynode
2770  END DO
2771  WRITE(*, cform_err)
2772  WRITE(*, *) ' lib_mpp: Error in routine mpi_initialized'
2773  CALL mpi_abort( mpi_comm_world, code, ierr )
2774  ENDIF
2775  !
2776  IF( .NOT. mpi_was_called ) THEN
2777  CALL mpi_init( code )
2778  CALL mpi_comm_dup( mpi_comm_world, mpi_comm_opa, code )
2779  IF ( code /= mpi_success ) THEN
2780  DO ji = 1, SIZE(ldtxt)
2781  IF( trim(ldtxt(ji)) /= '' ) WRITE(*,*) ldtxt(ji) ! control print of mynode
2782  END DO
2783  WRITE(*, cform_err)
2784  WRITE(*, *) ' lib_mpp: Error in routine mpi_comm_dup'
2785  CALL mpi_abort( mpi_comm_world, code, ierr )
2786  ENDIF
2787  ENDIF
2788  !
2789  IF( nn_buffer > 0 ) THEN
2790  WRITE(ldtxt(ksft),*) 'mpi_bsend, buffer allocation of : ', nn_buffer ; ksft = ksft + 1
2791  ! Buffer allocation and attachment
2792  ALLOCATE( tampon(nn_buffer), stat = ierr )
2793  IF( ierr /= 0 ) THEN
2794  DO ji = 1, SIZE(ldtxt)
2795  IF( trim(ldtxt(ji)) /= '' ) WRITE(*,*) ldtxt(ji) ! control print of mynode
2796  END DO
2797  WRITE(*, cform_err)
2798  WRITE(*, *) ' lib_mpp: Error in ALLOCATE', ierr
2799  CALL mpi_abort( mpi_comm_world, code, ierr )
2800  END IF
2801  CALL mpi_buffer_attach( tampon, nn_buffer, code )
2802  ENDIF
2803  !
2804  END SUBROUTINE mpi_init_opa
2805 
2806  SUBROUTINE ddpdd_mpi (ydda, yddb, ilen, itype)
2807  !!---------------------------------------------------------------------
2808  !! Routine DDPDD_MPI: used by reduction operator MPI_SUMDD
2809  !!
2810  !! Modification of original codes written by David H. Bailey
2811  !! This subroutine computes yddb(i) = ydda(i)+yddb(i)
2812  !!---------------------------------------------------------------------
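  ! Each COMPLEX value stores a double-double number: the real part holds the leading sum
  ! and the imaginary part the accumulated rounding error, so this user-defined reduction
  ! performs a compensated (Knuth two-sum) addition.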
2813  INTEGER, INTENT(in) :: ilen, itype
2814  COMPLEX(wp), DIMENSION(ilen), INTENT(in) :: ydda
2815  COMPLEX(wp), DIMENSION(ilen), INTENT(inout) :: yddb
2816  !
2817  REAL(wp) :: zerr, zt1, zt2 ! local work variables
2818  INTEGER :: ji, ztmp ! local scalar
2819 
2820  ztmp = itype ! avoid compilation warning
2821 
2822  DO ji=1,ilen
2823  ! Compute ydda + yddb using Knuth's trick.
2824  zt1 = real(ydda(ji)) + real(yddb(ji))
2825  zerr = zt1 - real(ydda(ji))
2826  zt2 = ((real(yddb(ji)) - zerr) + (real(ydda(ji)) - (zt1 - zerr))) &
2827  + aimag(ydda(ji)) + aimag(yddb(ji))
2828 
2829  ! The result is zt1 + zt2, after normalization.
2830  yddb(ji) = cmplx( zt1 + zt2, zt2 - ((zt1 + zt2) - zt1),wp )
2831  END DO
2832 
2833  END SUBROUTINE ddpdd_mpi
2834 #endif
2835 #else
2836 #if ! defined in_surfex
2837  !!----------------------------------------------------------------------
2838  !! Default case: Dummy module share memory computing
2839  !!----------------------------------------------------------------------
2840  USE in_out_manager
2841 
2842  INTERFACE mpp_sum
2843  MODULE PROCEDURE mpp_sum_as, mpp_sum_a2s, mpp_sum_ai, mpp_sum_s, mpp_sum_i, mppsum_realdd, mppsum_a_realdd
2844  END INTERFACE
2845  INTERFACE mpp_max
2846  MODULE PROCEDURE mppmax_a_int, mppmax_int, mppmax_a_real, mppmax_real
2847  END INTERFACE
2848  INTERFACE mpp_min
2849  MODULE PROCEDURE mppmin_a_int, mppmin_int, mppmin_a_real, mppmin_real
2850  END INTERFACE
2851  INTERFACE mppobc
2852  MODULE PROCEDURE mppobc_1d, mppobc_2d, mppobc_3d, mppobc_4d
2853  END INTERFACE
2854  INTERFACE mpp_minloc
2855  MODULE PROCEDURE mpp_minloc2d ,mpp_minloc3d
2856  END INTERFACE
2857  INTERFACE mpp_maxloc
2858  MODULE PROCEDURE mpp_maxloc2d ,mpp_maxloc3d
2859  END INTERFACE
2860 
2861  LOGICAL, PUBLIC, PARAMETER :: lk_mpp = .false. !: mpp flag
2862  LOGICAL, PUBLIC :: ln_nnogather = .false. !: namelist control of northfold comms (needed here in case "key_mpp_mpi" is not used)
2863  INTEGER :: ncomm_ice
2864  !!----------------------------------------------------------------------
2865  CONTAINS
2866 
2867  INTEGER FUNCTION lib_mpp_alloc(kumout) ! Dummy function
2868  INTEGER, INTENT(in) :: kumout
2869  lib_mpp_alloc = 0
2870  END FUNCTION lib_mpp_alloc
2871 
2872  FUNCTION mynode( ldtxt, kumnam, kstop, localComm ) RESULT (function_value)
2873  INTEGER, OPTIONAL , INTENT(in ) :: localcomm
2874  CHARACTER(len=*),DIMENSION(:) :: ldtxt
2875  INTEGER :: kumnam, kstop
2876  IF( present( localcomm ) .OR. .NOT.present( localcomm ) ) function_value = 0
2877  IF( .false. ) ldtxt(:) = 'never done'
2878  END FUNCTION mynode
2879 
2880  SUBROUTINE mppsync ! Dummy routine
2881  END SUBROUTINE mppsync
2882 
2883  SUBROUTINE mpp_sum_as( parr, kdim, kcom ) ! Dummy routine
2884  REAL , DIMENSION(:) :: parr
2885  INTEGER :: kdim
2886  INTEGER, OPTIONAL :: kcom
2887  WRITE(*,*) 'mpp_sum_as: You should not have seen this print! error?', kdim, parr(1), kcom
2888  END SUBROUTINE mpp_sum_as
2889 
2890  SUBROUTINE mpp_sum_a2s( parr, kdim, kcom ) ! Dummy routine
2891  REAL , DIMENSION(:,:) :: parr
2892  INTEGER :: kdim
2893  INTEGER, OPTIONAL :: kcom
2894  WRITE(*,*) 'mpp_sum_a2s: You should not have seen this print! error?', kdim, parr(1,1), kcom
2895  END SUBROUTINE mpp_sum_a2s
2896 
2897  SUBROUTINE mpp_sum_ai( karr, kdim, kcom ) ! Dummy routine
2898  INTEGER, DIMENSION(:) :: karr
2899  INTEGER :: kdim
2900  INTEGER, OPTIONAL :: kcom
2901  WRITE(*,*) 'mpp_sum_ai: You should not have seen this print! error?', kdim, karr(1), kcom
2902  END SUBROUTINE mpp_sum_ai
2903 
2904  SUBROUTINE mpp_sum_s( psca, kcom ) ! Dummy routine
2905  REAL :: psca
2906  INTEGER, OPTIONAL :: kcom
2907  WRITE(*,*) 'mpp_sum_s: You should not have seen this print! error?', psca, kcom
2908  END SUBROUTINE mpp_sum_s
2909 
2910  SUBROUTINE mpp_sum_i( kint, kcom ) ! Dummy routine
2911  integer :: kint
2912  INTEGER, OPTIONAL :: kcom
2913  WRITE(*,*) 'mpp_sum_i: You should not have seen this print! error?', kint, kcom
2914  END SUBROUTINE mpp_sum_i
2915 
2916  SUBROUTINE mppsum_realdd( ytab, kcom )
2917  COMPLEX(wp), INTENT(inout) :: ytab ! input scalar
2918  INTEGER , INTENT( in ), OPTIONAL :: kcom
2919  WRITE(*,*) 'mppsum_realdd: You should not have seen this print! error?', ytab
2920  END SUBROUTINE mppsum_realdd
2921 
2922  SUBROUTINE mppsum_a_realdd( ytab, kdim, kcom )
2923  INTEGER , INTENT( in ) :: kdim ! size of ytab
2924  COMPLEX(wp), DIMENSION(kdim), INTENT( inout ) :: ytab ! input array
2925  INTEGER , INTENT( in ), OPTIONAL :: kcom
2926  WRITE(*,*) 'mppsum_a_realdd: You should not have seen this print! error?', kdim, ytab(1), kcom
2927  END SUBROUTINE mppsum_a_realdd
2928 
2929  SUBROUTINE mppmax_a_real( parr, kdim, kcom )
2930  REAL , DIMENSION(:) :: parr
2931  INTEGER :: kdim
2932  INTEGER, OPTIONAL :: kcom
2933  WRITE(*,*) 'mppmax_a_real: You should not have seen this print! error?', kdim, parr(1), kcom
2934  END SUBROUTINE mppmax_a_real
2935 
2936  SUBROUTINE mppmax_real( psca, kcom )
2937  REAL :: psca
2938  INTEGER, OPTIONAL :: kcom
2939  WRITE(*,*) 'mppmax_real: You should not have seen this print! error?', psca, kcom
2940  END SUBROUTINE mppmax_real
2941 
2942  SUBROUTINE mppmin_a_real( parr, kdim, kcom )
2943  REAL , DIMENSION(:) :: parr
2944  INTEGER :: kdim
2945  INTEGER, OPTIONAL :: kcom
2946  WRITE(*,*) 'mppmin_a_real: You should not have seen this print! error?', kdim, parr(1), kcom
2947  END SUBROUTINE mppmin_a_real
2948 
2949  SUBROUTINE mppmin_real( psca, kcom )
2950  REAL :: psca
2951  INTEGER, OPTIONAL :: kcom
2952  WRITE(*,*) 'mppmin_real: You should not have seen this print! error?', psca, kcom
2953  END SUBROUTINE mppmin_real
2954 
2955  SUBROUTINE mppmax_a_int( karr, kdim ,kcom)
2956  INTEGER, DIMENSION(:) :: karr
2957  INTEGER :: kdim
2958  INTEGER, OPTIONAL :: kcom
2959  WRITE(*,*) 'mppmax_a_int: You should not have seen this print! error?', kdim, karr(1), kcom
2960  END SUBROUTINE mppmax_a_int
2961 
2962  SUBROUTINE mppmax_int( kint, kcom)
2963  INTEGER :: kint
2964  INTEGER, OPTIONAL :: kcom
2965  WRITE(*,*) 'mppmax_int: You should not have seen this print! error?', kint, kcom
2966  END SUBROUTINE mppmax_int
2967 
2968  SUBROUTINE mppmin_a_int( karr, kdim, kcom )
2969  INTEGER, DIMENSION(:) :: karr
2970  INTEGER :: kdim
2971  INTEGER, OPTIONAL :: kcom
2972  WRITE(*,*) 'mppmin_a_int: You should not have seen this print! error?', kdim, karr(1), kcom
2973  END SUBROUTINE mppmin_a_int
2974 
2975  SUBROUTINE mppmin_int( kint, kcom )
2976  INTEGER :: kint
2977  INTEGER, OPTIONAL :: kcom
2978  WRITE(*,*) 'mppmin_int: You should not have seen this print! error?', kint, kcom
2979  END SUBROUTINE mppmin_int
2980 
2981  SUBROUTINE mppobc_1d( parr, kd1, kd2, kl, kk, ktype, kij, knum )
2982  INTEGER :: kd1, kd2, kl , kk, ktype, kij, knum
2983  REAL, DIMENSION(:) :: parr ! variable array
2984  WRITE(*,*) 'mppobc: You should not have seen this print! error?', parr(1), kd1, kd2, kl, kk, ktype, kij, knum
2985  END SUBROUTINE mppobc_1d
2986 
2987  SUBROUTINE mppobc_2d( parr, kd1, kd2, kl, kk, ktype, kij, knum )
2988  INTEGER :: kd1, kd2, kl , kk, ktype, kij, knum
2989  REAL, DIMENSION(:,:) :: parr ! variable array
2990  WRITE(*,*) 'mppobc: You should not have seen this print! error?', parr(1,1), kd1, kd2, kl, kk, ktype, kij, knum
2991  END SUBROUTINE mppobc_2d
2992 
2993  SUBROUTINE mppobc_3d( parr, kd1, kd2, kl, kk, ktype, kij, knum )
2994  INTEGER :: kd1, kd2, kl , kk, ktype, kij, knum
2995  REAL, DIMENSION(:,:,:) :: parr ! variable array
2996  WRITE(*,*) 'mppobc: You should not have seen this print! error?', parr(1,1,1), kd1, kd2, kl, kk, ktype, kij, knum
2997  END SUBROUTINE mppobc_3d
2998 
2999  SUBROUTINE mppobc_4d( parr, kd1, kd2, kl, kk, ktype, kij, knum )
3000  INTEGER :: kd1, kd2, kl , kk, ktype, kij, knum
3001  REAL, DIMENSION(:,:,:,:) :: parr ! variable array
3002  WRITE(*,*) 'mppobc: You should not have seen this print! error?', parr(1,1,1,1), kd1, kd2, kl, kk, ktype, kij, knum
3003  END SUBROUTINE mppobc_4d
3004 
3005  SUBROUTINE mpp_minloc2d( ptab, pmask, pmin, ki, kj )
3006  REAL :: pmin
3007  REAL , DIMENSION (:,:) :: ptab, pmask
3008  INTEGER :: ki, kj
3009  WRITE(*,*) 'mpp_minloc2d: You should not have seen this print! error?', pmin, ki, kj, ptab(1,1), pmask(1,1)
3010  END SUBROUTINE mpp_minloc2d
3011 
3012  SUBROUTINE mpp_minloc3d( ptab, pmask, pmin, ki, kj, kk )
3013  REAL :: pmin
3014  REAL , DIMENSION (:,:,:) :: ptab, pmask
3015  INTEGER :: ki, kj, kk
3016  WRITE(*,*) 'mpp_minloc3d: You should not have seen this print! error?', pmin, ki, kj, kk, ptab(1,1,1), pmask(1,1,1)
3017  END SUBROUTINE mpp_minloc3d
3018 
3019  SUBROUTINE mpp_maxloc2d( ptab, pmask, pmax, ki, kj )
3020  REAL :: pmax
3021  REAL , DIMENSION (:,:) :: ptab, pmask
3022  INTEGER :: ki, kj
3023  WRITE(*,*) 'mpp_maxloc2d: You should not have seen this print! error?', pmax, ki, kj, ptab(1,1), pmask(1,1)
3024  END SUBROUTINE mpp_maxloc2d
3025 
3026  SUBROUTINE mpp_maxloc3d( ptab, pmask, pmax, ki, kj, kk )
3027  REAL :: pmax
3028  REAL , DIMENSION (:,:,:) :: ptab, pmask
3029  INTEGER :: ki, kj, kk
3030  WRITE(*,*) 'mpp_maxloc3d: You should not have seen this print! error?', pmax, ki, kj, kk, ptab(1,1,1), pmask(1,1,1)
3031  END SUBROUTINE mpp_maxloc3d
3032 
3033  SUBROUTINE mppstop
3034  WRITE(*,*) 'mppstop: You should not have seen this print if running in mpp mode! error?...'
3035  WRITE(*,*) 'mppstop: ..otherwise this is a stop condition raised by ctl_stop in single processor mode'
3036  stop
3037  END SUBROUTINE mppstop
3038 
3039  SUBROUTINE mpp_ini_ice( kcom, knum )
3040  INTEGER :: kcom, knum
3041  WRITE(*,*) 'mpp_ini_ice: You should not have seen this print! error?', kcom, knum
3042  END SUBROUTINE mpp_ini_ice
3043 
3044  SUBROUTINE mpp_ini_znl( knum )
3045  INTEGER :: knum
3046  WRITE(*,*) 'mpp_ini_znl: You should not have seen this print! error?', knum
3047  END SUBROUTINE mpp_ini_znl
3048 
3049  SUBROUTINE mpp_comm_free( kcom )
3050  INTEGER :: kcom
3051  WRITE(*,*) 'mpp_comm_free: You should not have seen this print! error?', kcom
3052  END SUBROUTINE mpp_comm_free
3053 #endif
3054 #endif
3055 #if ! defined in_surfex
3056 
3057  !!----------------------------------------------------------------------
3058  !! All cases: ctl_stop, ctl_warn, get_unit, ctl_opn routines
3059  !!----------------------------------------------------------------------
3060 
3061  SUBROUTINE ctl_stop( cd1, cd2, cd3, cd4, cd5 , &
3062  & cd6, cd7, cd8, cd9, cd10 )
3063  !!----------------------------------------------------------------------
3064  !! *** ROUTINE ctl_stop ***
3065  !!
3066  !! ** Purpose : print an error message in the ocean.output file and
3067  !! increment the error number (nstop) by one.
3068  !!----------------------------------------------------------------------
3069  CHARACTER(len=*), INTENT(in), OPTIONAL :: cd1, cd2, cd3, cd4, cd5
3070  CHARACTER(len=*), INTENT(in), OPTIONAL :: cd6, cd7, cd8, cd9, cd10
3071  !!----------------------------------------------------------------------
3072  !
3073  nstop = nstop + 1
3074  IF(lwp) THEN
3075  WRITE(numout,cform_err)
3076  IF( present(cd1 ) ) WRITE(numout,*) cd1
3077  IF( present(cd2 ) ) WRITE(numout,*) cd2
3078  IF( present(cd3 ) ) WRITE(numout,*) cd3
3079  IF( present(cd4 ) ) WRITE(numout,*) cd4
3080  IF( present(cd5 ) ) WRITE(numout,*) cd5
3081  IF( present(cd6 ) ) WRITE(numout,*) cd6
3082  IF( present(cd7 ) ) WRITE(numout,*) cd7
3083  IF( present(cd8 ) ) WRITE(numout,*) cd8
3084  IF( present(cd9 ) ) WRITE(numout,*) cd9
3085  IF( present(cd10) ) WRITE(numout,*) cd10
3086  ENDIF
3087  CALL flush(numout )
3088  IF( numstp /= -1 ) CALL flush(numstp )
3089  IF( numsol /= -1 ) CALL flush(numsol )
3090  IF( numevo_ice /= -1 ) CALL flush(numevo_ice)
3091  !
3092  IF( cd1 == 'STOP' ) THEN
3093  IF(lwp) WRITE(numout,*) 'huge E-R-R-O-R : immediate stop'
3094  CALL mppstop()
3095  ENDIF
3096  !
3097  END SUBROUTINE ctl_stop
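  ! --------------------------------------------------------------------
  ! Editor's note: a minimal usage sketch, not part of the original file.
  ! It shows how ctl_stop is typically called: each optional character
  ! argument is printed on its own line in the ocean.output file, and
  ! passing 'STOP' as the first argument forces an immediate mppstop.
  ! The routine name example_check_timestep and its argument pdt are
  ! assumptions made for this illustration only.
  SUBROUTINE example_check_timestep( pdt )
     REAL, INTENT(in) :: pdt                      ! candidate time step
     IF( pdt <= 0. ) THEN
        ! record the error (nstop is incremented); execution continues
        CALL ctl_stop( 'example_check_timestep: the time step must be positive' )
        ! or abort at once:
        ! CALL ctl_stop( 'STOP', 'example_check_timestep: invalid time step' )
     ENDIF
  END SUBROUTINE example_check_timestep
  ! --------------------------------------------------------------------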
3098 
3099 
3100  SUBROUTINE ctl_warn( cd1, cd2, cd3, cd4, cd5, &
3101  & cd6, cd7, cd8, cd9, cd10 )
3102  !!----------------------------------------------------------------------
3103  !! *** ROUTINE ctl_warn ***
3104  !!
3105  !! ** Purpose : print a warning message in the ocean.output file and
3106  !! increment the warning number (nwarn) by one.
3107  !!----------------------------------------------------------------------
3108  CHARACTER(len=*), INTENT(in), OPTIONAL :: cd1, cd2, cd3, cd4, cd5
3109  CHARACTER(len=*), INTENT(in), OPTIONAL :: cd6, cd7, cd8, cd9, cd10
3110  !!----------------------------------------------------------------------
3111  !
3112  nwarn = nwarn + 1
3113  IF(lwp) THEN
3114  WRITE(numout,cform_war)
3115  IF( present(cd1 ) ) WRITE(numout,*) cd1
3116  IF( present(cd2 ) ) WRITE(numout,*) cd2
3117  IF( present(cd3 ) ) WRITE(numout,*) cd3
3118  IF( present(cd4 ) ) WRITE(numout,*) cd4
3119  IF( present(cd5 ) ) WRITE(numout,*) cd5
3120  IF( present(cd6 ) ) WRITE(numout,*) cd6
3121  IF( present(cd7 ) ) WRITE(numout,*) cd7
3122  IF( present(cd8 ) ) WRITE(numout,*) cd8
3123  IF( present(cd9 ) ) WRITE(numout,*) cd9
3124  IF( present(cd10) ) WRITE(numout,*) cd10
3125  ENDIF
3126  CALL flush(numout)
3127  !
3128  END SUBROUTINE ctl_warn
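  ! --------------------------------------------------------------------
  ! Editor's note: an illustrative sketch, not part of the original file.
  ! ctl_warn mirrors ctl_stop but only increments nwarn and never stops
  ! the run, so it suits recoverable conditions.  The routine name
  ! example_warn_negative and its argument pval are assumptions made for
  ! this sketch only.
  SUBROUTINE example_warn_negative( pval )
     REAL, INTENT(in) :: pval                     ! value being checked
     CHARACTER(len=48) :: clmsg
     IF( pval < 0. ) THEN
        WRITE(clmsg,'(a,es12.4)') 'negative value encountered: ', pval
        CALL ctl_warn( 'example_warn_negative:', clmsg )   ! report and continue
     ENDIF
  END SUBROUTINE example_warn_negative
  ! --------------------------------------------------------------------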
3129 
3130 
3131  SUBROUTINE ctl_opn( knum, cdfile, cdstat, cdform, cdacce, klengh, kout, ldwp, karea )
3132  !!----------------------------------------------------------------------
3133  !! *** ROUTINE ctl_opn ***
3134  !!
3135  !! ** Purpose : Open a file and check that the required file is available.
3136  !!
3137  !! ** Method : Fortran OPEN statement
3138  !!----------------------------------------------------------------------
3139  INTEGER , INTENT( out) :: knum ! logical unit to open
3140  CHARACTER(len=*) , INTENT(in ) :: cdfile ! file name to open
3141  CHARACTER(len=*) , INTENT(in ) :: cdstat ! disposition specifier
3142  CHARACTER(len=*) , INTENT(in ) :: cdform ! formatting specifier
3143  CHARACTER(len=*) , INTENT(in ) :: cdacce ! access specifier
3144  INTEGER , INTENT(in ) :: klengh ! record length
3145  INTEGER , INTENT(in ) :: kout ! number of logical units for write
3146  LOGICAL , INTENT(in ) :: ldwp ! boolean term for print
3147  INTEGER, OPTIONAL, INTENT(in ) :: karea ! proc number
3148  !!
3149  CHARACTER(len=80) :: clfile
3150  INTEGER :: iost
3151  !!----------------------------------------------------------------------
3152 
3153  ! adapt filename
3154  ! ----------------
3155  clfile = trim(cdfile)
3156  IF( present( karea ) ) THEN
3157  IF( karea > 1 ) WRITE(clfile, "(a,'_',i4.4)") trim(clfile), karea-1
3158  ENDIF
3159 #if defined key_agrif
3160  IF( .NOT. agrif_root() ) clfile = trim(agrif_cfixed())//'_'//trim(clfile)
3161  knum=agrif_get_unit()
3162 #else
3163  knum=get_unit()
3164 #endif
3165 
3166  iost=0
3167  IF( cdacce(1:6) == 'DIRECT' ) THEN
3168  OPEN( unit=knum, file=clfile, form=cdform, access=cdacce, status=cdstat, recl=klengh, err=100, iostat=iost )
3169  ELSE
3170  OPEN( unit=knum, file=clfile, form=cdform, access=cdacce, status=cdstat , err=100, iostat=iost )
3171  ENDIF
3172  IF( iost == 0 ) THEN
3173  IF(ldwp) THEN
3174  WRITE(kout,*) ' file : ', clfile,' open ok'
3175  WRITE(kout,*) ' unit = ', knum
3176  WRITE(kout,*) ' status = ', cdstat
3177  WRITE(kout,*) ' form = ', cdform
3178  WRITE(kout,*) ' access = ', cdacce
3179  WRITE(kout,*)
3180  ENDIF
3181  ENDIF
3182 100 CONTINUE
3183  IF( iost /= 0 ) THEN
3184  IF(ldwp) THEN
3185  WRITE(kout,*)
3186  WRITE(kout,*) ' ===>>>> : bad opening file: ', clfile
3187  WRITE(kout,*) ' ======= === '
3188  WRITE(kout,*) ' unit = ', knum
3189  WRITE(kout,*) ' status = ', cdstat
3190  WRITE(kout,*) ' form = ', cdform
3191  WRITE(kout,*) ' access = ', cdacce
3192  WRITE(kout,*) ' iostat = ', iost
3193  WRITE(kout,*) ' we stop. verify the file '
3194  WRITE(kout,*)
3195  ENDIF
3196  stop 'ctl_opn bad opening'
3197  ENDIF
3198 
3199  END SUBROUTINE ctl_opn
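  ! --------------------------------------------------------------------
  ! Editor's note: a minimal usage sketch, not part of the original file.
  ! It opens a sequential formatted text file through ctl_opn: the unit
  ! number is returned in inum, klengh only matters for DIRECT access
  ! (a dummy -1 here), kout=6 sends the diagnostic prints to standard
  ! output and ldwp=.TRUE. enables them.  The file name 'example.output'
  ! and the routine name are assumptions made for this illustration only.
  SUBROUTINE example_open_log()
     INTEGER :: inum                              ! logical unit chosen by ctl_opn
     CALL ctl_opn( inum, 'example.output', 'REPLACE', 'FORMATTED', 'SEQUENTIAL', -1, 6, .TRUE. )
     WRITE(inum,*) 'example_open_log: file opened on unit ', inum
     CLOSE(inum)
  END SUBROUTINE example_open_log
  ! --------------------------------------------------------------------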
3200 
3201 
3202  INTEGER FUNCTION get_unit()
3203  !!----------------------------------------------------------------------
3204  !! *** FUNCTION get_unit ***
3205  !!
3206  !! ** Purpose : return the index of an unused logical unit
3207  !!----------------------------------------------------------------------
3208  LOGICAL :: llopn
3209  !!----------------------------------------------------------------------
3210  !
3211  get_unit = 15 ! start from a unit large enough that it is not already used in NEMO
3212  llopn = .true.
3213  DO WHILE( (get_unit < 999) .AND. llopn )
3214  get_unit = get_unit + 1
3215  INQUIRE( unit = get_unit, opened = llopn )
3216  END DO
3217  IF( (get_unit == 999) .AND. llopn ) THEN
3218  CALL ctl_stop( 'get_unit: All logical units until 999 are used...' )
3219  get_unit = -1
3220  ENDIF
3221  !
3222  END FUNCTION get_unit
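  ! --------------------------------------------------------------------
  ! Editor's note: an illustrative sketch, not part of the original file.
  ! get_unit returns the first logical unit above 15 that INQUIRE reports
  ! as not connected (or -1, after calling ctl_stop, when none is free),
  ! so the result can be handed straight to OPEN.  The routine name and
  ! the scratch-file usage are assumptions made for this sketch only.
  SUBROUTINE example_scratch_unit()
     INTEGER :: iunit
     iunit = get_unit()                           ! first free logical unit
     IF( iunit > 0 ) THEN
        OPEN( unit=iunit, status='SCRATCH', form='FORMATTED' )
        WRITE(iunit,*) 'temporary data'
        CLOSE( iunit )
     ENDIF
  END SUBROUTINE example_scratch_unit
  ! --------------------------------------------------------------------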
3223 
3224  !!----------------------------------------------------------------------
3225 #endif
3226 END MODULE lib_mpp
subroutine mpp_lbc_north_3d(pt3d, cd_type, psgn)
Definition: lib_mpp.F90:2411
subroutine, public mppstop
Definition: lib_mpp.F90:1940
subroutine, public mppsync()
Definition: lib_mpp.F90:1925
subroutine mpi_init_opa(ldtxt, ksft, code)
Definition: lib_mpp.F90:2747
subroutine mpp_sum_i(kint, kcom)
Definition: lib_mpp.F90:2910
subroutine mppmax_int(kint, kcom)
Definition: lib_mpp.F90:2962
subroutine, public mppobc(ptab, kd1, kd2, kl, kk, ktype, kij, kumout)
Definition: lib_mpp.F90:1956
subroutine mppsum_a_real(ptab, kdim, kcom)
Definition: lib_mpp.F90:1638
subroutine, public mpp_lnk_2d(pt2d, cd_type, psgn, cd_mpp, pval)
Definition: lib_mpp.F90:628
subroutine mpp_maxloc2d(ptab, pmask, pmax, ki, kj)
Definition: lib_mpp.F90:3019
subroutine mpp_minloc3d(ptab, pmask, pmin, ki, kj, kk)
Definition: lib_mpp.F90:3012
subroutine, public mpp_ini_north
Definition: lib_mpp.F90:2350
subroutine mppsum_a_realdd(ytab, kdim, kcom)
Definition: lib_mpp.F90:2922
subroutine mppmin_real(psca, kcom)
Definition: lib_mpp.F90:2949
subroutine mppsum_a_int(ktab, kdim)
Definition: lib_mpp.F90:1437
subroutine, public mpp_lbc_north_e(pt2d, cd_type, psgn)
Definition: lib_mpp.F90:2677
subroutine, public mppscatter(pio, kp, ptab)
Definition: lib_mpp.F90:1274
subroutine, public mppsend(ktyp, pmess, kbytes, kdest, md_req)
Definition: lib_mpp.F90:1193
subroutine mppmin_int(kint, kcom)
Definition: lib_mpp.F90:2975
subroutine mppmax_a_int(karr, kdim, kcom)
Definition: lib_mpp.F90:2955
subroutine, public mpp_ini_znl(kumout)
Definition: lib_mpp.F90:2250
subroutine mpp_minloc2d(ptab, pmask, pmin, ki, kj)
Definition: lib_mpp.F90:3005
subroutine abor1_sfx(YTEXT)
Definition: abor1_sfx.F90:6
subroutine, public mpp_lnk_3d_gather(ptab1, cd_type1, ptab2, cd_type2, psgn)
Definition: lib_mpp.F90:822
subroutine mppobc_3d(parr, kd1, kd2, kl, kk, ktype, kij, knum)
Definition: lib_mpp.F90:2993
subroutine mppmin_a_int(karr, kdim, kcom)
Definition: lib_mpp.F90:2968
subroutine, public mppgather(ptab, kp, pio)
Definition: lib_mpp.F90:1252
subroutine mpp_maxloc3d(ptab, pmask, pmax, ki, kj, kk)
Definition: lib_mpp.F90:3026
INTEGER function, public lib_mpp_alloc(kumout)
Definition: lib_mpp.F90:262
subroutine mppmin_a_real(parr, kdim, kcom)
Definition: lib_mpp.F90:2942
subroutine, public ctl_opn(knum, cdfile, cdstat, cdform, cdacce, klengh, kout, ldwp, karea)
Definition: lib_mpp.F90:3131
subroutine mpp_sum_ai(karr, kdim, kcom)
Definition: lib_mpp.F90:2897
subroutine mppobc_4d(parr, kd1, kd2, kl, kk, ktype, kij, knum)
Definition: lib_mpp.F90:2999
subroutine, public mpp_lnk_2d_e(pt2d, cd_type, psgn)
Definition: lib_mpp.F90:1016
subroutine, public mpprecv(ktyp, pmess, kbytes, ksource)
Definition: lib_mpp.F90:1222
subroutine mpp_comm_free(kcom)
Definition: lib_mpp.F90:3049
subroutine, public mpp_ini_ice(pindic, kumout)
Definition: lib_mpp.F90:2165
subroutine mpp_sum_a2s(parr, kdim, kcom)
Definition: lib_mpp.F90:2890
subroutine mppsum_int(ktab)
Definition: lib_mpp.F90:1466
subroutine ddpdd_mpi(ydda, yddb, ilen, itype)
Definition: lib_mpp.F90:2806
subroutine mppmax_real(psca, kcom)
Definition: lib_mpp.F90:2936
subroutine, public mpp_lnk_3d(ptab, cd_type, psgn, cd_mpp, pval)
Definition: lib_mpp.F90:431
integer function, public mynode(ldtxt, kumnam, kstop, localComm)
Definition: lib_mpp.F90:302
subroutine mppobc_2d(parr, kd1, kd2, kl, kk, ktype, kij, knum)
Definition: lib_mpp.F90:2987
subroutine mppsum_realdd(ytab, kcom)
Definition: lib_mpp.F90:2916
subroutine mpp_sum_s(psca, kcom)
Definition: lib_mpp.F90:2904
subroutine mppmax_a_real(parr, kdim, kcom)
Definition: lib_mpp.F90:2929
subroutine mpp_sum_as(parr, kdim, kcom)
Definition: lib_mpp.F90:2883
INTEGER function, public get_unit()
Definition: lib_mpp.F90:3202
subroutine mppobc_1d(parr, kd1, kd2, kl, kk, ktype, kij, knum)
Definition: lib_mpp.F90:2981
subroutine, public ctl_stop(cd1, cd2, cd3, cd4, cd5, cd6, cd7, cd8, cd9, cd10)
Definition: lib_mpp.F90:3061
subroutine mpp_lbc_north_2d(pt2d, cd_type, psgn)
Definition: lib_mpp.F90:2543
subroutine mppsum_real(ptab, kcom)
Definition: lib_mpp.F90:1671
subroutine, public ctl_warn(cd1, cd2, cd3, cd4, cd5, cd6, cd7, cd8, cd9, cd10)
Definition: lib_mpp.F90:3100