Actual source code: ex17f.F90

!
!
!   Scatters from a parallel vector to a sequential vector.  In
!  this case each local vector is as long as the entire parallel vector.
!
#include <petsc/finclude/petscvec.h>
      use petscvec
      implicit none

      PetscErrorCode ierr
      PetscMPIInt  size,rank
      PetscInt     n,NN,low,high
      PetscInt     iglobal,i,ione
      PetscInt     first,stride
      PetscScalar  value,zero
      Vec          x,y
      IS           is1,is2
      VecScatter   ctx

      n    = 5
      zero = 0.0
      PetscCallA(PetscInitialize(ierr))

      PetscCallMPIA(MPI_Comm_size(PETSC_COMM_WORLD,size,ierr))
      PetscCallMPIA(MPI_Comm_rank(PETSC_COMM_WORLD,rank,ierr))

!     Create two vectors: one parallel and one sequential. The sequential
!     vector on each process is as long as the entire parallel vector.

      NN = size*n
      ione = 1
      PetscCallA(VecCreateFromOptions(PETSC_COMM_WORLD,PETSC_NULL_CHARACTER,ione,PETSC_DECIDE,NN,y,ierr))
      PetscCallA(VecCreateFromOptions(PETSC_COMM_SELF,PETSC_NULL_CHARACTER,ione,NN,NN,x,ierr))
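
!     y is distributed over PETSC_COMM_WORLD with global length NN, while x is
!     a sequential vector holding the full length NN on every process. Since the
!     vectors are created from the options database, their type can be selected
!     at run time (for example with -vec_type).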

      PetscCallA(VecSet(x,zero,ierr))
      PetscCallA(VecGetOwnershipRange(y,low,high,ierr))
      ione = 1
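
!     Each process fills only the entries of y it owns, [low,high). The loop
!     index i is local, so iglobal = i + low is the corresponding global index,
!     and the value i + 10*rank records which rank set the entry.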
      do 10, i=0,n-1
         iglobal = i + low
         value   = i + 10*rank
         PetscCallA(VecSetValues(y,ione,iglobal,value,INSERT_VALUES,ierr))
 10   continue
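
!     VecSetValues() only caches the values; the vector cannot be used until
!     VecAssemblyBegin()/VecAssemblyEnd() have been called.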

      PetscCallA(VecAssemblyBegin(y,ierr))
      PetscCallA(VecAssemblyEnd(y,ierr))
!
!   View the parallel vector
!
      PetscCallA(VecView(y,PETSC_VIEWER_STDOUT_WORLD,ierr))

!     Create two index sets and the scatter context to move the contents of
!     the parallel vector to each sequential vector. If you want the parallel
!     vector delivered to only one processor, create an is2 of length zero on
!     all processors except the one that is to receive the parallel vector.
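
!     A sketch (not executed here) of that one-receiver variant: with a
!     hypothetical izero variable set to 0, is2 could be created as
!
!        if (rank .eq. 0) then
!           PetscCallA(ISCreateStride(PETSC_COMM_SELF,NN,first,stride,is2,ierr))
!        else
!           PetscCallA(ISCreateStride(PETSC_COMM_SELF,izero,first,stride,is2,ierr))
!        endif
!
!     is1 would need a matching (zero) length on the non-receiving processes.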

      first = 0
      stride = 1
      PetscCallA(ISCreateStride(PETSC_COMM_SELF,NN,first,stride,is1,ierr))
      PetscCallA(ISCreateStride(PETSC_COMM_SELF,NN,first,stride,is2,ierr))
      PetscCallA(VecScatterCreate(y,is2,x,is1,ctx,ierr))
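
!     x was zeroed above, so scattering forward from y with ADD_VALUES leaves
!     each sequential x holding a complete copy of the parallel y.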
      PetscCallA(VecScatterBegin(ctx,y,x,ADD_VALUES,SCATTER_FORWARD,ierr))
      PetscCallA(VecScatterEnd(ctx,y,x,ADD_VALUES,SCATTER_FORWARD,ierr))
      PetscCallA(VecScatterDestroy(ctx,ierr))
!
!   View the sequential vector on the 0th processor
!
      if (rank .eq. 0) then
        PetscCallA(VecView(x,PETSC_VIEWER_STDOUT_SELF,ierr))
      endif

#if defined(PETSC_HAVE_FORTRAN_TYPE_STAR)
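!     PetscBarrier() blocks until every process in the object's communicator
!     reaches it; passing arbitrary PETSc objects from Fortran requires
!     compiler support for type(*), hence the guard above.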
      PetscCallA(PetscBarrier(y,ierr))
      PetscCallA(PetscBarrier(is1,ierr))
#endif
      PetscCallA(VecDestroy(x,ierr))
      PetscCallA(VecDestroy(y,ierr))
      PetscCallA(ISDestroy(is1,ierr))
      PetscCallA(ISDestroy(is2,ierr))

      PetscCallA(PetscFinalize(ierr))
      end

!/*TEST
!
!     test:
!       nsize: 3
!       filter:  grep -v " MPI process"
!
!TEST*/