
Commit be3f127

Merge branch 'main' into production-pilot-up2date
MTCam committed Aug 29, 2024
2 parents 1e553b1 + 245463d
Showing 11 changed files with 41 additions and 42 deletions.
2 changes: 1 addition & 1 deletion doc/upload-docs.sh
@@ -1,3 +1,3 @@
#! /bin/sh

-rsync --verbose --archive --delete _build/html/* doc-upload:doc/grudge
+rsync --verbose --archive --delete _build/html/ doc-upload:doc/grudge
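Likely motivation for this one-liner: the glob form `_build/html/*` skips dotfiles and fails outright when the directory is empty, while the trailing-slash form hands the directory's entire contents to rsync and lets `--delete` prune stale files at the destination root.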
6 changes: 3 additions & 3 deletions examples/advection/surface.py
@@ -214,15 +214,15 @@ def rhs(t, u):
logger.info("nsteps: %d", nsteps)

from grudge.shortcuts import set_up_rk4
-dt_stepper = set_up_rk4("u", dt, u0, rhs)
+dt_stepper = set_up_rk4("u", float(dt), u0, rhs)
plot = Plotter(actx, dcoll, order, visualize=visualize)

norm_u = actx.to_numpy(op.norm(dcoll, u0, 2))

step = 0

event = dt_stepper.StateComputed(0.0, 0, 0, u0)
plot(event, "fld-surface-%04d" % 0)
plot(event, f"fld-surface-{0:04d}")

if visualize and dim == 3:
from grudge.shortcuts import make_visualizer
@@ -253,7 +253,7 @@ def rhs(t, u):
step += 1
if step % 10 == 0:
norm_u = actx.to_numpy(op.norm(dcoll, event.state_component, 2))
plot(event, "fld-surface-%04d" % step)
plot(event, f"fld-surface-{step:04d}")

logger.info("[%04d] t = %.5f |u| = %.5e", step, event.t, norm_u)

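Two recurring changes begin in this file and repeat in the other advection examples: the timestep is passed as `float(dt)` (presumably because the CFL-based estimate arrives as a numpy or device scalar rather than a plain Python float), and `%`-formatting gives way to f-strings. The two spellings are equivalent; a quick sanity check:

```python
step = 7
# zero-padded width-4 integer, old and new spelling
assert "fld-surface-%04d" % step == f"fld-surface-{step:04d}" == "fld-surface-0007"
```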
4 changes: 2 additions & 2 deletions examples/advection/var-velocity.py
@@ -200,7 +200,7 @@ def rhs(t, u):
# {{{ time stepping

from grudge.shortcuts import set_up_rk4
-dt_stepper = set_up_rk4("u", dt, u, rhs)
+dt_stepper = set_up_rk4("u", float(dt), u, rhs)
plot = Plotter(actx, dcoll, order, visualize=visualize,
ylim=[-0.1, 1.1])

@@ -211,7 +211,7 @@ def rhs(t, u):

if step % 10 == 0:
norm_u = actx.to_numpy(op.norm(dcoll, event.state_component, 2))
plot(event, "fld-var-velocity-%04d" % step)
plot(event, f"fld-var-velocity-{step:04d}")

logger.info("[%04d] t = %.5f |u| = %.5e", step, event.t, norm_u)
# NOTE: These are here to ensure the solution is bounded for the
4 changes: 2 additions & 2 deletions examples/advection/weak.py
@@ -172,7 +172,7 @@ def rhs(t, u):
# {{{ time stepping

from grudge.shortcuts import set_up_rk4
-dt_stepper = set_up_rk4("u", dt, u, rhs)
+dt_stepper = set_up_rk4("u", float(dt), u, rhs)
plot = Plotter(actx, dcoll, order, visualize=visualize,
ylim=[-1.1, 1.1])

@@ -184,7 +184,7 @@ def rhs(t, u):

if step % 10 == 0:
norm_u = actx.to_numpy(op.norm(dcoll, event.state_component, 2))
plot(event, "fld-weak-%04d" % step)
plot(event, f"fld-weak-{step:04d}")

step += 1
logger.info("[%04d] t = %.5f |u| = %.5e", step, event.t, norm_u)
11 changes: 2 additions & 9 deletions grudge/array_context.py
@@ -128,7 +128,7 @@ class PyOpenCLArrayContext(_PyOpenCLArrayContextBase):
def __init__(self, queue: "pyopencl.CommandQueue",
allocator: Optional["pyopencl.tools.AllocatorBase"] = None,
wait_event_queue_length: Optional[int] = None,
-force_device_scalars: bool = False) -> None:
+force_device_scalars: bool = True) -> None:

if allocator is None:
warn("No memory allocator specified, please pass one. "
@@ -513,7 +513,7 @@ def __init__(self,
queue: "pyopencl.CommandQueue",
*, allocator: Optional["pyopencl.tools.AllocatorBase"] = None,
wait_event_queue_length: Optional[int] = None,
-force_device_scalars: bool = False) -> None:
+force_device_scalars: bool = True) -> None:
"""
See :class:`arraycontext.impl.pyopencl.PyOpenCLArrayContext` for most
arguments.
@@ -602,13 +602,6 @@ def __call__(self):
return self.actx_class(queue, allocator=alloc)


-# deprecated
-class PytestPyOpenCLArrayContextFactoryWithHostScalars(
-        _PytestPyOpenCLArrayContextFactoryWithClass):
-    actx_class = PyOpenCLArrayContext
-    force_device_scalars = False
-
-
register_pytest_array_context_factory("grudge.pyopencl",
PytestPyOpenCLArrayContextFactory)
register_pytest_array_context_factory("grudge.pytato-pyopencl",
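With the flipped default, reductions such as `op.norm` hand back device-resident scalars, which is why the examples above convert explicitly before logging or stepping. A fragment of the resulting calling pattern, reusing names from the example diffs (`dt_est` is a hypothetical stand-in for however the timestep estimate is computed):

```python
norm_u = actx.to_numpy(op.norm(dcoll, u, 2))  # device scalar -> host value
dt = float(actx.to_numpy(dt_est))             # plain float for set_up_rk4
```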
7 changes: 0 additions & 7 deletions grudge/dof_desc.py
@@ -285,13 +285,6 @@ def uses_quadrature(self) -> bool:
raise ValueError(
f"Invalid discretization tag: {self.discretization_tag}")

-    def with_dtag(self, dtag) -> "DOFDesc":
-        from warnings import warn
-        warn("'with_dtag' is deprecated. Use 'with_domain_tag' instead. "
-             "This will stop working in 2023",
-             DeprecationWarning, stacklevel=2)
-        return replace(self, domain_tag=dtag)
-
def with_domain_tag(self, dtag) -> "DOFDesc":
return replace(self, domain_tag=dtag)

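Migration for callers of the removed method is one-for-one, since `with_dtag` only warned and then forwarded to the same `replace` call; a sketch, with `dd` any existing DOFDesc and `new_domain_tag` a hypothetical tag:

```python
# dd = dd.with_dtag(new_domain_tag)       # removed spelling
dd = dd.with_domain_tag(new_domain_tag)   # identical semantics
```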
2 changes: 1 addition & 1 deletion grudge/geometry/metrics.py
@@ -844,7 +844,7 @@ def second_fundamental_form(
elif dim == 2:
second_ref_axes = [((0, 2),), ((0, 1), (1, 1)), ((1, 2),)]
else:
raise ValueError("%dD surfaces not supported" % dim)
raise ValueError(f"{dim}D surfaces not supported")

from pytools import flatten

5 changes: 3 additions & 2 deletions grudge/op.py
@@ -387,8 +387,9 @@ def compute_simplicial_grad(actx, in_grp, out_grp, get_diff_mat, vec_i,
]

return make_obj_array([
-        DOFArray(
-            actx, data=tuple([pgg_i[xyz_axis] for pgg_i in per_group_grads]))
+        DOFArray(actx, data=tuple([  # noqa: C409
+            pgg_i[xyz_axis] for pgg_i in per_group_grads
+        ]))
for xyz_axis in range(out_discr.ambient_dim)])


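The `# noqa: C409` silences flake8-comprehensions, which would otherwise suggest dropping the inner list passed to `tuple(...)`; the rewrite only reflows the call across lines, and the constructed tuple is unchanged.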
30 changes: 19 additions & 11 deletions grudge/reductions.py
@@ -80,6 +80,7 @@
from pytools import memoize_in

import grudge.dof_desc as dof_desc
+from grudge.array_context import MPIBasedArrayContext
from grudge.discretization import DiscretizationCollection


@@ -128,16 +129,17 @@ def nodal_sum(dcoll: DiscretizationCollection, dd, vec) -> Scalar:
:class:`~arraycontext.ArrayContainer`.
:returns: a device scalar denoting the nodal sum.
"""
-comm = dcoll.mpi_communicator
-if comm is None:
+from arraycontext import get_container_context_recursively
+actx = get_container_context_recursively(vec)
+
+if not isinstance(actx, MPIBasedArrayContext):
return nodal_sum_loc(dcoll, dd, vec)

+comm = actx.mpi_communicator
+
# NOTE: Do not move, we do not want to import mpi4py in single-rank computations
from mpi4py import MPI

-from arraycontext import get_container_context_recursively
-actx = get_container_context_recursively(vec)
-
return actx.from_numpy(
comm.allreduce(actx.to_numpy(nodal_sum_loc(dcoll, dd, vec)), op=MPI.SUM))

@@ -174,13 +176,16 @@ def nodal_min(dcoll: DiscretizationCollection, dd, vec, *, initial=None) -> Scalar:
:arg initial: an optional initial value. Defaults to `numpy.inf`.
:returns: a device scalar denoting the nodal minimum.
"""
-comm = dcoll.mpi_communicator
-if comm is None:
+from arraycontext import get_container_context_recursively
+actx = get_container_context_recursively(vec)
+
+if not isinstance(actx, MPIBasedArrayContext):
return nodal_min_loc(dcoll, dd, vec, initial=initial)

+comm = actx.mpi_communicator
+
# NOTE: Do not move, we do not want to import mpi4py in single-rank computations
from mpi4py import MPI
-actx = vec.array_context

return actx.from_numpy(
comm.allreduce(
@@ -231,13 +236,16 @@ def nodal_max(dcoll: DiscretizationCollection, dd, vec, *, initial=None) -> Scalar:
:arg initial: an optional initial value. Defaults to `-numpy.inf`.
:returns: a device scalar denoting the nodal maximum.
"""
-comm = dcoll.mpi_communicator
-if comm is None:
+from arraycontext import get_container_context_recursively
+actx = get_container_context_recursively(vec)
+
+if not isinstance(actx, MPIBasedArrayContext):
return nodal_max_loc(dcoll, dd, vec, initial=initial)

+comm = actx.mpi_communicator
+
# NOTE: Do not move, we do not want to import mpi4py in single-rank computations
from mpi4py import MPI
-actx = vec.array_context

return actx.from_numpy(
comm.allreduce(
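All three reductions now share one shape: recover the array context from the container, reduce locally, and communicate only when that context is MPI-aware. A condensed sketch of the shared pattern, not grudge API (`mpi_aware_sum` is a made-up name; the min/max variants differ only in the local reduction and the `op=` argument):

```python
from grudge.array_context import MPIBasedArrayContext
from grudge.reductions import nodal_sum_loc


def mpi_aware_sum(dcoll, dd, vec):
    from arraycontext import get_container_context_recursively
    actx = get_container_context_recursively(vec)

    local = nodal_sum_loc(dcoll, dd, vec)
    if not isinstance(actx, MPIBasedArrayContext):
        return local  # single rank: mpi4py is never imported

    from mpi4py import MPI  # imported late on purpose, as in the code above
    return actx.from_numpy(
        actx.mpi_communicator.allreduce(actx.to_numpy(local), op=MPI.SUM))
```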
6 changes: 5 additions & 1 deletion grudge/trace_pair.py
@@ -555,7 +555,11 @@ def __init__(self,
remote_bdry_data_template: ArrayOrContainer,
comm_tag: Optional[Hashable] = None):

-comm = dcoll.mpi_communicator
+# inducer/grudge@main has this
+# local_bdry_data = project(dcoll, volume_dd, bdry_dd, array_container)
+
+comm = actx.mpi_communicator
+
assert comm is not None

remote_rank = remote_part_id.rank
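Same theme as the reductions change: the communicator now comes from the array context rather than from the DiscretizationCollection, and the commented-out `project(...)` line records where upstream inducer/grudge@main diverges from this pilot branch.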
6 changes: 3 additions & 3 deletions test/test_grudge.py
@@ -793,7 +793,7 @@ def test_convergence_advec(actx_factory, mesh_name, mesh_pars, op_type, flux_type
elif dim == 3:
dt_factor = 2
else:
raise ValueError("dt_factor not known for %dd" % dim)
raise ValueError(f"dt_factor not known for {dim}d")
elif mesh_name.startswith("warped"):
dim = int(mesh_name[-1:])
mesh = mgen.generate_warped_rect_mesh(dim, order=order,
@@ -804,7 +804,7 @@ def test_convergence_advec(actx_factory, mesh_name, mesh_pars, op_type, flux_type
elif dim == 3:
dt_factor = 2
else:
raise ValueError("dt_factor not known for %dd" % dim)
raise ValueError(f"dt_factor not known for {dim}d")
else:
raise ValueError("invalid mesh name: " + mesh_name)

@@ -870,7 +870,7 @@ def rhs(t, u, adv_operator=adv_operator):

if visualize:
vis.write_vtk_file(
"fld-%s-%04d.vtu" % (mesh_par, step),
f"fld-{mesh_par}-{step:04d}vtu" % (mesh_par, step),
[("u", u)]
)

