diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 028a9ca7..a297012d 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,11 +1,16 @@
# Contributing to fAIr
-## Welcome
+## :hugs: Welcome
:+1::tada: First off, we're really glad you're reading this, because we need volunteer developers to help with the development of fAIr! :tada::+1:
We welcome and encourage contributors of all skill levels and we are committed to making sure your participation in our tech collective is inclusive, enjoyable and rewarding. If you have never contributed to an open-source project before, we are a good place to start and will make sure you are supported every step of the way. If you have **any** questions, please ask!
-## :handshake: Thank you
-Thank you very much in advance for your contributions!! Please ensure you refer to our [Code of Conduct](https://github.com/hotosm/fAIr/blob/master/docs/Code-of-Conduct.md).
-If you've read the guidelines, but you are still not sure how to contribute on Github, please reach out to us via our Slack #geospatial-tech-and-innovation.
+## Code contributions
+
+Fork the repo, maintain your local changes on a branch, and create pull requests (PRs) for the changes you think are needed. We would really appreciate your help!
+
+
+## Documentation contributions
+
+Create pull requests (PRs) for changes that you think are needed in the documentation of fAIr. As of now, the documentation lives in the [docs](./docs) directory.
diff --git a/backend/aiproject/settings.py b/backend/aiproject/settings.py
index 305c4b60..2159d5d0 100644
--- a/backend/aiproject/settings.py
+++ b/backend/aiproject/settings.py
@@ -33,7 +33,7 @@
HOSTNAME = env("HOSTNAME", default="127.0.0.1")
EXPORT_TOOL_API_URL = env(
"EXPORT_TOOL_API_URL",
- default=" https://api-prod.raw-data.hotosm.org/v1",
+ default="https://api-prod.raw-data.hotosm.org/v1",
)
ALLOWED_HOSTS = ["localhost", "127.0.0.1", HOSTNAME]
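Note: the only change above strips a stray leading space from the default URL; that space would otherwise survive into every request URL built from this setting. A quick sketch of the failure mode (values illustrative):

```python
# A leading space in a base URL survives f-string composition and produces
# an invalid request target; fixing the literal (or stripping) avoids it.
base = " https://api-prod.raw-data.hotosm.org/v1"  # old default, note the space
print(repr(f"{base}/status"))          # ' https://api-prod.raw-data.hotosm.org/v1/status'
print(repr(f"{base.strip()}/status"))  # 'https://api-prod.raw-data.hotosm.org/v1/status'
```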
diff --git a/backend/core/models.py b/backend/core/models.py
index b1d67be1..5ad0e284 100644
--- a/backend/core/models.py
+++ b/backend/core/models.py
@@ -41,6 +41,7 @@ class Label(models.Model):
aoi = models.ForeignKey(AOI, to_field="id", on_delete=models.CASCADE)
geom = geomodels.GeometryField(srid=4326)
osm_id = models.BigIntegerField(null=True, blank=True)
+ tags = models.JSONField(null=True, blank=True)
created_at = models.DateTimeField(auto_now_add=True)
@@ -101,7 +102,7 @@ class Feedback(models.Model):
validators=[MinValueValidator(18), MaxValueValidator(23)]
)
feedback_type = models.CharField(choices=FEEDBACK_TYPE, max_length=10)
- comments = models.TextField(max_length=100,null=True,blank=True)
+ comments = models.TextField(max_length=100, null=True, blank=True)
user = models.ForeignKey(OsmUser, to_field="osm_id", on_delete=models.CASCADE)
source_imagery = models.URLField()
@@ -111,6 +112,7 @@ class DownloadStatus(models.IntegerChoices):
DOWNLOADED = 1
NOT_DOWNLOADED = -1
RUNNING = 0
+
training = models.ForeignKey(Training, to_field="id", on_delete=models.CASCADE)
geom = geomodels.PolygonField(srid=4326)
label_status = models.IntegerField(default=-1, choices=DownloadStatus.choices)
@@ -123,6 +125,10 @@ class DownloadStatus(models.IntegerChoices):
class FeedbackLabel(models.Model):
osm_id = models.BigIntegerField(null=True, blank=True)
- feedback_aoi = models.ForeignKey(FeedbackAOI, to_field="id", on_delete=models.CASCADE)
+ feedback_aoi = models.ForeignKey(
+ FeedbackAOI, to_field="id", on_delete=models.CASCADE
+ )
+ tags = models.JSONField(null=True, blank=True)
+
geom = geomodels.PolygonField(srid=4326)
created_at = models.DateTimeField(auto_now_add=True)
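The new `tags` JSONField keeps the raw OSM key/value pairs next to each label geometry. A minimal sketch of creating a label with tags, assuming a configured Django environment and an existing AOI (the WKT polygon, `osm_id` and tag values are illustrative):

```python
from core.models import AOI, Label

aoi = AOI.objects.first()
label = Label.objects.create(
    aoi=aoi,
    geom="POLYGON((120.48 17.92, 120.49 17.92, 120.49 17.93, 120.48 17.92))",
    osm_id=123456789,  # illustrative OSM id
    tags={"building": "yes", "roof:material": "metal"},  # illustrative tags
)

# JSONField values come back as plain Python dicts:
assert label.tags["building"] == "yes"
```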
diff --git a/backend/core/serializers.py b/backend/core/serializers.py
index 9b08ff8b..24939aa9 100644
--- a/backend/core/serializers.py
+++ b/backend/core/serializers.py
@@ -126,7 +126,7 @@ class Meta:
model = Label
geo_field = "geom"
# auto_bbox = True
- fields = ("osm_id",)
+ fields = ("osm_id", "tags")
class FeedbackLabelFileSerializer(GeoFeatureModelSerializer):
@@ -134,7 +134,7 @@ class Meta:
model = FeedbackLabel
geo_field = "geom"
# auto_bbox = True
- fields = ("osm_id",)
+ fields = ("osm_id", "tags")
class FeedbackFileSerializer(GeoFeatureModelSerializer):
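With `tags` added to `fields`, every exported GeoJSON feature now carries the tags alongside `osm_id` in its properties. A sketch of what the serializers emit, assuming the same Django context:

```python
from core.models import Label
from core.serializers import LabelFileSerializer

# .data is a GeoJSON FeatureCollection; each feature's "properties" now
# holds both "osm_id" and "tags", e.g.
# {"osm_id": 123456789, "tags": {"building": "yes"}}
collection = LabelFileSerializer(Label.objects.all(), many=True).data
print(collection["features"][0]["properties"])
```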
diff --git a/backend/core/tasks.py b/backend/core/tasks.py
index b78d4a96..a66e4748 100644
--- a/backend/core/tasks.py
+++ b/backend/core/tasks.py
@@ -10,17 +10,6 @@
import ramp.utils
import tensorflow as tf
from celery import shared_task
-from core.models import AOI, Feedback, FeedbackAOI, FeedbackLabel, Label, Training
-from core.serializers import (
- FeedbackFileSerializer,
- FeedbackLabelFileSerializer,
- LabelFileSerializer,
-)
-from predictor import download_imagery,get_start_end_download_coords
-from core.utils import (
- bbox,
- is_dir_empty,
-)
from django.conf import settings
from django.contrib.gis.db.models.aggregates import Extent
from django.contrib.gis.geos import GEOSGeometry
@@ -28,6 +17,17 @@
from django.utils import timezone
from hot_fair_utilities import preprocess, train
from hot_fair_utilities.training import run_feedback
+from predictor import download_imagery, get_start_end_download_coords
+
+from core.models import AOI, Feedback, FeedbackAOI, FeedbackLabel, Label, Training
+from core.serializers import (
+ AOISerializer,
+ FeedbackAOISerializer,
+ FeedbackFileSerializer,
+ FeedbackLabelFileSerializer,
+ LabelFileSerializer,
+)
+from core.utils import bbox, is_dir_empty
logger = logging.getLogger(__name__)
@@ -56,8 +56,8 @@ def train_model(
try:
## -----------IMAGE DOWNLOADER---------
os.makedirs(settings.LOG_PATH, exist_ok=True)
- if training_instance.task_id is None or training_instance.task_id.strip() == '':
- training_instance.task_id=train_model.request.id
+ if training_instance.task_id is None or training_instance.task_id.strip() == "":
+ training_instance.task_id = train_model.request.id
training_instance.save()
log_file = os.path.join(
settings.LOG_PATH, f"run_{train_model.request.id}_log.txt"
@@ -77,6 +77,8 @@ def train_model(
if feedback:
try:
aois = FeedbackAOI.objects.filter(training=feedback)
+ aoi_serializer = FeedbackAOISerializer(aois, many=True)
+
except FeedbackAOI.DoesNotExist:
raise ValueError(
f"No Feedback AOI is attached with supplied training id:{dataset_id}, Create AOI first",
@@ -85,11 +87,12 @@ def train_model(
else:
try:
aois = AOI.objects.filter(dataset=dataset_id)
+ aoi_serializer = AOISerializer(aois, many=True)
+
except AOI.DoesNotExist:
raise ValueError(
f"No AOI is attached with supplied dataset id:{dataset_id}, Create AOI first",
)
-
for obj in aois:
bbox_coords = bbox(obj.geom.coords[0])
for z in zoom_level:
@@ -223,15 +226,31 @@ def train_model(
logger.info(model.inputs)
logger.info(model.outputs)
-
+
# Convert the model to tflite for android/ios.
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
# Save the model.
- with open(os.path.join(output_path, "checkpoint.tflite"), 'wb') as f:
+ with open(os.path.join(output_path, "checkpoint.tflite"), "wb") as f:
f.write(tflite_model)
+ # dump labels to output folder as well
+ with open(
+ os.path.join(output_path, "labels.geojson"),
+ "w",
+ encoding="utf-8",
+ ) as f:
+ f.write(json.dumps(serialized_field.data))
+
+ # dump used aois as featurecollection in output
+ with open(
+ os.path.join(output_path, "aois.geojson"),
+ "w",
+ encoding="utf-8",
+ ) as f:
+ f.write(json.dumps(aoi_serializer.data))
+
# now remove the ramp-data all our outputs are copied to our training workspace
shutil.rmtree(base_path)
training_instance.accuracy = float(final_accuracy)
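Besides the tflite checkpoint, the task now snapshots the exact labels and AOIs used for a run into the training output directory. A sketch of reading those artifacts back for auditing (the folder name is illustrative):

```python
import json
import os

output_path = "training_123"  # illustrative output folder

with open(os.path.join(output_path, "labels.geojson"), encoding="utf-8") as f:
    labels = json.load(f)
with open(os.path.join(output_path, "aois.geojson"), encoding="utf-8") as f:
    aois = json.load(f)

print(len(labels["features"]), "labels across", len(aois["features"]), "AOIs")
```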
diff --git a/backend/core/utils.py b/backend/core/utils.py
index 9d0a9f3b..fe8c5619 100644
--- a/backend/core/utils.py
+++ b/backend/core/utils.py
@@ -189,6 +189,7 @@ def process_feature(feature, aoi_id, foreign_key_id, feedback=False):
"""Multi thread process of features"""
properties = feature["properties"]
osm_id = properties["osm_id"]
+ tags = properties["tags"]
geometry = feature["geometry"]
if feedback:
if FeedbackLabel.objects.filter(
@@ -199,7 +200,12 @@ def process_feature(feature, aoi_id, foreign_key_id, feedback=False):
).delete()
label = FeedbackLabelSerializer(
- data={"osm_id": int(osm_id), "geom": geometry, "feedback_aoi": aoi_id}
+ data={
+ "osm_id": int(osm_id),
+ "tags": tags,
+ "geom": geometry,
+ "feedback_aoi": aoi_id,
+ }
)
else:
@@ -211,7 +217,7 @@ def process_feature(feature, aoi_id, foreign_key_id, feedback=False):
).delete()
label = LabelSerializer(
- data={"osm_id": int(osm_id), "geom": geometry, "aoi": aoi_id}
+ data={"osm_id": int(osm_id), "tags": tags, "geom": geometry, "aoi": aoi_id}
)
if label.is_valid():
label.save()
@@ -239,7 +245,10 @@ def process_geojson(geojson_file_path, aoi_id, feedback=False):
max_workers = (
(os.cpu_count() - 1) if os.cpu_count() != 1 else 1
) # leave one cpu free always
-
+ if feedback:
+ FeedbackLabel.objects.filter(feedback_aoi__id=aoi_id).delete()
+ else:
+ Label.objects.filter(aoi__id=aoi_id).delete()
# max_workers = os.cpu_count() # get total cpu count available on the
with open(geojson_file_path) as f:
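`process_feature` now reads `tags` directly from the feature's properties (a feature without a `tags` key would raise a `KeyError` here), and `process_geojson` clears existing labels for the AOI before re-inserting them, so re-uploads no longer leave stale labels behind. The minimal feature shape the code above expects (values illustrative):

```python
# Minimal GeoJSON feature shape expected by process_feature(); both
# "osm_id" and "tags" must be present in properties.
feature = {
    "properties": {
        "osm_id": 123456789,
        "tags": {"building": "yes"},
    },
    "geometry": {
        "type": "Polygon",
        "coordinates": [[[120.48, 17.92], [120.49, 17.92],
                         [120.49, 17.93], [120.48, 17.92]]],
    },
}
```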
diff --git a/backend/core/views.py b/backend/core/views.py
index 80389981..03f4ffe4 100644
--- a/backend/core/views.py
+++ b/backend/core/views.py
@@ -192,6 +192,7 @@ class FeedbackLabelViewset(viewsets.ModelViewSet):
bbox_filter_field = "geom"
filter_backends = (
InBBoxFilter, # it will take bbox like this api/v1/label/?in_bbox=-90,29,-89,35 ,
+ DjangoFilterBackend
)
bbox_filter_include_overlapping = True
filterset_fields = ["feedback_aoi", "feedback_aoi__training"]
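Adding `DjangoFilterBackend` next to `InBBoxFilter` lets clients combine the bbox filter with the `filterset_fields` above, which the feedback map in this PR relies on. A hedged sketch of such a query (host, token, bbox and training id are placeholders; the `access-token` header name follows the frontend code in this PR):

```python
import requests

base = "https://fair-dev.hotosm.org/api/v1"  # placeholder deployment
headers = {"access-token": "YOUR_TOKEN"}     # placeholder token
params = {
    "in_bbox": "-90,29,-89,35",         # minx,miny,maxx,maxy
    "feedback_aoi__training": 123,      # filter enabled by DjangoFilterBackend
}

r = requests.get(f"{base}/feedback-label/", params=params, headers=headers)
r.raise_for_status()
print(r.json())
```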
diff --git a/docs/Docker-installation.md b/docs/Docker-installation.md
index 96913b15..bd24ff70 100644
--- a/docs/Docker-installation.md
+++ b/docs/Docker-installation.md
@@ -33,11 +33,11 @@ Docker Compose is created with redis, worker, postgis database, api and frontend
```
mkdir ramp
```
- - Download BaseModel Checkpoint from [here](https://drive.google.com/file/d/1wvJhkiOrSlHmmvJ0avkAdu9sslFf5_I0/view?usp=sharing)
+ - Download BaseModel Checkpoint from [here](https://drive.google.com/file/d/1YQsY61S_rGfJ_f6kLQq4ouYE2l3iRe1k/view)
OR you can use the base model from [Model Ramp Baseline](https://github.com/radiantearth/model_ramp_baseline/tree/main/data/input/checkpoint.tf)
```
pip install gdown
- gdown --fuzzy https://drive.google.com/file/d/1wvJhkiOrSlHmmvJ0avkAdu9sslFf5_I0/view?usp=sharing
+ gdown --fuzzy https://drive.google.com/file/d/1YQsY61S_rGfJ_f6kLQq4ouYE2l3iRe1k/view
```
- Clone Ramp Code
diff --git a/frontend/public/josm-logo.png b/frontend/public/josm-logo.png
new file mode 100644
index 00000000..a6a1bca2
Binary files /dev/null and b/frontend/public/josm-logo.png differ
diff --git a/frontend/src/components/Layout/Feedback/Feedback.js b/frontend/src/components/Layout/Feedback/Feedback.js
index 14c852b2..e2a999d7 100644
--- a/frontend/src/components/Layout/Feedback/Feedback.js
+++ b/frontend/src/components/Layout/Feedback/Feedback.js
@@ -101,9 +101,14 @@ const Feedback = (props) => {
} else {
const datasetId = res.data.dataset;
setDatasetId(datasetId);
- const resAOIs = await axios.get(`/aoi/?dataset=${datasetId}`, null, {
- headers,
- });
+ const resAOIs = await axios.get(
+ `/workspace/download/dataset_${datasetId}/output/training_${trainingId}/aois.geojson`,
+ {
+ headers,
+ }
+ );
+ // console.log("resAOIs", resAOIs);
setOriginalAOIs(resAOIs.data);
}
} catch (e) {
@@ -247,7 +252,7 @@ const Feedback = (props) => {
"access-token": accessToken,
};
const res = await axios.get(
- `/feedback-label/?in_bbox=${box._southWest.lng},${box._southWest.lat},${box._northEast.lng},${box._northEast.lat}`,
+ `/feedback-label/?in_bbox=${box._southWest.lng},${box._southWest.lat},${box._northEast.lng},${box._northEast.lat}&feedback_aoi__training=${trainingId}`,
{ headers }
);
console.log("res from getLabels ", res);
diff --git a/frontend/src/components/Layout/Feedback/FeedbackAOI.js b/frontend/src/components/Layout/Feedback/FeedbackAOI.js
index 873abc5c..195948a3 100644
--- a/frontend/src/components/Layout/Feedback/FeedbackAOI.js
+++ b/frontend/src/components/Layout/Feedback/FeedbackAOI.js
@@ -1,5 +1,6 @@
import React, { useContext, useEffect, useState } from "react";
import {
+ Alert,
Avatar,
Grid,
IconButton,
@@ -9,6 +10,7 @@ import {
ListItemSecondaryAction,
ListItemText,
Pagination,
+ Snackbar,
SvgIcon,
Typography,
} from "@mui/material";
@@ -43,6 +45,8 @@ const ListItemWithWiderSecondaryAction = withStyles({
const PER_PAGE = 5;
const FeedbackAOI = (props) => {
const [dense, setDense] = useState(true);
+ const [openSnack, setOpenSnack] = useState(false);
+
const getFeedbackAOIs = async () => {
try {
const res = await axios.get(
@@ -179,7 +183,7 @@ const FeedbackAOI = (props) => {
<>
Area seems to be very small for an AOI
- Make sure it is not a Label
+ Please delete it and create a bigger AOI
</>
) : (
""
@@ -203,23 +207,64 @@ const FeedbackAOI = (props) => {
aria-label="comments"
sx={{ width: 24, height: 24 }}
className="margin1 transparent"
- onClick={(e) => {
- const url = `https://rapideditor.org/rapid#background=${
- props.sourceImagery
- ? "custom:" + props.sourceImagery
- : "Bing"
- }&datasets=fbRoads,msBuildings&disable_features=boundaries&map=16.00/17.9253/120.4841&gpx=&gpx=https://fair-dev.hotosm.org/api/v1/feedback-aoi/gpx/${
- layer.id
- }`;
- console.log(url);
- window.open(url, "_blank", "noreferrer");
+ onClick={async (e) => {
+ try {
+ // mutateFetch(layer.aoiId);
+ console.log("layer", layer);
+ console.log(
+ " props.sourceImagery",
+ props.sourceImagery
+ );
+
+ const Imgurl = new URL(
+ "http://127.0.0.1:8111/imagery"
+ );
+ Imgurl.searchParams.set("type", "tms");
+ Imgurl.searchParams.set("title", "Imagery");
+ Imgurl.searchParams.set(
+ "url",
+ props.sourceImagery
+ );
+ const imgResponse = await fetch(Imgurl);
+ // bounds._southWest.lng,
+ // bounds._southWest.lat,
+ // bounds._northEast.lng,
+ // bounds._northEast.lat,
+ const loadurl = new URL(
+ "http://127.0.0.1:8111/load_and_zoom"
+ );
+ loadurl.searchParams.set(
+ "bottom",
+ layer.geometry.coordinates[0][0][1]
+ );
+ loadurl.searchParams.set(
+ "top",
+ layer.geometry.coordinates[0][1][1]
+ );
+ loadurl.searchParams.set(
+ "left",
+ layer.geometry.coordinates[0][0][0]
+ );
+ loadurl.searchParams.set(
+ "right",
+ layer.geometry.coordinates[0][2][0]
+ );
+ const loadResponse = await fetch(loadurl);
+
+ if (!imgResponse.ok) {
+ setOpenSnack(true);
+ }
+ } catch (error) {
+ console.log("error", error);
+ setOpenSnack(true);
+ }
}}
>
{/* */}
@@ -319,6 +364,32 @@ const FeedbackAOI = (props) => {
)}
+ <Snackbar
+ open={openSnack}
+ onClose={() => {
+ console.log("openSnack", openSnack);
+ setOpenSnack(false);
+ }}
+ message={
+ <span>
+ Please make sure JOSM is open and Remote Control feature is
+ enabled{" "}
+ <a href="https://josm.openstreetmap.de/wiki/Help/Preferences/RemoteControl">
+ Click here for more details
+ </a>
+ </span>
+ }
+ // action={action}
+ color="red"
+ anchorOrigin={{ vertical: "bottom", horizontal: "right" }}
+ />
</>
);
};
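Both AOI lists now drive JOSM through its Remote Control HTTP API on 127.0.0.1:8111: first `/imagery` registers the training imagery as a TMS layer, then `/load_and_zoom` moves the viewport to the AOI, and the Snackbar covers the failure case. The same handshake as a standalone sketch (the TMS URL and coordinates are placeholders):

```python
import requests

JOSM = "http://127.0.0.1:8111"  # JOSM Remote Control endpoint

# Register the training imagery as a TMS layer in JOSM.
requests.get(f"{JOSM}/imagery", params={
    "type": "tms",
    "title": "Imagery",
    "url": "https://tiles.example.org/{z}/{x}/{y}.png",  # placeholder TMS URL
}).raise_for_status()

# Zoom the JOSM viewport to the AOI bounding box (lon/lat edges).
requests.get(f"{JOSM}/load_and_zoom", params={
    "left": 120.48, "right": 120.49, "bottom": 17.92, "top": 17.93,
}).raise_for_status()

# If JOSM is closed or Remote Control is disabled, these calls fail with
# requests.exceptions.ConnectionError -- the UI shows the Snackbar instead.
```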
diff --git a/frontend/src/components/Layout/Learn/Learn.js b/frontend/src/components/Layout/Learn/Learn.js
index 60ef63db..54c50b10 100644
--- a/frontend/src/components/Layout/Learn/Learn.js
+++ b/frontend/src/components/Layout/Learn/Learn.js
@@ -128,56 +128,56 @@ const Learn = () => {
title: "Step 16: Create Training for Your Model",
description:
"After creating Model for your dataset you will see following page. From here You can submit Trainings for your model. Give your epochs , Batch size and Zoom level for Training . Epochs refers to the number of times the learning algorithm will go through the entire training dataset, recommended between 20 - 60. Batch size refers to number of sample pairs to work through before updating the internal model parameters. 8 is recommended and preferred to be 8, 16, 32 ...etc . Zoom levels are the image sizes that will be downloaded during trainings (20 is recommended ) You can train on all of zoom levels . you can play with the parameters for your training after visualizing your results , Increase- Decrease batchsize / epochs or your training labels to achieve best performing model , You can Use Goldilocks Method to find best parameter for your dataset",
- image: "/learn/15.png", // Add image URL here
+ image: "/learn-resources/15.png", // Add image URL here
},
{
id: 17,
title: "Step 17: Submit Your Training",
description:
"Click on Submit Training Request and slide down, you will see your training listed there , You can check its status by clicking on info, Based on your dataset , AOI , your parameter model training may take time you can check progress on status. SUBMITTED , RUNNING , FINISHED . You can see your model accuracy and use it after it is finished. If it fails you can check the reason for it and adapt accordingly",
- image: "/learn/16.png", // Add image URL here
+ image: "/learn-resources/16.png", // Add image URL here
},
{
id: 18,
title: "Step 18: Check info of your Trainings",
description:
"Click on i icon button next to your training to visualize current terminal and process of your training , It will display accuracy validation graph after training is finished",
- image: "/learn/17.png", // Add image URL here
+ image: "/learn-resources/17.png", // Add image URL here
},
{
id: 19,
title: "Step 19: Finished Training",
description:
"You can visualize your trainings accuracy and it's graph after it is finished like this",
- image: "/learn/18.png", // Add image URL here
+ image: "/learn-resources/18.png", // Add image URL here
},
{
id: 20,
title: "Step 20: Publish Your Training",
description:
"Once you are statisfied accuracy and want to visualize its prediction you need to publish the training. You can run multiple trainings for same model to find best performing checkpoint, Each training will result different checkpoint. You can always publish another training. Click on PUblish Training button to Publish Model",
- image: "/learn/19.png", // Add image URL here
+ image: "/learn-resources/19.png", // Add image URL here
},
{
id: 21,
title: "Step 21: Start Mapping",
description:
"Once Model is Published it will be listed here on Model page as Published Training ID , Click on Start Mapping to See its Prediction",
- image: "/learn/20.png", // Add image URL here
+ image: "/learn-resources/20.png", // Add image URL here
},
{
id: 22,
title: "Step 22: Visualize Your Model's Prediction",
description:
"Zoom to the area you want to see predictions and Click Detect to Run your Published training Model. It will load the model and Run live predictions ",
- image: "/learn/21.png", // Add image URL here
+ image: "/learn-resources/21.png", // Add image URL here
},
{
id: 23,
title: "Step 23: Bring Predictions to OSM",
description:
"Your Predictions will be visualized on Map, Now you can bring them to OSM Modify them remove bad predictions and Push it back to OSM. fAIr should be able to have feedback loop when user discards the prediction or modifies it ( it is work in Prrogress) , you can launch JOSM with prediction data though",
- image: "/learn/22.png", // Add image URL here
+ image: "/learn-resources/22.png", // Add image URL here
},
];
diff --git a/frontend/src/components/Layout/TrainingDS/DatasetEditor/AOI.js b/frontend/src/components/Layout/TrainingDS/DatasetEditor/AOI.js
index f4d32575..8a883005 100644
--- a/frontend/src/components/Layout/TrainingDS/DatasetEditor/AOI.js
+++ b/frontend/src/components/Layout/TrainingDS/DatasetEditor/AOI.js
@@ -1,5 +1,6 @@
import React, { useContext, useEffect, useState } from "react";
import {
+ Alert,
Avatar,
Grid,
IconButton,
@@ -9,6 +10,7 @@ import {
ListItemSecondaryAction,
ListItemText,
Pagination,
+ Snackbar,
SvgIcon,
Typography,
} from "@mui/material";
@@ -42,6 +44,7 @@ const AOI = (props) => {
const [dense, setDense] = useState(true);
const count = Math.ceil(props.mapLayers.length / PER_PAGE);
let [page, setPage] = useState(1);
+ const [openSnack, setOpenSnack] = useState(false);
let _DATA = usePagination(
props.mapLayers.filter((e) => e.type === "aoi"),
PER_PAGE
@@ -82,6 +85,32 @@ const AOI = (props) => {
const { mutate: mutateFetch, data: fetchResult } =
useMutation(fetchOSMLebels);
+ const DeleteAOI = async (id, leafletId) => {
+ try {
+ const headers = {
+ "access-token": accessToken,
+ };
+
+ const res = await axios.delete(`/aoi/${id}`, {
+ headers,
+ });
+
+ if (res.error) {
+ console.log(res);
+ console.log(res.error.response.statusText);
+ } else {
+ console.log(`AOI ${id} deleted from DB`);
+
+ props.deleteAOIButton(id, leafletId);
+ return res.data;
+ }
+ } catch (e) {
+ console.log("isError", e);
+ } finally {
+ }
+ };
+ const { mutate: mutateDeleteAOI } = useMutation(DeleteAOI);
+
return (
<>
@@ -124,7 +153,7 @@ const AOI = (props) => {
<>
Area seems to be very small for an AOI
- Make sure it is not a Label
+ Please delete it and create a bigger AOI
</>
) : (
""
@@ -141,7 +170,7 @@ const AOI = (props) => {
{/*
*/}
-
+ {/*
{
);
}}
>
- {/* */}
+
+ */}
+
+ <IconButton
+ aria-label="comments"
+ sx={{ width: 24, height: 24 }}
+ className="margin1 transparent"
+ onClick={async (e) => {
+ try {
+ // mutateFetch(layer.aoiId);
+ console.log("layer", layer);
+
+ const Imgurl = new URL(
+ "http://127.0.0.1:8111/imagery"
+ );
+ Imgurl.searchParams.set("type", "tms");
+ Imgurl.searchParams.set(
+ "title",
+ props.oamImagery.name
+ );
+ Imgurl.searchParams.set(
+ "url",
+ props.oamImagery.url
+ );
+ const imgResponse = await fetch(Imgurl);
+ // bounds._southWest.lng,
+ // bounds._southWest.lat,
+ // bounds._northEast.lng,
+ // bounds._northEast.lat,
+ const loadurl = new URL(
+ "http://127.0.0.1:8111/load_and_zoom"
+ );
+ loadurl.searchParams.set(
+ "bottom",
+ layer.latlngs[0].lat
+ );
+ loadurl.searchParams.set(
+ "top",
+ layer.latlngs[1].lat
+ );
+ loadurl.searchParams.set(
+ "left",
+ layer.latlngs[0].lng
+ );
+ loadurl.searchParams.set(
+ "right",
+ layer.latlngs[2].lng
+ );
+ const loadResponse = await fetch(loadurl);
+
+ if (!imgResponse.ok) {
+ setOpenSnack(true);
+ }
+ } catch (error) {
+ setOpenSnack(true);
+ }
+ }}
+ >
+ <img
+ alt="JOSM logo"
+ className="editor-logo-small"
+ src="/josm-logo.png"
+ />
+ </IconButton>
{
+
{/* {
+
+ <IconButton
+ aria-label="comments"
+ sx={{ width: 24, height: 24 }}
+ className="margin1 transparent"
+ onClick={(e) => {
+ // console.log(
+ // `layer.aoiId ${layer.aoiId} and layer.id ${layer.id}`
+ // );
+ mutateDeleteAOI(layer.aoiId, layer.id);
+ }}
+ >
+ <DeleteIcon fontSize="small" />
+ </IconButton>
))}
@@ -263,6 +371,32 @@ const AOI = (props) => {
)}
+ <Snackbar
+ open={openSnack}
+ onClose={() => {
+ console.log("openSnack", openSnack);
+ setOpenSnack(false);
+ }}
+ message={
+ <span>
+ Please make sure JOSM is open and Remote Control feature is
+ enabled{" "}
+ <a href="https://josm.openstreetmap.de/wiki/Help/Preferences/RemoteControl">
+ Click here for more details
+ </a>
+ </span>
+ }
+ // action={action}
+ color="red"
+ anchorOrigin={{ vertical: "bottom", horizontal: "right" }}
+ />
</>
);
};
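The new delete button issues `DELETE /aoi/{id}` and then prunes the layer client-side via `deleteAOIButton`. The equivalent request outside the browser (host, token and AOI id are placeholders; the header name matches the frontend code above):

```python
import requests

base = "https://fair-dev.hotosm.org/api/v1"  # placeholder deployment
headers = {"access-token": "YOUR_TOKEN"}     # placeholder token
aoi_id = 42                                  # illustrative AOI id

r = requests.delete(f"{base}/aoi/{aoi_id}", headers=headers)
r.raise_for_status()
print(f"AOI {aoi_id} deleted from DB")
```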
diff --git a/frontend/src/components/Layout/TrainingDS/DatasetEditor/DatasetEditor.js b/frontend/src/components/Layout/TrainingDS/DatasetEditor/DatasetEditor.js
index 6a45c947..10bfb8f4 100644
--- a/frontend/src/components/Layout/TrainingDS/DatasetEditor/DatasetEditor.js
+++ b/frontend/src/components/Layout/TrainingDS/DatasetEditor/DatasetEditor.js
@@ -64,6 +64,10 @@ function DatasetEditor() {
return () => {};
}, []);
const [zoom, setZoom] = useState(15);
+ const deleteAOIButton = (id, leafletId) => {
+ setMapLayers((layers) => layers.filter((l) => l.id !== leafletId));
+ window.location.reload(false);
+ };
return (
<>
{isLoading && "Loading ............"}
@@ -112,6 +116,7 @@ function DatasetEditor() {
oamImagery={oamImagery}
mapLayers={mapLayers.filter((i) => i.type === "aoi")}
selectAOIHandler={selectAOIHandler}
+ deleteAOIButton={deleteAOIButton}
>
diff --git a/frontend/src/components/Layout/TrainingDS/DatasetEditor/DatasetMap.js b/frontend/src/components/Layout/TrainingDS/DatasetEditor/DatasetMap.js
index 59cbe4a5..2cddeefb 100644
--- a/frontend/src/components/Layout/TrainingDS/DatasetEditor/DatasetMap.js
+++ b/frontend/src/components/Layout/TrainingDS/DatasetEditor/DatasetMap.js
@@ -7,10 +7,11 @@ import {
Popup,
TileLayer,
MapContainer,
- Rectangle,
+ GeoJSON,
Polygon,
useMapEvents,
} from "react-leaflet";
+
import L from "leaflet";
import { EditControl } from "react-leaflet-draw";
@@ -231,7 +232,7 @@ const DatasetMap = (props) => {
if (res.error) setMapError(res.error.response.statusText);
else {
- // console.log("getAOI", res.data)
+ // console.log("getAOI", res.data);
setFromDB(true);
return res.data;
}
@@ -420,7 +421,7 @@ const DatasetMap = (props) => {
setIsEditing(false);
};
- const blueOptions = { color: "#03002e", width: 10, opacity: 1 };
+ const blueOptions = { color: "#03002e", width: 10, opacity: 0 };
const corrdinatestoLatlngs = (layer) => {
const latlngs = [];
@@ -436,7 +437,7 @@ const DatasetMap = (props) => {
if (reactFGref) {
// make sure each layer has a feature geojson for created ones
reactFGref.eachLayer((l) => {
- // console.log("each layer", l)
+ // console.log("each layer", l);
if (l.feature === undefined) {
// console.log("mapLayers", mapLayers)
if (mapLayers.find((m) => m.id === l._leaflet_id))
@@ -630,10 +631,18 @@ const DatasetMap = (props) => {
}
/>
+
+
+
@@ -653,19 +662,34 @@ const DatasetMap = (props) => {
minZoom={props.oamImagery.minzoom}
attribution={props.oamImagery.name}
url={props.oamImagery.url}
+ maxNativeZoom={
+ props.oamImagery.url.includes("opena")
+ ? props.oamImagery.maxzoom
+ : 18
+ }
/>
)}
-
+ {/*
e.type === "aoi")
)}
/>
-
+ */}
+
+
{
_onFeatureGroupReady(reactFGref, geoJsonLoadedFile);
@@ -686,12 +710,12 @@ const DatasetMap = (props) => {
// );
_onCreate(e, "aoi");
}}
- onEdited={_onEdited}
- onDeleted={_onDeleted}
- onEditStart={_onEditStart}
- onEditStop={_onEditStop}
- onDrawStart={_onEditStart}
- onDrawStop={_onEditStop}
+ // onEdited={_onEdited}
+ // onDeleted={_onDeleted}
+ // onEditStart={_onEditStart}
+ // onEditStop={_onEditStop}
+ // onDrawStart={_onEditStart}
+ // onDrawStop={_onEditStop}
draw={{
polyline: false,
polygon: false,
diff --git a/frontend/src/index.css b/frontend/src/index.css
index fba2efac..ea32d548 100644
--- a/frontend/src/index.css
+++ b/frontend/src/index.css
@@ -136,7 +136,7 @@ code {
width: 20px;
}
-.rapid-logo-small
+.editor-logo-small
{
height: 20px;
width: 30px;
@@ -166,6 +166,11 @@ code {
margin-left: 12px !important;
}
+.margin-left-13
+{
+ margin-left: 13px !important;
+}
+
.leaflet-interactive {
width: 30px !important;
height: 30px !important;
@@ -174,4 +179,9 @@ code {
.MuiDialog-paper
{
max-width: 80% !important;
+}
+
+.leaflet-draw.leaflet-control > div:nth-child(2)
+{
+ display: none;
}
\ No newline at end of file