"""
Query past the 2,000-row limit that ESRI feature servers return per request.

https://gis.stackexchange.com/questions/266897/how-to-get-around-the-1000-objectids-limit-on-arcgis-server
"""
import urllib.parse

import geopandas as gpd
import numpy as np
import pandas as pd
import requests


def query_arcgis_feature_server(url_feature_server=""):
    """
    This function downloads all of the features available on a given ArcGIS
    feature server. The function is written to bypass the limitations imposed
    by the online service, such as only returning up to 1,000 or 2,000
    features at a time.

    Parameters
    ----------
    url_feature_server : string
        String containing the URL of the service API you want to query. It
        should end in a forward slash and look something like this:
        'https://services.arcgis.com/P3ePLMYs2RVChkJx/arcgis/rest/services/USA_Counties/FeatureServer/0/'

    Returns
    -------
    geodata_final : gpd.GeoDataFrame
        This is a GeoDataFrame that contains all of the features from the
        feature server. After calling this function, the `geodata_final`
        object can be used to store the data on disk in several different
        formats including, but not limited to, Shapefile (.shp), GeoJSON
        (.geojson), GeoPackage (.gpkg), or PostGIS.
        See https://geopandas.org/en/stable/docs/user_guide/io.html#writing-spatial-data
        for more details.

    """
    # Nothing to download if no URL was given
    if url_feature_server == "":
        return gpd.GeoDataFrame()

    # Fixing the last character in case the URL provided didn't end in a
    # forward slash
    if url_feature_server[-1] != "/":
        url_feature_server = url_feature_server + "/"

    # Getting the layer definitions. This contains important info such as the
    # name of the column used as feature_ids/object_ids, among other things.
    layer_def = requests.get(url_feature_server + "?f=pjson").json()

    # The `objectIdField` is the column name used for the
    # feature_ids/object_ids
    fid_colname = layer_def["objectIdField"]

    # The `maxRecordCount` tells us the maximum number of records this REST
    # API service can return at once. The code below is written such that we
    # perform multiple calls to the API, each one being short enough never to
    # go beyond this limit.
    record_count_max = layer_def["maxRecordCount"]
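
    # For reference, the relevant part of the returned layer definition
    # typically looks something like this (a trimmed, hypothetical sample;
    # only the two keys used above matter here):
    #     {
    #         "objectIdField": "OBJECTID",
    #         "maxRecordCount": 2000,
    #         ...
    #     }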

    # Part of the URL that specifically requests only the object IDs. The
    # "+" signs in the where clause decode to spaces, so it reads
    # "<fid_colname> is not null".
    url_query_get_ids = f"query?f=geojson&returnIdsOnly=true&where={fid_colname}+is+not+null"

    url_comb = url_feature_server + url_query_get_ids

    # Getting all the object IDs. With f=geojson, the service nests the IDs
    # under the "properties" key of the response.
    service_request = requests.get(url_comb)
    all_objectids = np.sort(service_request.json()["properties"]["objectIds"])

    # If the layer has no features, return an empty GeoDataFrame right away
    # instead of indexing into an empty array below.
    if len(all_objectids) == 0:
        return gpd.GeoDataFrame()

    # This variable will store all the parts of the multiple queries. These
    # parts will, at the end, be concatenated into one large GeoDataFrame.
    geodata_parts = []

    # This part of the query is fixed and never actually changes
    url_query_fixed = "query?f=geojson&outFields=*&where="

    # Identifying the largest query size allowed per request. This will
    # dictate how many queries need to be made. We start the search at the
    # max record count, but that sometimes generates errors - the query
    # might time out because it's too big. If the test query times out, we
    # try to shrink the query size until the test query goes through without
    # generating a time-out error.
    block_size = min(record_count_max, len(all_objectids))
    worked = False
    while not worked:
        # Moving the "cursors" to their appropriate locations
        id_start = all_objectids[0]
        id_end = all_objectids[block_size - 1]

        readable_query_string = f"{fid_colname}>={id_start} and {fid_colname}<={id_end}"

        url_query_variable = urllib.parse.quote(readable_query_string)

        url_comb = url_feature_server + url_query_fixed + url_query_variable

        url_get = requests.get(url_comb)

        if "error" in url_get.json():
            # The query failed (likely a time-out); halve the block size and
            # try again. Using max(1, ...) guarantees the size keeps
            # shrinking instead of getting stuck above 1.
            block_size = max(1, block_size // 2)
        else:
            geodata_part = gpd.read_file(url_get.text)

            geodata_parts.append(geodata_part.copy())
            worked = True

    # Performing the actual query to the API multiple times. This skips the
    # first few rows/features in the data because those rows were already
    # captured by the query performed in the code chunk above.
    for i in range(block_size, len(all_objectids), block_size):
        # Moving the "cursors" to their appropriate locations and finding the
        # limits of each block
        sub_list = all_objectids[i : i + block_size]
        id_start = sub_list[0]
        id_end = sub_list[-1]

        readable_query_string = f"{fid_colname}>={id_start} and {fid_colname}<={id_end}"

        # Encoding from readable text to URL
        url_query_variable = urllib.parse.quote(readable_query_string)

        # Constructing the full request URL
        url_comb = url_feature_server + url_query_fixed + url_query_variable

        # Actually performing the query and storing its results in a
        # GeoDataFrame
        geodata_part = gpd.read_file(url_comb, driver="GeoJSON")

        # Appending the result to `geodata_parts`
        if geodata_part.shape[0] > 0:
            geodata_parts.append(geodata_part)

    # Concatenating all of the query parts into one large GeoDataFrame
    geodata_final = (
        pd.concat(geodata_parts, ignore_index=True)
        .sort_values(by=fid_colname)
        .reset_index(drop=True)
    )

    # Checking if any object ID is missing
    ids_queried = set(geodata_final[fid_colname])
    for this_id in all_objectids:
        if this_id not in ids_queried:
            print(
                "WARNING! The following ObjectID is missing from the final "
                f"GeoDataFrame: ObjectID={this_id}"
            )

    # Checking if any object ID is included twice
    geodata_temp = geodata_final[[fid_colname]].copy()
    geodata_temp["temp"] = 1
    geodata_temp = geodata_temp.groupby(fid_colname).agg({"temp": "sum"}).reset_index()
    geodata_temp = geodata_temp.loc[geodata_temp["temp"] > 1].copy()
    for i, this_id in enumerate(geodata_temp[fid_colname].values):
        n_times = geodata_temp["temp"].values[i]
        print(
            "WARNING! The following ObjectID is included multiple times in "
            f"the final GeoDataFrame: ObjectID={this_id}\tOccurrences={n_times}"
        )

    return geodata_final
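

# Minimal usage sketch, reusing the sample layer URL from the docstring
# above; the output filename below is just an illustration.
if __name__ == "__main__":
    sample_url = (
        "https://services.arcgis.com/P3ePLMYs2RVChkJx/arcgis/rest/services/"
        "USA_Counties/FeatureServer/0/"
    )
    counties = query_arcgis_feature_server(sample_url)
    print(f"Downloaded {len(counties)} features")

    # Write the result to disk in any format geopandas supports, e.g. a
    # GeoPackage
    counties.to_file("usa_counties.gpkg", driver="GPKG")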