Commit f3e90b1 (0 parents)
Initial Commit

270 files changed: +4180 −0 lines

.eslintrc.json

+3
@@ -0,0 +1,3 @@
{
  "extends": "next/core-web-vitals"
}

.example.env.local

+4
@@ -0,0 +1,4 @@
DATABASE_URL=
GOOGLE_CLOUD_PROJECT=
GOOGLE_CLOUD_LOCATION=
GOOGLE_CLOUD_API_ENDPOINT=

.gitignore

+36
@@ -0,0 +1,36 @@
# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.

# dependencies
/node_modules
/.pnp
.pnp.js
.yarn/install-state.gz

# testing
/coverage

# next.js
/.next/
/out/

# production
/build

# misc
.DS_Store
*.pem

# debug
npm-debug.log*
yarn-debug.log*
yarn-error.log*

# local env files
.env*.local

# vercel
.vercel

# typescript
*.tsbuildinfo
next-env.d.ts

README.md

+55
@@ -0,0 +1,55 @@
# Neon Image Search

This is an image search app, built using NeonDB and Vertex AI. It uses Vertex AI to convert the data images into embeddings and stores them in NeonDB. When a request is made, the query image is converted into an embedding and NeonDB performs the similarity search using pgvector.

![preview](./assets/app_preview.png)
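
Under the hood, the similarity search is a single pgvector query over the stored embeddings. Here is a minimal sketch of that lookup using the `@neondatabase/serverless` driver, mirroring the query the API route runs (the `embedding` argument stands in for a real Vertex AI result):

```ts
import { neon } from '@neondatabase/serverless';

const sql = neon(process.env.DATABASE_URL!);

// `embedding` is the 1408-dimension vector that Vertex AI's
// multimodalembedding@001 model returns for the query image.
const findSimilar = async (embedding: number[]) => {
  // `<=>` is pgvector's cosine-distance operator, so the eight
  // closest images come back first.
  return await sql(
    `SELECT id, type, name FROM items ORDER BY embedding::VECTOR <=> '[${embedding}]' LIMIT 8;`
  );
};
```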

## Installation

Before we begin with the installation, make sure you're logged in to the [Google Cloud CLI](https://cloud.google.com/docs/authentication/gcloud#gcloud-credentials).
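One way to set that up (an assumption; any of the credential options in the linked docs will work) is to create Application Default Credentials, which the Vertex AI client picks up automatically:

```bash
gcloud auth application-default login
```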
1. Clone the repo

```bash
git clone https://github.com/ItzCrazyKns/Neon-Image-Search.git
```

2. Install dependencies

```bash
yarn
```

Or if you are using NPM

```bash
npm i
```

3. Rename `.example.env.local` to `.env.local` and fill in all the fields.
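For example, in a Unix-like shell:

```bash
# Copy the template, then fill in each variable in .env.local
cp .example.env.local .env.local
```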
4. Convert the data images into embeddings. (The data images are stored in `/public/flower_images`.)

```bash
yarn run generate-embeddings
```

Or if you are using NPM

```bash
npm run generate-embeddings
```

5. Finally, start the app

```bash
yarn run dev
```

Or if you are using NPM

```bash
npm run dev
```

**Note**: You can also build the project and run it in production by following Next.js's guidelines.
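For instance, assuming the default Next.js `build` and `start` scripts in `package.json`:

```bash
npm run build
npm run start
```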

app/api/predict/route.ts

+60
@@ -0,0 +1,60 @@
import { NextRequest, NextResponse } from "next/server";
import { neon, neonConfig } from '@neondatabase/serverless';
import { PredictionServiceClient } from "@google-cloud/aiplatform";

neonConfig.fetchConnectionCache = true;

const sql = neon(process.env.DATABASE_URL!);

const vertexAiClient = new PredictionServiceClient({
  apiEndpoint: process.env.GOOGLE_CLOUD_API_ENDPOINT,
})

export const POST = async (req: NextRequest) => {
  try {
    const data = await req.json()

    if (!data.base64Image) return NextResponse.json({ error: 'No image provided' }, { status: 400 })

    // Convert the uploaded image into an embedding with Vertex AI's
    // multimodal embedding model.
    const response = await vertexAiClient.predict({
      endpoint: `projects/${process.env.GOOGLE_CLOUD_PROJECT}/locations/${process.env.GOOGLE_CLOUD_LOCATION}/publishers/google/models/multimodalembedding@001`,
      instances: [
        {
          structValue: {
            fields: {
              image: {
                structValue: {
                  fields: {
                    bytesBase64Encoded: {
                      stringValue: data.base64Image,
                    },
                  },
                },
              },
            },
          },
        },
      ],
    });

    if (!response || response.length < 1 || !response[0]?.predictions || response[0]?.predictions.length < 1) return NextResponse.json({ error: 'No predictions found for image' }, { status: 404 })

    const embeddings = response[0]?.predictions[0]?.structValue?.fields?.imageEmbedding?.listValue?.values?.map((v: any) => v.numberValue)

    // pgvector's `<=>` operator sorts by cosine distance, so the eight
    // most similar images come back first.
    const result = await sql(`SELECT id, type, name FROM items ORDER BY embedding::VECTOR <=> '[${embeddings}]' LIMIT 8;`)

    const similarImages = result.map((item: any) => {
      return {
        id: item.id,
        type: item.type,
        name: item.name,
        image: '/flower_images/' + item.type + '/' + item.name,
      }
    })

    return NextResponse.json({ result: similarImages }, { status: 200 })
  } catch (err) {
    console.error('An error has occurred:', err)
    return NextResponse.json({ error: 'An error has occurred' }, { status: 500 })
  }
}

app/favicon.ico

25.3 KB
Binary file not shown.

app/globals.css

+3
@@ -0,0 +1,3 @@
@tailwind base;
@tailwind components;
@tailwind utilities;

app/layout.tsx

+19
@@ -0,0 +1,19 @@
import type { Metadata } from "next";
import "./globals.css";

export const metadata: Metadata = {
  title: "Image Search Engine",
  description: "Search for similar images using Neon DB and Vertex AI",
};

export default function RootLayout({
  children,
}: Readonly<{
  children: React.ReactNode;
}>) {
  return (
    <html lang="en">
      <body>{children}</body>
    </html>
  );
}

app/page.tsx

+75
@@ -0,0 +1,75 @@
'use client';

import Image from 'next/image';
import React, { useCallback, useState } from 'react';
import { useDropzone } from 'react-dropzone';

const Home = () => {
  const [filePreview, setFilePreview] = useState("");
  const [base64Image, setBase64Image] = useState("");
  const [similarImages, setSimilarImages] = useState([] as any[]);
  const [loading, setLoading] = useState(false)

  // Show a preview of the dropped file and keep a base64 copy for the API request.
  const onDrop = useCallback(async (acceptedFiles: File[]) => {
    const file = acceptedFiles[0]
    const previewUrl = URL.createObjectURL(file)
    setFilePreview(previewUrl)
    const arrayBuffer = await file.arrayBuffer();
    setBase64Image(Buffer.from(arrayBuffer).toString('base64'))
  }, []);

  const { getRootProps, getInputProps, isDragActive } = useDropzone({
    onDrop,
    multiple: false,
  });

  return (
    <main className='bg-gray-950 min-h-screen flex flex-col items-center justify-center'>
      <div className='max-w-screen-xl m-8 xl:m-auto flex flex-col items-center justify-center'>
        <h2 className='text-lg sm:text-xl md:text-2xl xl:text-4xl font-semibold text-white text-center mt-8'><span className='bg-gradient-to-br from-green-600 to-green-300 bg-clip-text text-transparent p-1'>Search</span> for similar images 🚀</h2>
        <p className='text-gray-300 text-xs md:text-sm max-w-[300px] text-center mt-0 xl:mt-2 2xl:mt-3'>Upload an image of Lilly, Lotus, Orchid, Sunflower, or Tulip and find images similar to it.</p>

        <div className='text-white border border-white/30 rounded-lg cursor-pointer mt-8' {...getRootProps()}>
          <input {...getInputProps()} />
          {
            isDragActive ?
              <p className='my-10 mx-5 text-center'>Drop the file here ...</p> :
              filePreview ?
                <div>
                  <Image width={512} height={512} src={filePreview} alt="Preview" className='h-[200px] w-[200px] md:h-[300px] md:w-auto m-1 md:m-2 xl:m-4 rounded-lg' />
                </div> :
                <p className='my-10 mx-5 text-center text-xs lg:text-lg'>Drag &apos;n&apos; drop a file here, or click to select a file</p>
          }
        </div>
        {/* Send the base64 image to the prediction route and render the matches. */}
        <button
          onClick={async () => {
            setLoading(true)
            const res = await fetch('/api/predict', {
              method: "POST",
              body: JSON.stringify({
                base64Image: base64Image
              })
            })

            const data = await res.json()
            setSimilarImages(data.result)
            setLoading(false)
          }}
          disabled={loading}
          className='bg-gradient-to-br disabled:opacity-70 from-green-600 to-green-300 border border-green-600 rounded-lg w-full md:w-2/4 xl:w-4/12 my-4 text-white font-semibold text-sm xl:text-lg py-2 hover:opacity-90 transition duration-200'
        >Search
        </button>
        <div className='grid grid-cols-1 sm:grid-cols-2 md:grid-cols-3 gap-4 mb-8'>
          {similarImages.map((image, index) => (
            <div key={index} className='flex flex-col items-center border border-white/30 rounded-lg pb-2'>
              <Image width={512} height={512} src={image.image} alt={image.name} className='h-[200px] w-[200px] md:h-[250px] md:w-auto m-1 md:m-2 xl:m-4 rounded-lg' />
              <p className='text-white font-semibold text-lg xl:-mt-2'>{image.type}</p>
            </div>
          ))}
        </div>
      </div>
    </main>
  );
}

export default Home;

assets/app_preview.png

1.94 MB

generateEmbeddings.ts

+120
@@ -0,0 +1,120 @@
import { Pool } from 'pg';
import * as fs from "fs/promises"
import * as path from 'path';
import dotenv from 'dotenv';
import { PredictionServiceClient } from '@google-cloud/aiplatform';

dotenv.config({
  path: path.join(__dirname, '.env.local')
});

const vertexAiClient = new PredictionServiceClient({
  apiEndpoint: process.env.GOOGLE_CLOUD_API_ENDPOINT,
})

const pool = new Pool({
  connectionString: process.env.DATABASE_URL
})

const baseDir = "./public/flower_images";

// Each sub-folder of baseDir holds the images for one flower type.
const listFolders = async (baseDir: string): Promise<string[]> => {
  const items = await fs.readdir(baseDir, { withFileTypes: true });
  return items.filter(item => item.isDirectory()).map(item => path.join(baseDir, item.name));
};

// Take at most 50 .jpg/.jpeg/.png images per folder; path.basename
// extracts the folder name (the flower type) on any platform.
const listImages = async (folderPath: string): Promise<{ name: string; path: string; folderPath: string; }[]> => {
  const items = await fs.readdir(folderPath, { withFileTypes: true })

  return items.filter(item => item.isFile() && (item.name.includes('.jpg') || item.name.includes('.jpeg') || item.name.includes('.png'))).slice(0, 50).map(item => {
    return {
      path: path.join(folderPath, item.name),
      folderPath: path.basename(folderPath),
      name: item.name
    }
  })
}

const imageToBase64 = async (filePath: string): Promise<string> => {
  const fileBuffer = await fs.readFile(filePath)
  return fileBuffer.toString('base64')
}

const convertImagesToEmbeddings = async (): Promise<any[]> => {
  const embeddings: any[] = []
  const folders = await listFolders(baseDir)

  for (const folder of folders) {
    const images = await listImages(folder)
    for (const image of images) {
      const base64Image = await imageToBase64(image.path)

      // Ask Vertex AI's multimodal embedding model for a vector
      // representation of this image.
      const response = await vertexAiClient.predict({
        endpoint: `projects/${process.env.GOOGLE_CLOUD_PROJECT}/locations/${process.env.GOOGLE_CLOUD_LOCATION}/publishers/google/models/multimodalembedding@001`,
        instances: [
          {
            structValue: {
              fields: {
                image: {
                  structValue: {
                    fields: {
                      bytesBase64Encoded: {
                        stringValue: base64Image,
                      },
                    },
                  },
                },
              },
            },
          },
        ],
      });

      if (!response || response.length < 1 || !response[0]?.predictions || response[0]?.predictions.length < 1) {
        console.error('No predictions found for image', image.path);
        continue;
      }

      embeddings.push({
        embeddings: response[0]?.predictions[0]?.structValue?.fields?.imageEmbedding?.listValue?.values?.map((v) => v.numberValue),
        type: image.folderPath,
        name: image.name
      })
    }
  }

  return embeddings
};

const generateEmbeddings = async () => {
  const client = await pool.connect()
  try {
    // pgvector stores the 1408-dimension embeddings alongside each
    // image's flower type and file name.
    await client.query(`
      CREATE EXTENSION IF NOT EXISTS vector;
      CREATE TABLE IF NOT EXISTS items (
        id SERIAL PRIMARY KEY,
        type VARCHAR(255) NOT NULL,
        name VARCHAR(255) NOT NULL,
        embedding VECTOR(1408) NOT NULL
      )
    `)
    console.log('Successfully created tables')
    console.log('Generating embeddings...')

    const embeddings = await convertImagesToEmbeddings()
    console.log(embeddings)
    console.log('Inserting embeddings into database...')
    await client.query(`
      INSERT INTO items (type, name, embedding) VALUES ${embeddings.map((e) => `('${e.type}', '${e.name}', '[${e.embeddings}]')`).join(',')}
    `)
    console.log('Successfully inserted embeddings into database')
  } catch (err) {
    console.error('An error has occurred:', err)
  } finally {
    client.release()
  }
}

generateEmbeddings()

lib/vertexAiClient.ts

+7
@@ -0,0 +1,7 @@
import { PredictionServiceClient } from "@google-cloud/aiplatform";

const vertexAiClient = new PredictionServiceClient({
  apiEndpoint: process.env.GOOGLE_CLOUD_API_ENDPOINT,
})

export default vertexAiClient
