deepak191z committed on
Commit
c3bdfc2
·
verified ·
1 Parent(s): 66ae0d0

Update server.js

Browse files
Files changed (1) hide show
  1. server.js +313 -40
server.js CHANGED
@@ -1,58 +1,331 @@
1
- 'use strict';
 
 
 
2
 
3
- import express from 'express';
4
- import fetch from 'node-fetch';
5
- import sharp from 'sharp';
6
- import Lens from 'chrome-lens-ocr';
7
 
8
- // Constants
9
- const PORT = 7860;
10
- const HOST = '0.0.0.0';
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
 
12
- // App
13
- const app = express();
14
- const lens = new Lens();
15
 
16
- app.get('/', (req, res) => {
17
- res.send('Hello World from ExpressJS! This example is from the NodeJS Docs: https://nodejs.org/en/docs/guides/nodejs-docker-webapp/');
18
- });
 
 
19
 
20
- app.get('/scanByUrl', async (req, res) => {
21
- const { url } = req.query;
 
 
 
 
 
 
 
 
 
 
 
 
22
 
23
- if (!url) {
24
- return res.status(400).json({ error: 'Image URL is required' });
 
 
 
 
 
 
 
 
25
  }
 
26
 
 
 
27
  try {
28
- // Fetch the image
29
- const response = await fetch(url);
30
- if (!response.ok) throw new Error('Failed to fetch image');
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
31
 
32
- const buffer = await response.arrayBuffer();
33
- const imageBuffer = Buffer.from(buffer);
 
 
 
 
 
 
 
 
 
 
 
 
34
 
35
- // Get image metadata
36
- const metadata = await sharp(imageBuffer).metadata();
 
 
 
 
 
 
 
 
 
 
 
37
 
38
- // Resize if necessary
39
- let processedImage = imageBuffer;
40
- if (metadata.width > 1000 || metadata.height > 1000) {
41
- processedImage = await sharp(imageBuffer)
42
- .resize(1000, 1000, { fit: 'inside' })
43
- .toBuffer();
44
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
45
 
46
- // Scan with Chrome Lens OCR
47
- const data = await lens.scanByBuffer(processedImage);
48
- const combinedText = data.segments.map(segment => segment.text).join('\n\n');
 
 
 
 
49
 
50
- res.json({ combinedText, detailedData: data });
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
51
  } catch (error) {
52
- res.status(500).json({ error: error.message });
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
53
  }
54
- });
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
55
 
56
- app.listen(PORT, HOST, () => {
57
- console.log(`Running on http://${HOST}:${PORT}`);
58
- });
 
1
+ import { createServerAdapter } from '@whatwg-node/server'
2
+ import { AutoRouter, json, error, cors } from 'itty-router'
3
+ import { createServer } from 'http'
4
+ import dotenv from 'dotenv'
5
 
6
+ dotenv.config()
 
 
 
7
 
8
/**
 * Runtime configuration, sourced from environment variables with defaults.
 *
 * Fixes over the original:
 * - Environment variables are always strings, so `process.env.MAX_RETRY_COUNT || 3`
 *   left MAX_RETRY_COUNT/RETRY_DELAY as strings when set. They are now coerced
 *   with Number(), falling back to the default for missing/invalid values.
 * - `process.env.FAKE_HEADERS` could only ever be a string, which would break
 *   the `...config.FAKE_HEADERS` spreads used when building request headers.
 *   It is now parsed as a JSON object, with the built-in defaults as fallback.
 */
class Config {
  constructor() {
    // http.Server.listen accepts a numeric string, so PORT is kept as-is.
    this.PORT = process.env.PORT || 8787
    this.API_PREFIX = process.env.API_PREFIX || '/'
    // Empty API_KEY disables bearer-token authentication (see withAuth).
    this.API_KEY = process.env.API_KEY || ''
    // Number('') and Number(undefined) are falsy (0/NaN), so defaults apply.
    this.MAX_RETRY_COUNT = Number(process.env.MAX_RETRY_COUNT) || 3
    this.RETRY_DELAY = Number(process.env.RETRY_DELAY) || 5000
    // Optional JSON override for the browser-like headers sent to duckduckgo.com.
    let headerOverride
    if (process.env.FAKE_HEADERS) {
      try {
        headerOverride = JSON.parse(process.env.FAKE_HEADERS)
      } catch {
        headerOverride = undefined // malformed JSON → use defaults below
      }
    }
    this.FAKE_HEADERS = headerOverride || {
      Accept: '*/*',
      'Accept-Encoding': 'gzip, deflate, br, zstd',
      'Accept-Language': 'zh-CN,zh;q=0.9',
      Origin: 'https://duckduckgo.com/',
      Cookie: 'dcm=3',
      Dnt: '1',
      Priority: 'u=1, i',
      Referer: 'https://duckduckgo.com/',
      'Sec-Ch-Ua': '"Chromium";v="130", "Microsoft Edge";v="130", "Not?A_Brand";v="99"',
      'Sec-Ch-Ua-Mobile': '?0',
      'Sec-Ch-Ua-Platform': '"Windows"',
      'Sec-Fetch-Dest': 'empty',
      'Sec-Fetch-Mode': 'cors',
      'Sec-Fetch-Site': 'same-origin',
      'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36 Edg/130.0.0.0',
    }
  }
}
34
+
35
// Singleton configuration instance shared by every handler in this file.
const config = new Config()

// itty-router CORS helpers: `preflight` answers OPTIONS requests up front,
// `corsify` stamps the matching CORS headers onto every outgoing response.
const { preflight, corsify } = cors({
  origin: '*',
  allowMethods: '*',
  exposeHeaders: '*',
})
42
+
43
// Middleware: record the request's arrival time (ms epoch) on `request.start`
// so `logger` can report the elapsed handling time.
const withBenchmarking = (request) => {
  Object.assign(request, { start: Date.now() })
}
46
+
47
// Middleware: bearer-token authentication. A falsy config.API_KEY disables
// auth entirely. Returns an itty-router error response to short-circuit the
// request, or undefined to let it proceed.
const withAuth = (request) => {
  if (!config.API_KEY) return
  const header = request.headers.get('Authorization')
  const token = header?.startsWith('Bearer ') ? header.substring(7) : null
  if (token === null) {
    return error(401, 'Unauthorized: Missing or invalid Authorization header')
  }
  if (token !== config.API_KEY) {
    return error(403, 'Forbidden: Invalid API key')
  }
}
59
 
60
// Final handler: log method, status, URL, and elapsed ms (using the `start`
// timestamp that withBenchmarking stamped onto the request).
const logger = (res, req) => {
  const elapsedMs = Date.now() - req.start
  console.log(req.method, res.status, req.url, elapsedMs, 'ms')
}
63
 
64
// Router with global middleware: timing first, then CORS preflight, then
// bearer-token auth; `finally` stamps CORS headers and logs each request.
const router = AutoRouter({
  before: [withBenchmarking, preflight, withAuth],
  missing: () => error(404, '404 Not Found. Please check whether the calling URL is correct.'),
  finally: [corsify, logger],
})
69
 
70
// Health/landing endpoints.
router.get('/', () => json({ message: 'API 服务运行中~' }))
router.get('/ping', () => json({ message: 'pong' }))

// OpenAI-compatible model listing (ids map to DuckDuckGo models via convertModel).
// NOTE(review): with the default API_PREFIX of '/', this concatenation
// registers '//v1/models' — verify itty-router matches that against a
// request for '/v1/models', or set API_PREFIX to ''.
router.get(config.API_PREFIX + '/v1/models', () =>
  json({
    object: 'list',
    data: [
      { id: 'gpt-4o-mini', object: 'model', owned_by: 'ddg' },
      { id: 'claude-3-haiku', object: 'model', owned_by: 'ddg' },
      { id: 'llama-3.1-70b', object: 'model', owned_by: 'ddg' },
      { id: 'mixtral-8x7b', object: 'model', owned_by: 'ddg' },
      { id: 'o3-mini', object: 'model', owned_by: 'ddg' },
    ],
  })
)

// OpenAI-compatible chat completions endpoint (streaming and non-streaming).
router.post(config.API_PREFIX + '/v1/chat/completions', (req) => handleCompletion(req))
86
+
87
/**
 * Entry point for POST /v1/chat/completions.
 * Parses the OpenAI-style request body, maps the requested model to its
 * DuckDuckGo identifier, flattens the message list into a single prompt,
 * and delegates to createCompletion. Any failure (bad JSON, upstream error)
 * becomes a 500 response.
 */
async function handleCompletion(request) {
  try {
    const body = await request.json()
    const targetModel = convertModel(body.model)
    const prompt = messagesPrepare(body.messages)
    return createCompletion(targetModel, prompt, body.stream)
  } catch (err) {
    return error(500, err.message)
  }
}
97
 
98
/**
 * Sends one prompt to DuckDuckGo's chat endpoint and returns a Response
 * built from its event stream (see handlerStream).
 *
 * @param {string} model - DuckDuckGo model id (already mapped by convertModel).
 * @param {string} content - Flattened prompt text (from messagesPrepare).
 * @param {boolean} returnStream - true → SSE response, false → single JSON body.
 * @param {number} retryCount - Internal recursion counter; callers omit it.
 * @throws The last fetch/HTTP error once MAX_RETRY_COUNT retries are exhausted.
 */
async function createCompletion(model, content, returnStream, retryCount = 0) {
  // Fresh x-vqd-4 session token per attempt. NOTE(review): requestToken
  // swallows its own errors and may return undefined — the resulting bad
  // request is caught below and retried rather than failing fast.
  const token = await requestToken()
  try {
    const response = await fetch(`https://duckduckgo.com/duckchat/v1/chat`, {
      method: 'POST',
      headers: {
        ...config.FAKE_HEADERS,
        Accept: 'text/event-stream',
        'Content-Type': 'application/json',
        'x-vqd-4': token,
      },
      // The whole conversation is sent as a single user message.
      body: JSON.stringify({
        model: model,
        messages: [
          {
            role: 'user',
            content: content,
          },
        ],
      }),
    })

    if (!response.ok) {
      throw new Error(`Create Completion error! status: ${response.status}`)
    }
    return handlerStream(model, response.body, returnStream)
  } catch (err) {
    console.log(err)
    // Retry any failure (network or non-2xx) with a fixed delay, up to
    // config.MAX_RETRY_COUNT attempts; ++retryCount feeds the recursion.
    if (retryCount < config.MAX_RETRY_COUNT) {
      console.log('Retrying... count', ++retryCount)
      await new Promise((resolve) => setTimeout(resolve, config.RETRY_DELAY))
      return await createCompletion(model, content, returnStream, retryCount)
    }
    throw err
  }
}
134
 
135
/**
 * Converts DuckDuckGo's SSE body into an OpenAI-compatible Response.
 *
 * @param {string} model - Model id echoed into the output chunks.
 * @param {ReadableStream} rb - Upstream response body (SSE "data: {...}" lines).
 * @param {boolean} returnStream - true → re-emit as OpenAI SSE chunks;
 *   false → accumulate and emit one chat.completion JSON object at [DONE].
 */
async function handlerStream(model, rb, returnStream) {
  // Buffer for an SSE payload that arrived split across network chunks.
  let bwzChunk = ''
  // Accumulated assistant text, used for the non-streaming response.
  let previousText = ''
  // Reassemble partial chunks: a chunk not ending in `"}` is assumed
  // incomplete and is held in bwzChunk until the next read completes it.
  const handChunkData = (chunk) => {
    chunk = chunk.trim()
    if (bwzChunk != '') {
      chunk = bwzChunk + chunk
      bwzChunk = ''
    }

    if (chunk.includes('[DONE]')) {
      return chunk
    }

    if (chunk.slice(-2) !== '"}') {
      bwzChunk = chunk
    }
    return chunk
  }
  const reader = rb.getReader()
  // NOTE(review): decode() is called without { stream: true }; a multi-byte
  // UTF-8 character split across reads could be mangled — confirm upstream
  // chunk boundaries before relying on this.
  const decoder = new TextDecoder()
  const encoder = new TextEncoder()
  const stream = new ReadableStream({
    async start(controller) {
      while (true) {
        const { done, value } = await reader.read()
        if (done) {
          return controller.close()
        }
        const chunkStr = handChunkData(decoder.decode(value))
        // Incomplete payload buffered — wait for the next read.
        if (bwzChunk !== '') {
          continue
        }

        // Each SSE line is "data: <json>"; slice(6) strips the prefix.
        chunkStr.split('\n').forEach((line) => {
          if (line.length < 6) {
            return
          }
          line = line.slice(6)
          if (line !== '[DONE]') {
            const originReq = JSON.parse(line)

            if (originReq.action !== 'success') {
              return controller.error(new Error('Error: originReq stream chunk is not success'))
            }

            if (originReq.message) {
              previousText += originReq.message
              if (returnStream) {
                controller.enqueue(
                  encoder.encode(`data: ${JSON.stringify(newChatCompletionChunkWithModel(originReq.message, originReq.model))}\n\n`)
                )
              }
            }
          } else {
            // [DONE]: emit the stop chunk (streaming) or the full completion
            // object (non-streaming), then close.
            // NOTE(review): this `return` only exits the forEach callback; the
            // outer while-loop keeps reading and would call controller.close()
            // a second time if the upstream stream ends afterwards — verify.
            if (returnStream) {
              controller.enqueue(encoder.encode(`data: ${JSON.stringify(newStopChunkWithModel('stop', model))}\n\n`))
            } else {
              controller.enqueue(encoder.encode(JSON.stringify(newChatCompletionWithModel(previousText, model))))
            }
            return controller.close()
          }
        })
        continue
      }
    },
  })

  return new Response(stream, {
    headers: {
      'Content-Type': returnStream ? 'text/event-stream' : 'application/json',
    },
  })
}
209
+
210
/**
 * Flattens an OpenAI-style messages array into a single prompt string.
 * `system` is treated as `user`; roles other than user/assistant are dropped.
 * Array-form content keeps only parts with truthy `.text`, concatenated.
 * Each kept message becomes "role:content;\r\n".
 */
function messagesPrepare(messages) {
  const parts = []
  for (const { role: rawRole, content: rawContent } of messages) {
    const role = rawRole === 'system' ? 'user' : rawRole
    if (role !== 'user' && role !== 'assistant') continue
    const text = Array.isArray(rawContent)
      ? rawContent
          .filter((item) => item.text)
          .map((item) => item.text)
          .join('') || ''
      : rawContent
    parts.push(`${role}:${text};\r\n`)
  }
  return parts.join('')
}
227
+
228
/**
 * Fetches a fresh `x-vqd-4` session token from DuckDuckGo's status endpoint.
 *
 * @returns {Promise<string|null|undefined>} The token header value (null if
 *   the header is absent), or undefined when the request itself failed —
 *   createCompletion's retry loop handles that case.
 */
async function requestToken() {
  try {
    const response = await fetch(`https://duckduckgo.com/duckchat/v1/status`, {
      method: 'GET',
      headers: {
        ...config.FAKE_HEADERS,
        'x-vqd-accept': '1',
      },
    })
    const token = response.headers.get('x-vqd-4')
    return token
  } catch (err) {
    // Fix: the original declared `catch (error)` but logged the undefined
    // identifier `err`, throwing a ReferenceError from the catch block itself
    // whenever the fetch failed. Bind and log the actual error.
    console.log("Request token error: ", err)
  }
}
243
+
244
/**
 * Maps a public (OpenAI-style) model id, case-insensitively, to the
 * identifier DuckDuckGo expects. Unknown ids fall back to 'gpt-4o-mini'.
 * A Map is used so inherited Object keys (e.g. 'constructor') can't match.
 */
function convertModel(inputModel) {
  const aliases = new Map([
    ['claude-3-haiku', 'claude-3-haiku-20240307'],
    ['llama-3.1-70b', 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo'],
    ['mixtral-8x7b', 'mistralai/Mixtral-8x7B-Instruct-v0.1'],
    ['o3-mini', 'o3-mini'],
  ])
  return aliases.get(inputModel.toLowerCase()) || 'gpt-4o-mini'
}
262
+
263
/**
 * Builds one OpenAI-style streaming delta chunk carrying `text`.
 * The id is a fixed placeholder; `created` is always 0.
 */
function newChatCompletionChunkWithModel(text, model) {
  const choice = {
    index: 0,
    delta: { content: text },
    finish_reason: null,
  }
  return {
    id: 'chatcmpl-QXlha2FBbmROaXhpZUFyZUF3ZXNvbWUK',
    object: 'chat.completion.chunk',
    created: 0,
    model,
    choices: [choice],
  }
}
280
+
281
/**
 * Builds the terminal OpenAI-style streaming chunk whose only payload is
 * the finish reason (no delta). Placeholder id, `created` always 0.
 */
function newStopChunkWithModel(reason, model) {
  const terminalChoice = { index: 0, finish_reason: reason }
  return {
    id: 'chatcmpl-QXlha2FBbmROaXhpZUFyZUF3ZXNvbWUK',
    object: 'chat.completion.chunk',
    created: 0,
    model,
    choices: [terminalChoice],
  }
}
295
+
296
/**
 * Builds a complete (non-streaming) OpenAI-style chat.completion object
 * containing the full assistant reply. Token usage is not tracked and is
 * reported as zeros; placeholder id, `created` always 0.
 */
function newChatCompletionWithModel(text, model) {
  const zeroUsage = { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 }
  const answer = {
    message: { content: text, role: 'assistant' },
    index: 0,
  }
  return {
    id: 'chatcmpl-QXlha2FBbmROaXhpZUFyZUF3ZXNvbWUK',
    object: 'chat.completion',
    created: 0,
    model,
    usage: zeroUsage,
    choices: [answer],
  }
}
318
+
319
+ // Serverless Service
320
+
321
(async () => {
  // On Cloudflare Workers, requests arrive via addEventListener — no server.
  if (typeof addEventListener === 'function') return
  // Node.js: adapt the itty-router fetch handler to http.createServer.
  const ittyServer = createServerAdapter(router.fetch)
  const httpServer = createServer(ittyServer)
  // Fix: the original logged a hard-coded port 7860 while actually binding
  // config.PORT (default 8787). Log the real port, and only once listening.
  httpServer.listen(config.PORT, () => {
    console.log(`Listening on http://0.0.0.0:${config.PORT}`)
  })
})()
330
 
331
+ // export default router