diff options
author | jkim <jkim@FreeBSD.org> | 2011-03-30 00:46:53 +0800 |
---|---|---|
committer | jkim <jkim@FreeBSD.org> | 2011-03-30 00:46:53 +0800 |
commit | c65a662ee62c05f63b1d6732b26dcb703a7899e7 (patch) | |
tree | 8e5e08bd2583f6db2d511aa8d22f195a2304f582 /www/nspluginwrapper-devel | |
parent | a6f88848f67d064a80271aaa8415a15afa7489af (diff) | |
download | freebsd-ports-gnome-c65a662ee62c05f63b1d6732b26dcb703a7899e7.tar.gz freebsd-ports-gnome-c65a662ee62c05f63b1d6732b26dcb703a7899e7.tar.zst freebsd-ports-gnome-c65a662ee62c05f63b1d6732b26dcb703a7899e7.zip |
Fix a race condition on NPP_Destroy. With this patch, Adobe Flash plugin
is quite bearable now. For more detailed info, please see the Red Hat PR:
https://bugzilla.redhat.com/show_bug.cgi?id=680279
Obtained from: Fedora Project
Diffstat (limited to 'www/nspluginwrapper-devel')
-rw-r--r-- | www/nspluginwrapper-devel/Makefile | 4 | ||||
-rw-r--r-- | www/nspluginwrapper-devel/distinfo | 4 | ||||
-rw-r--r-- | www/nspluginwrapper-devel/files/patch-racecond | 209 |
3 files changed, 213 insertions, 4 deletions
diff --git a/www/nspluginwrapper-devel/Makefile b/www/nspluginwrapper-devel/Makefile index 99ade613b11c..856cc4ed6d02 100644 --- a/www/nspluginwrapper-devel/Makefile +++ b/www/nspluginwrapper-devel/Makefile @@ -7,7 +7,7 @@ PORTNAME= nspluginwrapper PORTVERSION= 1.3.0 -PORTREVISION= 7 +PORTREVISION= 8 CATEGORIES= www linux emulators MASTER_SITES= LOCAL/jkim DISTFILES= ${DISTNAME}${EXTRACT_SUFX} ${BINFILE}${EXTRACT_SUFX} @@ -21,7 +21,7 @@ LATEST_LINK= ${PORTNAME}-devel CONFLICTS= nspluginwrapper-1.2.* -BINFILE= ${PORTNAME}-i386-${PORTVERSION}-4 +BINFILE= ${PORTNAME}-i386-${PORTVERSION}-5 ONLY_FOR_ARCHS= i386 amd64 diff --git a/www/nspluginwrapper-devel/distinfo b/www/nspluginwrapper-devel/distinfo index f676e1dc98da..750bfdee648d 100644 --- a/www/nspluginwrapper-devel/distinfo +++ b/www/nspluginwrapper-devel/distinfo @@ -1,4 +1,4 @@ SHA256 (nspluginwrapper-1.3.0.tar.bz2) = b8900bd5f0bd950f95b963e5c8f5e20d7d15cc70e777b6058289687ad968b21c SIZE (nspluginwrapper-1.3.0.tar.bz2) = 336049 -SHA256 (nspluginwrapper-i386-1.3.0-4.tar.bz2) = 1db3b9992dd45a3b1e579c18e3b2696103d8fedaa312da71835af55e9a323d0a -SIZE (nspluginwrapper-i386-1.3.0-4.tar.bz2) = 62112 +SHA256 (nspluginwrapper-i386-1.3.0-5.tar.bz2) = 57904964581c0b171be392fce30542941caf9a848b8b1db4987171949615283a +SIZE (nspluginwrapper-i386-1.3.0-5.tar.bz2) = 62313 diff --git a/www/nspluginwrapper-devel/files/patch-racecond b/www/nspluginwrapper-devel/files/patch-racecond new file mode 100644 index 000000000000..97960e4a113b --- /dev/null +++ b/www/nspluginwrapper-devel/files/patch-racecond @@ -0,0 +1,209 @@ +--- src/npw-viewer.c.orig 2011-03-28 17:46:24.000000000 -0400 ++++ src/npw-viewer.c 2011-03-28 17:52:16.000000000 -0400 +@@ -221,8 +221,17 @@ + static GList *g_delayed_calls = NULL; + static guint g_delayed_calls_id = 0; + ++// We put delayed NPP_Destroy calls on a separate list because, unlike ++// NPN_ReleaseObject, these must be called on a clean stack and have no ++// other cause to get cleared.
Otherwise, it is possible for the ++ delayed_calls_process in g_NPP_Destroy_Now to call it early. ++static GList *g_delayed_destroys = NULL; ++static guint g_delayed_destroys_id = 0; ++ + static void g_NPN_ReleaseObject_Now(NPObject *npobj); ++static NPError g_NPP_Destroy_Now(PluginInstance *plugin, NPSavedData **sdata); + static gboolean delayed_calls_process_cb(gpointer user_data); ++static gboolean delayed_destroys_process_cb(gpointer user_data); + + static void delayed_calls_add(int type, gpointer data) + { +@@ -238,13 +247,19 @@ + delayed_calls_process_cb, NULL, NULL); + } + ++static void delayed_destroys_add(PluginInstance *plugin) ++{ ++ g_delayed_destroys = g_list_append(g_delayed_destroys, plugin); ++ ++ if (g_delayed_destroys_id == 0) ++ g_delayed_destroys_id = g_idle_add_full(G_PRIORITY_LOW, ++ delayed_destroys_process_cb, NULL, NULL); ++} ++ + // Returns whether there are pending calls left in the queue + static gboolean delayed_calls_process(PluginInstance *plugin, gboolean is_in_NPP_Destroy) + { +- GList *l = g_delayed_calls; +- while (l != NULL) { +- GList *cl = l; +- l = l->next; ++ while (g_delayed_calls != NULL) { + + if (!is_in_NPP_Destroy) { + /* Continue later if there is incoming RPC */ +@@ -252,7 +267,11 @@ + return TRUE; + } + +- DelayedCall *dcall = (DelayedCall *)cl->data; ++ DelayedCall *dcall = (DelayedCall *)g_delayed_calls->data; ++ /* XXX: Remove the link first; this function /must/ be ++ * re-entrant. We may be called again while processing the ++ * delayed call.
*/ ++ g_delayed_calls = g_list_delete_link(g_delayed_calls, g_delayed_calls); + switch (dcall->type) { + case RPC_DELAYED_NPN_RELEASE_OBJECT: + { +@@ -262,7 +281,6 @@ + } + } + NPW_MemFree(dcall); +- g_delayed_calls = g_list_delete_link(g_delayed_calls, cl); + } + + if (g_delayed_calls) +@@ -280,6 +298,25 @@ + return delayed_calls_process(NULL, FALSE); + } + ++static gboolean delayed_destroys_process_cb(gpointer user_data) ++{ ++ while (g_delayed_destroys != NULL) { ++ PluginInstance *plugin = (PluginInstance *)g_delayed_destroys->data; ++ g_delayed_destroys = g_list_delete_link(g_delayed_destroys, ++ g_delayed_destroys); ++ g_NPP_Destroy_Now(plugin, NULL); ++ } ++ ++ if (g_delayed_destroys) ++ return TRUE; ++ ++ if (g_delayed_destroys_id) { ++ g_source_remove(g_delayed_destroys_id); ++ g_delayed_destroys_id = 0; ++ } ++ return FALSE; ++} ++ + // NPIdentifier cache + static inline bool use_npidentifier_cache(void) + { +@@ -741,7 +778,6 @@ + } + } + +- + /* ====================================================================== */ + /* === XPCOM glue === */ + /* ====================================================================== */ +@@ -3338,6 +3374,13 @@ + return error; + } + ++ /* Clear any NPP_Destroys we may have delayed. Although it doesn't ++ really matter, and the plugin is going to die soon. ++ ++ XXX: To be really picky, we should probably delay this and make ++ sure it is run on a new event loop iteration. */ ++ delayed_destroys_process_cb(NULL); ++ + NPError ret = g_NP_Shutdown(); + return rpc_method_send_reply(connection, RPC_TYPE_INT32, ret, RPC_TYPE_INVALID); + } +@@ -3459,6 +3502,8 @@ + + // Process all pending calls as the data could become junk afterwards + // XXX: this also processes delayed calls from other instances ++ // XXX: Also, if this was delayed, the NPN_ReleaseObject calls will ++ // be ignored; the browser thinks we've already died.
+ delayed_calls_process(plugin, TRUE); + + D(bugiI("NPP_Destroy instance=%p\n", instance)); +@@ -3473,6 +3518,22 @@ + return ret; + } + ++static NPError g_NPP_Destroy_Now(PluginInstance *plugin, NPSavedData **save) ++{ ++ D(bug("g_NPP_Destroy_Now\n")); ++ ++ NPSavedData *save_area = NULL; ++ NPError ret = g_NPP_Destroy(PLUGIN_INSTANCE_NPP(plugin), &save_area); ++ if (save) { ++ *save = save_area; ++ } else if (save_area) { ++ npw_printf("WARNING: NPP_Destroy returned save_area, but it was ignored\n"); ++ } ++ ++ rpc_connection_unref(g_rpc_connection); ++ return ret; ++} ++ + static int handle_NPP_Destroy(rpc_connection_t *connection) + { + D(bug("handle_NPP_Destroy\n")); +@@ -3488,8 +3549,26 @@ + return error; + } + +- NPSavedData *save_area; +- NPError ret = g_NPP_Destroy(PLUGIN_INSTANCE_NPP(plugin), &save_area); ++ NPSavedData *save_area = NULL; ++ NPError ret = NPERR_NO_ERROR; ++ /* Take a ref for the rpc_method_send_reply; otherwise the ++ * rpc_connection_unref in g_NPP_Destroy_Now may cause a slight ++ * nuisance. */ ++ rpc_connection_ref(connection); ++ if (!rpc_method_in_invoke(connection)) { ++ /* The plugin is not on the stack; it's safe to call this. */ ++ D(bug("NPP_Destroy is fine.\n")); ++ ret = g_NPP_Destroy_Now(plugin, &save_area); ++ } else { ++ /* It is not safe to call NPP_Destroy right now. Delay it until we ++ * return to the event loop. ++ * ++ * NOTE: This means that the browser never sees the real return ++ * value of NPP_Destroy; the NPSavedData will be discarded, and any ++ * error code will be ignored.
*/ ++ D(bug("NPP_Destroy raced; delaying it to get a clean stack.\n")); ++ delayed_destroys_add(plugin); ++ } + + error = rpc_method_send_reply(connection, + RPC_TYPE_INT32, ret, +--- src/rpc.c.orig 2009-01-02 09:22:29.000000000 -0500 ++++ src/rpc.c 2011-03-28 17:52:16.000000000 -0400 +@@ -2097,6 +2097,22 @@ + return ret; + } + ++bool rpc_method_in_invoke(rpc_connection_t *connection) ++{ ++ D(bug("rpc_method_in_invoke\n")); ++ if (connection == NULL) ++ return false; ++ // Our stack should alternate between handle/dispatch and ++ // invokes. Some calls are only safe to handle called from an event ++ // loop. In this case, we should have values invoke_depth = 0; ++ // handle_depth = 1; dispatch_depth = 1 ++ D(bug("invoke_depth = %d; dispatch_depth = %d; handle_depth = %d\n", ++ connection->invoke_depth, ++ connection->dispatch_depth, ++ connection->handle_depth)); ++ return connection->invoke_depth > 0; ++} ++ + + /* ====================================================================== */ + /* === Test Program === */ +--- src/rpc.h.orig 2009-01-02 09:22:29.000000000 -0500 ++++ src/rpc.h 2011-03-28 17:52:16.000000000 -0400 +@@ -119,6 +119,7 @@ + extern int rpc_method_wait_for_reply(rpc_connection_t *connection, ...) attribute_hidden; + extern int rpc_method_get_args(rpc_connection_t *connection, ...) attribute_hidden; + extern int rpc_method_send_reply(rpc_connection_t *connection, ...) attribute_hidden; ++extern bool rpc_method_in_invoke(rpc_connection_t *connection) attribute_hidden; + + #ifdef __cplusplus + } |